Merge pull request #18096 from markdroth/lb_policy_api

Second attempt: LB policy picker API
reviewable/pr18081/r3^2
Mark D. Roth 6 years ago committed by GitHub
commit 5f00cfd3bd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      BUILD
  2. 12
      CMakeLists.txt
  3. 12
      Makefile
  4. 4
      build.yaml
  5. 2
      config.m4
  6. 2
      config.w32
  7. 2
      gRPC-C++.podspec
  8. 6
      gRPC-Core.podspec
  9. 4
      grpc.gemspec
  10. 8
      grpc.gyp
  11. 4
      package.xml
  12. 681
      src/core/ext/filters/client_channel/client_channel.cc
  13. 42
      src/core/ext/filters/client_channel/lb_policy.cc
  14. 302
      src/core/ext/filters/client_channel/lb_policy.h
  15. 979
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  16. 2
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc
  17. 2
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
  18. 295
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  19. 373
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  20. 13
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  21. 634
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
  22. 946
      src/core/ext/filters/client_channel/request_routing.cc
  23. 181
      src/core/ext/filters/client_channel/request_routing.h
  24. 452
      src/core/ext/filters/client_channel/resolving_lb_policy.cc
  25. 135
      src/core/ext/filters/client_channel/resolving_lb_policy.h
  26. 23
      src/core/ext/filters/client_channel/subchannel.cc
  27. 5
      src/core/lib/gprpp/orphanable.h
  28. 5
      src/core/lib/gprpp/ref_counted.h
  29. 2
      src/python/grpcio/grpc_core_dependencies.py
  30. 18
      test/core/channel/channel_stack_builder_test.cc
  31. 147
      test/core/util/test_lb_policies.cc
  32. 1
      test/cpp/microbenchmarks/bm_call_create.cc
  33. 4
      tools/doxygen/Doxyfile.core.internal
  34. 6
      tools/run_tests/generated/sources_and_headers.json

@ -1075,10 +1075,10 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.cc", "src/core/ext/filters/client_channel/parse_address.cc",
"src/core/ext/filters/client_channel/proxy_mapper.cc", "src/core/ext/filters/client_channel/proxy_mapper.cc",
"src/core/ext/filters/client_channel/proxy_mapper_registry.cc", "src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
"src/core/ext/filters/client_channel/request_routing.cc",
"src/core/ext/filters/client_channel/resolver.cc", "src/core/ext/filters/client_channel/resolver.cc",
"src/core/ext/filters/client_channel/resolver_registry.cc", "src/core/ext/filters/client_channel/resolver_registry.cc",
"src/core/ext/filters/client_channel/resolver_result_parsing.cc", "src/core/ext/filters/client_channel/resolver_result_parsing.cc",
"src/core/ext/filters/client_channel/resolving_lb_policy.cc",
"src/core/ext/filters/client_channel/retry_throttle.cc", "src/core/ext/filters/client_channel/retry_throttle.cc",
"src/core/ext/filters/client_channel/server_address.cc", "src/core/ext/filters/client_channel/server_address.cc",
"src/core/ext/filters/client_channel/subchannel.cc", "src/core/ext/filters/client_channel/subchannel.cc",
@ -1101,11 +1101,11 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.h", "src/core/ext/filters/client_channel/parse_address.h",
"src/core/ext/filters/client_channel/proxy_mapper.h", "src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h", "src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.h", "src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h", "src/core/ext/filters/client_channel/resolver_factory.h",
"src/core/ext/filters/client_channel/resolver_registry.h", "src/core/ext/filters/client_channel/resolver_registry.h",
"src/core/ext/filters/client_channel/resolver_result_parsing.h", "src/core/ext/filters/client_channel/resolver_result_parsing.h",
"src/core/ext/filters/client_channel/resolving_lb_policy.h",
"src/core/ext/filters/client_channel/retry_throttle.h", "src/core/ext/filters/client_channel/retry_throttle.h",
"src/core/ext/filters/client_channel/server_address.h", "src/core/ext/filters/client_channel/server_address.h",
"src/core/ext/filters/client_channel/subchannel.h", "src/core/ext/filters/client_channel/subchannel.h",

@ -1239,10 +1239,10 @@ add_library(grpc
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc
@ -1594,10 +1594,10 @@ add_library(grpc_cronet
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc
@ -1972,10 +1972,10 @@ add_library(grpc_test_util
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc
@ -2297,10 +2297,10 @@ add_library(grpc_test_util_unsecure
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc
@ -2633,10 +2633,10 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc
@ -3490,10 +3490,10 @@ add_library(grpc++_cronet
src/core/ext/filters/client_channel/parse_address.cc src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc src/core/ext/filters/client_channel/resolver_result_parsing.cc
src/core/ext/filters/client_channel/resolving_lb_policy.cc
src/core/ext/filters/client_channel/retry_throttle.cc src/core/ext/filters/client_channel/retry_throttle.cc
src/core/ext/filters/client_channel/server_address.cc src/core/ext/filters/client_channel/server_address.cc
src/core/ext/filters/client_channel/subchannel.cc src/core/ext/filters/client_channel/subchannel.cc

@ -3758,10 +3758,10 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \
@ -4107,10 +4107,10 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \
@ -4478,10 +4478,10 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \
@ -4790,10 +4790,10 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \
@ -5100,10 +5100,10 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \
@ -5934,10 +5934,10 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \

@ -585,11 +585,11 @@ filegroups:
- src/core/ext/filters/client_channel/parse_address.h - src/core/ext/filters/client_channel/parse_address.h
- src/core/ext/filters/client_channel/proxy_mapper.h - src/core/ext/filters/client_channel/proxy_mapper.h
- src/core/ext/filters/client_channel/proxy_mapper_registry.h - src/core/ext/filters/client_channel/proxy_mapper_registry.h
- src/core/ext/filters/client_channel/request_routing.h
- src/core/ext/filters/client_channel/resolver.h - src/core/ext/filters/client_channel/resolver.h
- src/core/ext/filters/client_channel/resolver_factory.h - src/core/ext/filters/client_channel/resolver_factory.h
- src/core/ext/filters/client_channel/resolver_registry.h - src/core/ext/filters/client_channel/resolver_registry.h
- src/core/ext/filters/client_channel/resolver_result_parsing.h - src/core/ext/filters/client_channel/resolver_result_parsing.h
- src/core/ext/filters/client_channel/resolving_lb_policy.h
- src/core/ext/filters/client_channel/retry_throttle.h - src/core/ext/filters/client_channel/retry_throttle.h
- src/core/ext/filters/client_channel/server_address.h - src/core/ext/filters/client_channel/server_address.h
- src/core/ext/filters/client_channel/subchannel.h - src/core/ext/filters/client_channel/subchannel.h
@ -612,10 +612,10 @@ filegroups:
- src/core/ext/filters/client_channel/parse_address.cc - src/core/ext/filters/client_channel/parse_address.cc
- src/core/ext/filters/client_channel/proxy_mapper.cc - src/core/ext/filters/client_channel/proxy_mapper.cc
- src/core/ext/filters/client_channel/proxy_mapper_registry.cc - src/core/ext/filters/client_channel/proxy_mapper_registry.cc
- src/core/ext/filters/client_channel/request_routing.cc
- src/core/ext/filters/client_channel/resolver.cc - src/core/ext/filters/client_channel/resolver.cc
- src/core/ext/filters/client_channel/resolver_registry.cc - src/core/ext/filters/client_channel/resolver_registry.cc
- src/core/ext/filters/client_channel/resolver_result_parsing.cc - src/core/ext/filters/client_channel/resolver_result_parsing.cc
- src/core/ext/filters/client_channel/resolving_lb_policy.cc
- src/core/ext/filters/client_channel/retry_throttle.cc - src/core/ext/filters/client_channel/retry_throttle.cc
- src/core/ext/filters/client_channel/server_address.cc - src/core/ext/filters/client_channel/server_address.cc
- src/core/ext/filters/client_channel/subchannel.cc - src/core/ext/filters/client_channel/subchannel.cc

@ -355,10 +355,10 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/parse_address.cc \ src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \
src/core/ext/filters/client_channel/subchannel.cc \ src/core/ext/filters/client_channel/subchannel.cc \

@ -330,10 +330,10 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\parse_address.cc " + "src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper.cc " + "src\\core\\ext\\filters\\client_channel\\proxy_mapper.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper_registry.cc " + "src\\core\\ext\\filters\\client_channel\\proxy_mapper_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\request_routing.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver.cc " + "src\\core\\ext\\filters\\client_channel\\resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_registry.cc " + "src\\core\\ext\\filters\\client_channel\\resolver_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_result_parsing.cc " + "src\\core\\ext\\filters\\client_channel\\resolver_result_parsing.cc " +
"src\\core\\ext\\filters\\client_channel\\resolving_lb_policy.cc " +
"src\\core\\ext\\filters\\client_channel\\retry_throttle.cc " + "src\\core\\ext\\filters\\client_channel\\retry_throttle.cc " +
"src\\core\\ext\\filters\\client_channel\\server_address.cc " + "src\\core\\ext\\filters\\client_channel\\server_address.cc " +
"src\\core\\ext\\filters\\client_channel\\subchannel.cc " + "src\\core\\ext\\filters\\client_channel\\subchannel.cc " +

@ -358,11 +358,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h', 'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h', 'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h', 'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h', 'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h', 'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h', 'src/core/ext/filters/client_channel/resolver_registry.h',
'src/core/ext/filters/client_channel/resolver_result_parsing.h', 'src/core/ext/filters/client_channel/resolver_result_parsing.h',
'src/core/ext/filters/client_channel/resolving_lb_policy.h',
'src/core/ext/filters/client_channel/retry_throttle.h', 'src/core/ext/filters/client_channel/retry_throttle.h',
'src/core/ext/filters/client_channel/server_address.h', 'src/core/ext/filters/client_channel/server_address.h',
'src/core/ext/filters/client_channel/subchannel.h', 'src/core/ext/filters/client_channel/subchannel.h',

@ -352,11 +352,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h', 'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h', 'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h', 'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h', 'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h', 'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h', 'src/core/ext/filters/client_channel/resolver_registry.h',
'src/core/ext/filters/client_channel/resolver_result_parsing.h', 'src/core/ext/filters/client_channel/resolver_result_parsing.h',
'src/core/ext/filters/client_channel/resolving_lb_policy.h',
'src/core/ext/filters/client_channel/retry_throttle.h', 'src/core/ext/filters/client_channel/retry_throttle.h',
'src/core/ext/filters/client_channel/server_address.h', 'src/core/ext/filters/client_channel/server_address.h',
'src/core/ext/filters/client_channel/subchannel.h', 'src/core/ext/filters/client_channel/subchannel.h',
@ -799,10 +799,10 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',
@ -980,11 +980,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h', 'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h', 'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h', 'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h', 'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h', 'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h', 'src/core/ext/filters/client_channel/resolver_registry.h',
'src/core/ext/filters/client_channel/resolver_result_parsing.h', 'src/core/ext/filters/client_channel/resolver_result_parsing.h',
'src/core/ext/filters/client_channel/resolving_lb_policy.h',
'src/core/ext/filters/client_channel/retry_throttle.h', 'src/core/ext/filters/client_channel/retry_throttle.h',
'src/core/ext/filters/client_channel/server_address.h', 'src/core/ext/filters/client_channel/server_address.h',
'src/core/ext/filters/client_channel/subchannel.h', 'src/core/ext/filters/client_channel/subchannel.h',

@ -286,11 +286,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.h ) s.files += %w( src/core/ext/filters/client_channel/parse_address.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.h ) s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.h ) s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.h )
s.files += %w( src/core/ext/filters/client_channel/request_routing.h )
s.files += %w( src/core/ext/filters/client_channel/resolver.h ) s.files += %w( src/core/ext/filters/client_channel/resolver.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_factory.h ) s.files += %w( src/core/ext/filters/client_channel/resolver_factory.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.h ) s.files += %w( src/core/ext/filters/client_channel/resolver_registry.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.h ) s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.h )
s.files += %w( src/core/ext/filters/client_channel/resolving_lb_policy.h )
s.files += %w( src/core/ext/filters/client_channel/retry_throttle.h ) s.files += %w( src/core/ext/filters/client_channel/retry_throttle.h )
s.files += %w( src/core/ext/filters/client_channel/server_address.h ) s.files += %w( src/core/ext/filters/client_channel/server_address.h )
s.files += %w( src/core/ext/filters/client_channel/subchannel.h ) s.files += %w( src/core/ext/filters/client_channel/subchannel.h )
@ -736,10 +736,10 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.cc ) s.files += %w( src/core/ext/filters/client_channel/parse_address.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.cc ) s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.cc ) s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/request_routing.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.cc ) s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.cc )
s.files += %w( src/core/ext/filters/client_channel/resolving_lb_policy.cc )
s.files += %w( src/core/ext/filters/client_channel/retry_throttle.cc ) s.files += %w( src/core/ext/filters/client_channel/retry_throttle.cc )
s.files += %w( src/core/ext/filters/client_channel/server_address.cc ) s.files += %w( src/core/ext/filters/client_channel/server_address.cc )
s.files += %w( src/core/ext/filters/client_channel/subchannel.cc ) s.files += %w( src/core/ext/filters/client_channel/subchannel.cc )

@ -537,10 +537,10 @@
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',
@ -801,10 +801,10 @@
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',
@ -1046,10 +1046,10 @@
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',
@ -1302,10 +1302,10 @@
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',

@ -291,11 +291,11 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_factory.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolving_lb_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/retry_throttle.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/retry_throttle.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/server_address.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/server_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/subchannel.h" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/subchannel.h" role="src" />
@ -741,10 +741,10 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolving_lb_policy.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/retry_throttle.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/retry_throttle.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/server_address.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/server_address.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/subchannel.cc" role="src" /> <file baseinstalldir="/" name="src/core/ext/filters/client_channel/subchannel.cc" role="src" />

@ -32,12 +32,14 @@
#include <grpc/support/sync.h> #include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/backup_poller.h" #include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/global_subchannel_pool.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h" #include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/local_subchannel_pool.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/request_routing.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h" #include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/resolver_result_parsing.h" #include "src/core/ext/filters/client_channel/resolver_result_parsing.h"
#include "src/core/ext/filters/client_channel/resolving_lb_policy.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h" #include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h" #include "src/core/ext/filters/deadline/deadline_filter.h"
@ -68,6 +70,8 @@ using grpc_core::internal::ClientChannelMethodParamsTable;
using grpc_core::internal::ProcessedResolverResult; using grpc_core::internal::ProcessedResolverResult;
using grpc_core::internal::ServerRetryThrottleData; using grpc_core::internal::ServerRetryThrottleData;
using grpc_core::LoadBalancingPolicy;
/* Client channel implementation */ /* Client channel implementation */
// By default, we buffer 256 KiB per RPC for retries. // By default, we buffer 256 KiB per RPC for retries.
@ -86,44 +90,171 @@ grpc_core::TraceFlag grpc_client_channel_trace(false, "client_channel");
struct external_connectivity_watcher; struct external_connectivity_watcher;
typedef struct client_channel_channel_data { struct QueuedPick {
grpc_core::ManualConstructor<grpc_core::RequestRouter> request_router; LoadBalancingPolicy::PickState pick;
grpc_call_element* elem;
QueuedPick* next = nullptr;
};
typedef struct client_channel_channel_data {
bool deadline_checking_enabled; bool deadline_checking_enabled;
bool enable_retries; bool enable_retries;
size_t per_rpc_retry_buffer_size; size_t per_rpc_retry_buffer_size;
/** combiner protecting all variables below in this data structure */ /** combiner protecting all variables below in this data structure */
grpc_combiner* combiner; grpc_combiner* combiner;
/** retry throttle data */
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
/** maps method names to method_parameters structs */
grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
/** owning stack */ /** owning stack */
grpc_channel_stack* owning_stack; grpc_channel_stack* owning_stack;
/** interested parties (owned) */ /** interested parties (owned) */
grpc_pollset_set* interested_parties; grpc_pollset_set* interested_parties;
// Client channel factory. Holds a ref.
/* external_connectivity_watcher_list head is guarded by its own mutex, since grpc_client_channel_factory* client_channel_factory;
* counts need to be grabbed immediately without polling on a cq */ // Subchannel pool.
gpr_mu external_connectivity_watcher_list_mu; grpc_core::RefCountedPtr<grpc_core::SubchannelPoolInterface> subchannel_pool;
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
grpc_core::channelz::ClientChannelNode* channelz_node;
// Resolving LB policy.
grpc_core::OrphanablePtr<LoadBalancingPolicy> resolving_lb_policy;
// Subchannel picker from LB policy.
grpc_core::UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker;
// Linked list of queued picks.
QueuedPick* queued_picks;
bool have_service_config;
/** retry throttle data from service config */
grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
/** per-method service config data */
grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table;
/* the following properties are guarded by a mutex since APIs require them /* the following properties are guarded by a mutex since APIs require them
to be instantaneously available */ to be instantaneously available */
gpr_mu info_mu; gpr_mu info_mu;
grpc_core::UniquePtr<char> info_lb_policy_name; grpc_core::UniquePtr<char> info_lb_policy_name;
/** service config in JSON form */
grpc_core::UniquePtr<char> info_service_config_json; grpc_core::UniquePtr<char> info_service_config_json;
grpc_connectivity_state_tracker state_tracker;
grpc_error* disconnect_error;
/* external_connectivity_watcher_list head is guarded by its own mutex, since
* counts need to be grabbed immediately without polling on a cq */
gpr_mu external_connectivity_watcher_list_mu;
struct external_connectivity_watcher* external_connectivity_watcher_list_head;
} channel_data; } channel_data;
// Synchronous callback from chand->request_router to process a resolver // Forward declarations.
static void start_pick_locked(void* arg, grpc_error* ignored);
static void maybe_apply_service_config_to_call_locked(grpc_call_element* elem);
static const char* get_channel_connectivity_state_change_string(
grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_IDLE:
return "Channel state change to IDLE";
case GRPC_CHANNEL_CONNECTING:
return "Channel state change to CONNECTING";
case GRPC_CHANNEL_READY:
return "Channel state change to READY";
case GRPC_CHANNEL_TRANSIENT_FAILURE:
return "Channel state change to TRANSIENT_FAILURE";
case GRPC_CHANNEL_SHUTDOWN:
return "Channel state change to SHUTDOWN";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
static void set_connectivity_state_and_picker_locked(
channel_data* chand, grpc_connectivity_state state, grpc_error* state_error,
const char* reason,
grpc_core::UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker) {
// Update connectivity state.
grpc_connectivity_state_set(&chand->state_tracker, state, state_error,
reason);
if (chand->channelz_node != nullptr) {
chand->channelz_node->AddTraceEvent(
grpc_core::channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
get_channel_connectivity_state_change_string(state)));
}
// Update picker.
chand->picker = std::move(picker);
// Re-process queued picks.
for (QueuedPick* pick = chand->queued_picks; pick != nullptr;
pick = pick->next) {
start_pick_locked(pick->elem, GRPC_ERROR_NONE);
}
}
namespace grpc_core {
namespace {
class ClientChannelControlHelper
: public LoadBalancingPolicy::ChannelControlHelper {
public:
explicit ClientChannelControlHelper(channel_data* chand) : chand_(chand) {
GRPC_CHANNEL_STACK_REF(chand_->owning_stack, "ClientChannelControlHelper");
}
~ClientChannelControlHelper() override {
GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack,
"ClientChannelControlHelper");
}
Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
grpc_arg arg = SubchannelPoolInterface::CreateChannelArg(
chand_->subchannel_pool.get());
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(&args, &arg, 1);
Subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
chand_->client_channel_factory, new_args);
grpc_channel_args_destroy(new_args);
return subchannel;
}
grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
const grpc_channel_args& args) override {
return grpc_client_channel_factory_create_channel(
chand_->client_channel_factory, target, type, &args);
}
void UpdateState(
grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<LoadBalancingPolicy::SubchannelPicker> picker) override {
if (grpc_client_channel_trace.enabled()) {
const char* extra = chand_->disconnect_error == GRPC_ERROR_NONE
? ""
: " (ignoring -- channel shutting down)";
gpr_log(GPR_INFO, "chand=%p: update: state=%s error=%s picker=%p%s",
chand_, grpc_connectivity_state_name(state),
grpc_error_string(state_error), picker.get(), extra);
}
// Do update only if not shutting down.
if (chand_->disconnect_error == GRPC_ERROR_NONE) {
set_connectivity_state_and_picker_locked(chand_, state, state_error,
"helper", std::move(picker));
} else {
GRPC_ERROR_UNREF(state_error);
}
}
// No-op -- we should never get this from ResolvingLoadBalancingPolicy.
void RequestReresolution() override {}
private:
channel_data* chand_;
};
} // namespace
} // namespace grpc_core
// Synchronous callback from chand->resolving_lb_policy to process a resolver
// result update. // result update.
static bool process_resolver_result_locked(void* arg, static bool process_resolver_result_locked(void* arg,
const grpc_channel_args& args, const grpc_channel_args& args,
const char** lb_policy_name, const char** lb_policy_name,
grpc_json** lb_policy_config) { grpc_json** lb_policy_config) {
channel_data* chand = static_cast<channel_data*>(arg); channel_data* chand = static_cast<channel_data*>(arg);
chand->have_service_config = true;
ProcessedResolverResult resolver_result(args, chand->enable_retries); ProcessedResolverResult resolver_result(args, chand->enable_retries);
grpc_core::UniquePtr<char> service_config_json = grpc_core::UniquePtr<char> service_config_json =
resolver_result.service_config_json(); resolver_result.service_config_json();
@ -148,9 +279,38 @@ static bool process_resolver_result_locked(void* arg,
// Return results. // Return results.
*lb_policy_name = chand->info_lb_policy_name.get(); *lb_policy_name = chand->info_lb_policy_name.get();
*lb_policy_config = resolver_result.lb_policy_config(); *lb_policy_config = resolver_result.lb_policy_config();
// Apply service config to queued picks.
for (QueuedPick* pick = chand->queued_picks; pick != nullptr;
pick = pick->next) {
maybe_apply_service_config_to_call_locked(pick->elem);
}
return service_config_changed; return service_config_changed;
} }
static grpc_error* do_ping_locked(channel_data* chand, grpc_transport_op* op) {
grpc_error* error = GRPC_ERROR_NONE;
grpc_connectivity_state state =
grpc_connectivity_state_get(&chand->state_tracker, &error);
if (state != GRPC_CHANNEL_READY) {
grpc_error* new_error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"channel not connected", &error, 1);
GRPC_ERROR_UNREF(error);
return new_error;
}
LoadBalancingPolicy::PickState pick;
chand->picker->Pick(&pick, &error);
if (pick.connected_subchannel != nullptr) {
pick.connected_subchannel->Ping(op->send_ping.on_initiate,
op->send_ping.on_ack);
} else {
if (error == GRPC_ERROR_NONE) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"LB policy dropped call on ping");
}
}
return error;
}
static void start_transport_op_locked(void* arg, grpc_error* error_ignored) { static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
grpc_transport_op* op = static_cast<grpc_transport_op*>(arg); grpc_transport_op* op = static_cast<grpc_transport_op*>(arg);
grpc_channel_element* elem = grpc_channel_element* elem =
@ -158,47 +318,40 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (op->on_connectivity_state_change != nullptr) { if (op->on_connectivity_state_change != nullptr) {
chand->request_router->NotifyOnConnectivityStateChange( grpc_connectivity_state_notify_on_state_change(
op->connectivity_state, op->on_connectivity_state_change); &chand->state_tracker, op->connectivity_state,
op->on_connectivity_state_change);
op->on_connectivity_state_change = nullptr; op->on_connectivity_state_change = nullptr;
op->connectivity_state = nullptr; op->connectivity_state = nullptr;
} }
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) { if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
if (chand->request_router->lb_policy() == nullptr) { grpc_error* error = do_ping_locked(chand, op);
grpc_error* error = if (error != GRPC_ERROR_NONE) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing");
GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error); GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
} else {
grpc_error* error = GRPC_ERROR_NONE;
grpc_core::LoadBalancingPolicy::PickState pick_state;
// Pick must return synchronously, because pick_state.on_complete is null.
GPR_ASSERT(
chand->request_router->lb_policy()->PickLocked(&pick_state, &error));
if (pick_state.connected_subchannel != nullptr) {
pick_state.connected_subchannel->Ping(op->send_ping.on_initiate,
op->send_ping.on_ack);
} else {
if (error == GRPC_ERROR_NONE) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"LB policy dropped call on ping");
}
GRPC_CLOSURE_SCHED(op->send_ping.on_initiate, GRPC_ERROR_REF(error));
GRPC_CLOSURE_SCHED(op->send_ping.on_ack, error);
}
op->bind_pollset = nullptr;
} }
op->bind_pollset = nullptr;
op->send_ping.on_initiate = nullptr; op->send_ping.on_initiate = nullptr;
op->send_ping.on_ack = nullptr; op->send_ping.on_ack = nullptr;
} }
if (op->disconnect_with_error != GRPC_ERROR_NONE) { if (op->reset_connect_backoff) {
chand->request_router->ShutdownLocked(op->disconnect_with_error); chand->resolving_lb_policy->ResetBackoffLocked();
} }
if (op->reset_connect_backoff) { if (op->disconnect_with_error != GRPC_ERROR_NONE) {
chand->request_router->ResetConnectionBackoffLocked(); chand->disconnect_error = op->disconnect_with_error;
grpc_pollset_set_del_pollset_set(
chand->resolving_lb_policy->interested_parties(),
chand->interested_parties);
chand->resolving_lb_policy.reset();
set_connectivity_state_and_picker_locked(
chand, GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(op->disconnect_with_error),
"shutdown from API",
grpc_core::UniquePtr<LoadBalancingPolicy::SubchannelPicker>(
grpc_core::New<LoadBalancingPolicy::TransientFailurePicker>(
GRPC_ERROR_REF(op->disconnect_with_error))));
} }
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op"); GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");
@ -244,6 +397,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
GPR_ASSERT(elem->filter == &grpc_client_channel_filter); GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members. // Initialize data members.
chand->combiner = grpc_combiner_create(); chand->combiner = grpc_combiner_create();
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
chand->disconnect_error = GRPC_ERROR_NONE;
gpr_mu_init(&chand->info_mu); gpr_mu_init(&chand->info_mu);
gpr_mu_init(&chand->external_connectivity_watcher_list_mu); gpr_mu_init(&chand->external_connectivity_watcher_list_mu);
@ -275,8 +431,9 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING( return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"client channel factory arg must be a pointer"); "client channel factory arg must be a pointer");
} }
grpc_client_channel_factory* client_channel_factory = chand->client_channel_factory =
static_cast<grpc_client_channel_factory*>(arg->value.pointer.p); static_cast<grpc_client_channel_factory*>(arg->value.pointer.p);
grpc_client_channel_factory_ref(chand->client_channel_factory);
// Get server name to resolve, using proxy mapper if needed. // Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI); arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == nullptr) { if (arg == nullptr) {
@ -291,26 +448,71 @@ static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
grpc_channel_args* new_args = nullptr; grpc_channel_args* new_args = nullptr;
grpc_proxy_mappers_map_name(arg->value.string, args->channel_args, grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
&proxy_name, &new_args); &proxy_name, &new_args);
// Instantiate request router. grpc_core::UniquePtr<char> target_uri(
grpc_client_channel_factory_ref(client_channel_factory); proxy_name != nullptr ? proxy_name : gpr_strdup(arg->value.string));
// Instantiate subchannel pool.
arg = grpc_channel_args_find(args->channel_args,
GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL);
if (grpc_channel_arg_get_bool(arg, false)) {
chand->subchannel_pool =
grpc_core::MakeRefCounted<grpc_core::LocalSubchannelPool>();
} else {
chand->subchannel_pool = grpc_core::GlobalSubchannelPool::instance();
}
// Instantiate resolving LB policy.
LoadBalancingPolicy::Args lb_args;
lb_args.combiner = chand->combiner;
lb_args.channel_control_helper =
grpc_core::UniquePtr<LoadBalancingPolicy::ChannelControlHelper>(
grpc_core::New<grpc_core::ClientChannelControlHelper>(chand));
lb_args.args = new_args != nullptr ? new_args : args->channel_args;
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
chand->request_router.Init( chand->resolving_lb_policy.reset(
chand->owning_stack, chand->combiner, client_channel_factory, grpc_core::New<grpc_core::ResolvingLoadBalancingPolicy>(
chand->interested_parties, &grpc_client_channel_trace, std::move(lb_args), &grpc_client_channel_trace, std::move(target_uri),
process_resolver_result_locked, chand, process_resolver_result_locked, chand, &error));
proxy_name != nullptr ? proxy_name : arg->value.string /* target_uri */,
new_args != nullptr ? new_args : args->channel_args, &error);
gpr_free(proxy_name);
grpc_channel_args_destroy(new_args); grpc_channel_args_destroy(new_args);
if (error != GRPC_ERROR_NONE) {
// Orphan the resolving LB policy and flush the exec_ctx to ensure
// that it finishes shutting down. This ensures that if we are
// failing, we destroy the ClientChannelControlHelper (and thus
// unref the channel stack) before we return.
// TODO(roth): This is not a complete solution, because it only
// catches the case where channel stack initialization fails in this
// particular filter. If there is a failure in a different filter, we
// will leave a dangling ref here, which can cause a crash. Fortunately,
// in practice, there are no other filters that can cause failures in
// channel stack initialization, so this works for now.
chand->resolving_lb_policy.reset();
grpc_core::ExecCtx::Get()->Flush();
} else {
grpc_pollset_set_add_pollset_set(
chand->resolving_lb_policy->interested_parties(),
chand->interested_parties);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p: created resolving_lb_policy=%p", chand,
chand->resolving_lb_policy.get());
}
}
return error; return error;
} }
/* Destructor for channel_data */ /* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_channel_element* elem) { static void cc_destroy_channel_elem(grpc_channel_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->request_router.Destroy(); if (chand->resolving_lb_policy != nullptr) {
grpc_pollset_set_del_pollset_set(
chand->resolving_lb_policy->interested_parties(),
chand->interested_parties);
chand->resolving_lb_policy.reset();
}
// TODO(roth): Once we convert the filter API to C++, there will no // TODO(roth): Once we convert the filter API to C++, there will no
// longer be any need to explicitly reset these smart pointer data members. // longer be any need to explicitly reset these smart pointer data members.
chand->picker.reset();
chand->subchannel_pool.reset();
if (chand->client_channel_factory != nullptr) {
grpc_client_channel_factory_unref(chand->client_channel_factory);
}
chand->info_lb_policy_name.reset(); chand->info_lb_policy_name.reset();
chand->info_service_config_json.reset(); chand->info_service_config_json.reset();
chand->retry_throttle_data.reset(); chand->retry_throttle_data.reset();
@ -318,6 +520,8 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
grpc_client_channel_stop_backup_polling(chand->interested_parties); grpc_client_channel_stop_backup_polling(chand->interested_parties);
grpc_pollset_set_destroy(chand->interested_parties); grpc_pollset_set_destroy(chand->interested_parties);
GRPC_COMBINER_UNREF(chand->combiner, "client_channel"); GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
GRPC_ERROR_UNREF(chand->disconnect_error);
grpc_connectivity_state_destroy(&chand->state_tracker);
gpr_mu_destroy(&chand->info_mu); gpr_mu_destroy(&chand->info_mu);
gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu); gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
} }
@ -371,6 +575,12 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
// (census filter is on top of this one) // (census filter is on top of this one)
// - add census stats for retries // - add census stats for retries
namespace grpc_core {
namespace {
class QueuedPickCanceller;
} // namespace
} // namespace grpc_core
namespace { namespace {
struct call_data; struct call_data;
@ -509,8 +719,11 @@ struct call_data {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) { for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
GPR_ASSERT(pending_batches[i].batch == nullptr); GPR_ASSERT(pending_batches[i].batch == nullptr);
} }
if (have_request) { for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
request.Destroy(); if (pick.pick.subchannel_call_context[i].destroy != nullptr) {
pick.pick.subchannel_call_context[i].destroy(
pick.pick.subchannel_call_context[i].value);
}
} }
} }
@ -537,8 +750,10 @@ struct call_data {
// Set when we get a cancel_stream op. // Set when we get a cancel_stream op.
grpc_error* cancel_error = GRPC_ERROR_NONE; grpc_error* cancel_error = GRPC_ERROR_NONE;
grpc_core::ManualConstructor<grpc_core::RequestRouter::Request> request; QueuedPick pick;
bool have_request = false; bool pick_queued = false;
bool service_config_applied = false;
grpc_core::QueuedPickCanceller* pick_canceller = nullptr;
grpc_closure pick_closure; grpc_closure pick_closure;
grpc_polling_entity* pollent = nullptr; grpc_polling_entity* pollent = nullptr;
@ -600,7 +815,7 @@ static void retry_commit(grpc_call_element* elem,
static void start_internal_recv_trailing_metadata(grpc_call_element* elem); static void start_internal_recv_trailing_metadata(grpc_call_element* elem);
static void on_complete(void* arg, grpc_error* error); static void on_complete(void* arg, grpc_error* error);
static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored); static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored);
static void start_pick_locked(void* arg, grpc_error* ignored); static void remove_call_from_queued_picks_locked(grpc_call_element* elem);
// //
// send op data caching // send op data caching
@ -728,7 +943,7 @@ static void free_cached_send_op_data_for_completed_batch(
// //
void maybe_inject_recv_trailing_metadata_ready_for_lb( void maybe_inject_recv_trailing_metadata_ready_for_lb(
const grpc_core::LoadBalancingPolicy::PickState& pick, const LoadBalancingPolicy::PickState& pick,
grpc_transport_stream_op_batch* batch) { grpc_transport_stream_op_batch* batch) {
if (pick.recv_trailing_metadata_ready != nullptr) { if (pick.recv_trailing_metadata_ready != nullptr) {
*pick.original_recv_trailing_metadata_ready = *pick.original_recv_trailing_metadata_ready =
@ -846,10 +1061,25 @@ static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
} }
// This is called via the call combiner, so access to calld is synchronized. // This is called via the call combiner, so access to calld is synchronized.
// If yield_call_combiner is true, assumes responsibility for yielding // If yield_call_combiner_predicate returns true, assumes responsibility for
// the call combiner. // yielding the call combiner.
static void pending_batches_fail(grpc_call_element* elem, grpc_error* error, typedef bool (*YieldCallCombinerPredicate)(
bool yield_call_combiner) { const grpc_core::CallCombinerClosureList& closures);
static bool yield_call_combiner(
const grpc_core::CallCombinerClosureList& closures) {
return true;
}
static bool no_yield_call_combiner(
const grpc_core::CallCombinerClosureList& closures) {
return false;
}
static bool yield_call_combiner_if_pending_batches_found(
const grpc_core::CallCombinerClosureList& closures) {
return closures.size() > 0;
}
static void pending_batches_fail(
grpc_call_element* elem, grpc_error* error,
YieldCallCombinerPredicate yield_call_combiner_predicate) {
GPR_ASSERT(error != GRPC_ERROR_NONE); GPR_ASSERT(error != GRPC_ERROR_NONE);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
@ -866,9 +1096,9 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
pending_batch* pending = &calld->pending_batches[i]; pending_batch* pending = &calld->pending_batches[i];
grpc_transport_stream_op_batch* batch = pending->batch; grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr) { if (batch != nullptr) {
if (batch->recv_trailing_metadata && calld->have_request) { if (batch->recv_trailing_metadata) {
maybe_inject_recv_trailing_metadata_ready_for_lb( maybe_inject_recv_trailing_metadata_ready_for_lb(calld->pick.pick,
*calld->request->pick(), batch); batch);
} }
batch->handler_private.extra_arg = calld; batch->handler_private.extra_arg = calld;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@ -879,7 +1109,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
pending_batch_clear(calld, pending); pending_batch_clear(calld, pending);
} }
} }
if (yield_call_combiner) { if (yield_call_combiner_predicate(closures)) {
closures.RunClosures(calld->call_combiner); closures.RunClosures(calld->call_combiner);
} else { } else {
closures.RunClosuresWithoutYielding(calld->call_combiner); closures.RunClosuresWithoutYielding(calld->call_combiner);
@ -923,8 +1153,8 @@ static void pending_batches_resume(grpc_call_element* elem) {
grpc_transport_stream_op_batch* batch = pending->batch; grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr) { if (batch != nullptr) {
if (batch->recv_trailing_metadata) { if (batch->recv_trailing_metadata) {
maybe_inject_recv_trailing_metadata_ready_for_lb( maybe_inject_recv_trailing_metadata_ready_for_lb(calld->pick.pick,
*calld->request->pick(), batch); batch);
} }
batch->handler_private.extra_arg = calld->subchannel_call.get(); batch->handler_private.extra_arg = calld->subchannel_call.get();
GRPC_CLOSURE_INIT(&batch->handler_private.closure, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
@ -1015,11 +1245,9 @@ static void do_retry(grpc_call_element* elem,
const ClientChannelMethodParams::RetryPolicy* retry_policy = const ClientChannelMethodParams::RetryPolicy* retry_policy =
calld->method_params->retry_policy(); calld->method_params->retry_policy();
GPR_ASSERT(retry_policy != nullptr); GPR_ASSERT(retry_policy != nullptr);
// Reset subchannel call and connected subchannel.
calld->subchannel_call.reset(); calld->subchannel_call.reset();
if (calld->have_request) { calld->pick.pick.connected_subchannel.reset();
calld->have_request = false;
calld->request.Destroy();
}
// Compute backoff delay. // Compute backoff delay.
grpc_millis next_attempt_time; grpc_millis next_attempt_time;
if (server_pushback_ms >= 0) { if (server_pushback_ms >= 0) {
@ -1938,7 +2166,7 @@ static void add_retriable_recv_trailing_metadata_op(
batch_data->batch.payload->recv_trailing_metadata batch_data->batch.payload->recv_trailing_metadata
.recv_trailing_metadata_ready = .recv_trailing_metadata_ready =
&retry_state->recv_trailing_metadata_ready; &retry_state->recv_trailing_metadata_ready;
maybe_inject_recv_trailing_metadata_ready_for_lb(*calld->request->pick(), maybe_inject_recv_trailing_metadata_ready_for_lb(calld->pick.pick,
&batch_data->batch); &batch_data->batch);
} }
@ -2207,41 +2435,38 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
// LB pick // LB pick
// //
static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) { static void create_subchannel_call(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
const size_t parent_data_size = const size_t parent_data_size =
calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0; calld->enable_retries ? sizeof(subchannel_call_retry_state) : 0;
const grpc_core::ConnectedSubchannel::CallArgs call_args = { const grpc_core::ConnectedSubchannel::CallArgs call_args = {
calld->pollent, // pollent calld->pollent, // pollent
calld->path, // path calld->path, // path
calld->call_start_time, // start_time calld->call_start_time, // start_time
calld->deadline, // deadline calld->deadline, // deadline
calld->arena, // arena calld->arena, // arena
calld->request->pick()->subchannel_call_context, // context calld->pick.pick.subchannel_call_context, // context
calld->call_combiner, // call_combiner calld->call_combiner, // call_combiner
parent_data_size // parent_data_size parent_data_size // parent_data_size
}; };
grpc_error* new_error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
calld->subchannel_call = calld->subchannel_call =
calld->request->pick()->connected_subchannel->CreateCall(call_args, calld->pick.pick.connected_subchannel->CreateCall(call_args, &error);
&new_error);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s", gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call.get(), chand, calld, calld->subchannel_call.get(),
grpc_error_string(new_error)); grpc_error_string(error));
} }
if (GPR_UNLIKELY(new_error != GRPC_ERROR_NONE)) { if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
new_error = grpc_error_add_child(new_error, error); pending_batches_fail(elem, error, yield_call_combiner);
pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
} else { } else {
if (parent_data_size > 0) { if (parent_data_size > 0) {
new (calld->subchannel_call->GetParentData()) subchannel_call_retry_state( new (calld->subchannel_call->GetParentData())
calld->request->pick()->subchannel_call_context); subchannel_call_retry_state(calld->pick.pick.subchannel_call_context);
} }
pending_batches_resume(elem); pending_batches_resume(elem);
} }
GRPC_ERROR_UNREF(error);
} }
// Invoked when a pick is completed, on both success or failure. // Invoked when a pick is completed, on both success or failure.
@ -2249,54 +2474,106 @@ static void pick_done(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg); grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (GPR_UNLIKELY(calld->request->pick()->connected_subchannel == nullptr)) { if (error != GRPC_ERROR_NONE) {
// Failed to create subchannel. if (grpc_client_channel_trace.enabled()) {
// If there was no error, this is an LB policy drop, in which case gpr_log(GPR_INFO,
// we return an error; otherwise, we may retry. "chand=%p calld=%p: failed to pick subchannel: error=%s", chand,
grpc_status_code status = GRPC_STATUS_OK; calld, grpc_error_string(error));
grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr, }
nullptr); pending_batches_fail(elem, GRPC_ERROR_REF(error), yield_call_combiner);
if (error == GRPC_ERROR_NONE || !calld->enable_retries || return;
!maybe_retry(elem, nullptr /* batch_data */, status, }
nullptr /* server_pushback_md */)) { create_subchannel_call(elem);
grpc_error* new_error = }
error == GRPC_ERROR_NONE
? GRPC_ERROR_CREATE_FROM_STATIC_STRING( namespace grpc_core {
"Call dropped by load balancing policy") namespace {
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1); // A class to handle the call combiner cancellation callback for a
// queued pick.
class QueuedPickCanceller {
public:
explicit QueuedPickCanceller(grpc_call_element* elem) : elem_(elem) {
auto* calld = static_cast<call_data*>(elem->call_data);
auto* chand = static_cast<channel_data*>(elem->channel_data);
GRPC_CALL_STACK_REF(calld->owning_call, "QueuedPickCanceller");
GRPC_CLOSURE_INIT(&closure_, &CancelLocked, this,
grpc_combiner_scheduler(chand->combiner));
grpc_call_combiner_set_notify_on_cancel(calld->call_combiner, &closure_);
}
private:
static void CancelLocked(void* arg, grpc_error* error) {
auto* self = static_cast<QueuedPickCanceller*>(arg);
auto* chand = static_cast<channel_data*>(self->elem_->channel_data);
auto* calld = static_cast<call_data*>(self->elem_->call_data);
if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling queued pick: "
"error=%s self=%p calld->pick_canceller=%p",
chand, calld, grpc_error_string(error), self,
calld->pick_canceller);
}
if (calld->pick_canceller == self && error != GRPC_ERROR_NONE) {
// Remove pick from list of queued picks.
remove_call_from_queued_picks_locked(self->elem_);
// Fail pending batches on the call.
pending_batches_fail(self->elem_, GRPC_ERROR_REF(error),
yield_call_combiner_if_pending_batches_found);
}
GRPC_CALL_STACK_UNREF(calld->owning_call, "QueuedPickCanceller");
Delete(self);
}
grpc_call_element* elem_;
grpc_closure closure_;
};
} // namespace
} // namespace grpc_core
// Removes the call from the channel's list of queued picks.
static void remove_call_from_queued_picks_locked(grpc_call_element* elem) {
auto* chand = static_cast<channel_data*>(elem->channel_data);
auto* calld = static_cast<call_data*>(elem->call_data);
for (QueuedPick** pick = &chand->queued_picks; *pick != nullptr;
pick = &(*pick)->next) {
if (*pick == &calld->pick) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO, "chand=%p calld=%p: removing from queued picks list",
"chand=%p calld=%p: failed to create subchannel: error=%s", chand, calld);
chand, calld, grpc_error_string(new_error));
} }
pending_batches_fail(elem, new_error, true /* yield_call_combiner */); calld->pick_queued = false;
*pick = calld->pick.next;
// Remove call's pollent from channel's interested_parties.
grpc_polling_entity_del_from_pollset_set(calld->pollent,
chand->interested_parties);
// Lame the call combiner canceller.
calld->pick_canceller = nullptr;
break;
} }
} else {
/* Create call on subchannel. */
create_subchannel_call(elem, GRPC_ERROR_REF(error));
} }
} }
// If the channel is in TRANSIENT_FAILURE and the call is not // Adds the call to the channel's list of queued picks.
// wait_for_ready=true, fails the call and returns true. static void add_call_to_queued_picks_locked(grpc_call_element* elem) {
static bool fail_call_if_in_transient_failure(grpc_call_element* elem) { auto* chand = static_cast<channel_data*>(elem->channel_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data); auto* calld = static_cast<call_data*>(elem->call_data);
call_data* calld = static_cast<call_data*>(elem->call_data); if (grpc_client_channel_trace.enabled()) {
grpc_transport_stream_op_batch* batch = calld->pending_batches[0].batch; gpr_log(GPR_INFO, "chand=%p calld=%p: adding to queued picks list", chand,
if (chand->request_router->GetConnectivityState() == calld);
GRPC_CHANNEL_TRANSIENT_FAILURE &&
(batch->payload->send_initial_metadata.send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
pending_batches_fail(
elem,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"channel is in state TRANSIENT_FAILURE"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
true /* yield_call_combiner */);
return true;
} }
return false; calld->pick_queued = true;
// Add call to queued picks list.
calld->pick.elem = elem;
calld->pick.next = chand->queued_picks;
chand->queued_picks = &calld->pick;
// Add call's pollent to channel's interested_parties, so that I/O
// can be done under the call's CQ.
grpc_polling_entity_add_to_pollset_set(calld->pollent,
chand->interested_parties);
// Register call combiner cancellation callback.
calld->pick_canceller = grpc_core::New<grpc_core::QueuedPickCanceller>(elem);
} }
// Applies service config to the call. Must be invoked once we know // Applies service config to the call. Must be invoked once we know
@ -2356,36 +2633,37 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
} }
// Invoked once resolver results are available. // Invoked once resolver results are available.
static bool maybe_apply_service_config_to_call_locked(void* arg) { static void maybe_apply_service_config_to_call_locked(grpc_call_element* elem) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
// Only get service config data on the first attempt. // Apply service config data to the call only once, and only if the
if (GPR_LIKELY(calld->num_attempts_completed == 0)) { // channel has the data available.
if (GPR_LIKELY(chand->have_service_config &&
!calld->service_config_applied)) {
calld->service_config_applied = true;
apply_service_config_to_call_locked(elem); apply_service_config_to_call_locked(elem);
// Check this after applying service config, since it may have
// affected the call's wait_for_ready value.
if (fail_call_if_in_transient_failure(elem)) return false;
} }
return true;
} }
static void start_pick_locked(void* arg, grpc_error* ignored) { static const char* pick_result_name(
LoadBalancingPolicy::SubchannelPicker::PickResult result) {
switch (result) {
case LoadBalancingPolicy::SubchannelPicker::PICK_COMPLETE:
return "COMPLETE";
case LoadBalancingPolicy::SubchannelPicker::PICK_QUEUE:
return "QUEUE";
case LoadBalancingPolicy::SubchannelPicker::PICK_TRANSIENT_FAILURE:
return "TRANSIENT_FAILURE";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
static void start_pick_locked(void* arg, grpc_error* error) {
grpc_call_element* elem = static_cast<grpc_call_element*>(arg); grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(!calld->have_request); GPR_ASSERT(calld->pick.pick.connected_subchannel == nullptr);
GPR_ASSERT(calld->subchannel_call == nullptr); GPR_ASSERT(calld->subchannel_call == nullptr);
// Normally, we want to do this check until after we've processed the
// service config, so that we can honor the wait_for_ready setting in
// the service config. However, if the channel is in TRANSIENT_FAILURE
// and we don't have an LB policy at this point, that means that the
// resolver has returned a failure, so we're not going to get a service
// config right away. In that case, we fail the call now based on the
// wait_for_ready value passed in from the application.
if (chand->request_router->lb_policy() == nullptr &&
fail_call_if_in_transient_failure(elem)) {
return;
}
// If this is a retry, use the send_initial_metadata payload that // If this is a retry, use the send_initial_metadata payload that
// we've cached; otherwise, use the pending batch. The // we've cached; otherwise, use the pending batch. The
// send_initial_metadata batch will be the first pending batch in the // send_initial_metadata batch will be the first pending batch in the
@ -2396,25 +2674,78 @@ static void start_pick_locked(void* arg, grpc_error* ignored) {
// allocate the subchannel batch earlier so that we can give the // allocate the subchannel batch earlier so that we can give the
// subchannel's copy of the metadata batch (which is copied for each // subchannel's copy of the metadata batch (which is copied for each
// attempt) to the LB policy instead the one from the parent channel. // attempt) to the LB policy instead the one from the parent channel.
grpc_metadata_batch* initial_metadata = calld->pick.pick.initial_metadata =
calld->seen_send_initial_metadata calld->seen_send_initial_metadata
? &calld->send_initial_metadata ? &calld->send_initial_metadata
: calld->pending_batches[0] : calld->pending_batches[0]
.batch->payload->send_initial_metadata.send_initial_metadata; .batch->payload->send_initial_metadata.send_initial_metadata;
uint32_t* initial_metadata_flags = uint32_t* send_initial_metadata_flags =
calld->seen_send_initial_metadata calld->seen_send_initial_metadata
? &calld->send_initial_metadata_flags ? &calld->send_initial_metadata_flags
: &calld->pending_batches[0] : &calld->pending_batches[0]
.batch->payload->send_initial_metadata .batch->payload->send_initial_metadata
.send_initial_metadata_flags; .send_initial_metadata_flags;
// Apply service config to call if needed.
maybe_apply_service_config_to_call_locked(elem);
// When done, we schedule this closure to leave the channel combiner.
GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem, GRPC_CLOSURE_INIT(&calld->pick_closure, pick_done, elem,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
calld->request.Init(calld->owning_call, calld->call_combiner, calld->pollent, // Attempt pick.
initial_metadata, initial_metadata_flags, error = GRPC_ERROR_NONE;
maybe_apply_service_config_to_call_locked, elem, auto pick_result = chand->picker->Pick(&calld->pick.pick, &error);
&calld->pick_closure); if (grpc_client_channel_trace.enabled()) {
calld->have_request = true; gpr_log(GPR_INFO,
chand->request_router->RouteCallLocked(calld->request.get()); "chand=%p calld=%p: LB pick returned %s (connected_subchannel=%p, "
"error=%s)",
chand, calld, pick_result_name(pick_result),
calld->pick.pick.connected_subchannel.get(),
grpc_error_string(error));
}
switch (pick_result) {
case LoadBalancingPolicy::SubchannelPicker::PICK_TRANSIENT_FAILURE:
// If we're shutting down, fail all RPCs.
if (chand->disconnect_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(error);
GRPC_CLOSURE_SCHED(&calld->pick_closure,
GRPC_ERROR_REF(chand->disconnect_error));
break;
}
// If wait_for_ready is false, then the error indicates the RPC
// attempt's final status.
if ((*send_initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY) == 0) {
// Retry if appropriate; otherwise, fail.
grpc_status_code status = GRPC_STATUS_OK;
grpc_error_get_status(error, calld->deadline, &status, nullptr, nullptr,
nullptr);
if (!calld->enable_retries ||
!maybe_retry(elem, nullptr /* batch_data */, status,
nullptr /* server_pushback_md */)) {
grpc_error* new_error =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1);
GRPC_ERROR_UNREF(error);
GRPC_CLOSURE_SCHED(&calld->pick_closure, new_error);
}
if (calld->pick_queued) remove_call_from_queued_picks_locked(elem);
break;
}
// If wait_for_ready is true, then queue to retry when we get a new
// picker.
GRPC_ERROR_UNREF(error);
// Fallthrough
case LoadBalancingPolicy::SubchannelPicker::PICK_QUEUE:
if (!calld->pick_queued) add_call_to_queued_picks_locked(elem);
break;
default: // PICK_COMPLETE
// Handle drops.
if (GPR_UNLIKELY(calld->pick.pick.connected_subchannel == nullptr)) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Call dropped by load balancing policy");
}
GRPC_CLOSURE_SCHED(&calld->pick_closure, error);
if (calld->pick_queued) remove_call_from_queued_picks_locked(elem);
}
} }
// //
@ -2458,8 +2789,10 @@ static void cc_start_transport_stream_op_batch(
// been started), fail all pending batches. Otherwise, send the // been started), fail all pending batches. Otherwise, send the
// cancellation down to the subchannel call. // cancellation down to the subchannel call.
if (calld->subchannel_call == nullptr) { if (calld->subchannel_call == nullptr) {
// TODO(roth): If there is a pending retry callback, do we need to
// cancel it here?
pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error), pending_batches_fail(elem, GRPC_ERROR_REF(calld->cancel_error),
false /* yield_call_combiner */); no_yield_call_combiner);
// Note: This will release the call combiner. // Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure( grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner); batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
@ -2556,7 +2889,8 @@ const grpc_channel_filter grpc_client_channel_filter = {
void grpc_client_channel_set_channelz_node( void grpc_client_channel_set_channelz_node(
grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node) { grpc_channel_element* elem, grpc_core::channelz::ClientChannelNode* node) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
chand->request_router->set_channelz_node(node); chand->channelz_node = node;
chand->resolving_lb_policy->set_channelz_node(node->Ref());
} }
void grpc_client_channel_populate_child_refs( void grpc_client_channel_populate_child_refs(
@ -2564,22 +2898,23 @@ void grpc_client_channel_populate_child_refs(
grpc_core::channelz::ChildRefsList* child_subchannels, grpc_core::channelz::ChildRefsList* child_subchannels,
grpc_core::channelz::ChildRefsList* child_channels) { grpc_core::channelz::ChildRefsList* child_channels) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
if (chand->request_router->lb_policy() != nullptr) { if (chand->resolving_lb_policy != nullptr) {
chand->request_router->lb_policy()->FillChildRefsForChannelz( chand->resolving_lb_policy->FillChildRefsForChannelz(child_subchannels,
child_subchannels, child_channels); child_channels);
} }
} }
static void try_to_connect_locked(void* arg, grpc_error* error_ignored) { static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
channel_data* chand = static_cast<channel_data*>(arg); channel_data* chand = static_cast<channel_data*>(arg);
chand->request_router->ExitIdleLocked(); chand->resolving_lb_policy->ExitIdleLocked();
GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect"); GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
} }
grpc_connectivity_state grpc_client_channel_check_connectivity_state( grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_channel_element* elem, int try_to_connect) { grpc_channel_element* elem, int try_to_connect) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_connectivity_state out = chand->request_router->GetConnectivityState(); grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) { if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect"); GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
@ -2688,15 +3023,15 @@ static void watch_connectivity_state_locked(void* arg,
GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE); GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w, GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
grpc_combiner_scheduler(w->chand->combiner)); grpc_combiner_scheduler(w->chand->combiner));
w->chand->request_router->NotifyOnConnectivityStateChange(w->state, grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
&w->my_closure); w->state, &w->my_closure);
} else { } else {
GPR_ASSERT(w->watcher_timer_init == nullptr); GPR_ASSERT(w->watcher_timer_init == nullptr);
found = lookup_external_connectivity_watcher(w->chand, w->on_complete); found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
if (found) { if (found) {
GPR_ASSERT(found->on_complete == w->on_complete); GPR_ASSERT(found->on_complete == w->on_complete);
found->chand->request_router->NotifyOnConnectivityStateChange( grpc_connectivity_state_notify_on_state_change(
nullptr, &found->my_closure); &found->chand->state_tracker, nullptr, &found->my_closure);
} }
grpc_polling_entity_del_from_pollset_set(&w->pollent, grpc_polling_entity_del_from_pollset_set(&w->pollent,
w->chand->interested_parties); w->chand->interested_parties);

@ -28,6 +28,17 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
namespace grpc_core { namespace grpc_core {
LoadBalancingPolicy::LoadBalancingPolicy(Args args, intptr_t initial_refcount)
: InternallyRefCounted(&grpc_trace_lb_policy_refcount, initial_refcount),
combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
interested_parties_(grpc_pollset_set_create()),
channel_control_helper_(std::move(args.channel_control_helper)) {}
LoadBalancingPolicy::~LoadBalancingPolicy() {
grpc_pollset_set_destroy(interested_parties_);
GRPC_COMBINER_UNREF(combiner_, "lb_policy");
}
grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig( grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
const grpc_json* lb_config_array) { const grpc_json* lb_config_array) {
if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) { if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) {
@ -54,35 +65,4 @@ grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
return nullptr; return nullptr;
} }
LoadBalancingPolicy::LoadBalancingPolicy(Args args)
: InternallyRefCounted(&grpc_trace_lb_policy_refcount),
combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
client_channel_factory_(args.client_channel_factory),
subchannel_pool_(std::move(args.subchannel_pool)),
interested_parties_(grpc_pollset_set_create()),
request_reresolution_(nullptr) {}
LoadBalancingPolicy::~LoadBalancingPolicy() {
grpc_pollset_set_destroy(interested_parties_);
GRPC_COMBINER_UNREF(combiner_, "lb_policy");
}
void LoadBalancingPolicy::TryReresolutionLocked(
grpc_core::TraceFlag* grpc_lb_trace, grpc_error* error) {
if (request_reresolution_ != nullptr) {
GRPC_CLOSURE_SCHED(request_reresolution_, error);
request_reresolution_ = nullptr;
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_INFO,
"%s %p: scheduling re-resolution closure with error=%s.",
grpc_lb_trace->name(), this, grpc_error_string(error));
}
} else {
if (grpc_lb_trace->enabled()) {
gpr_log(GPR_INFO, "%s %p: no available re-resolution closure.",
grpc_lb_trace->name(), this);
}
}
}
} // namespace grpc_core } // namespace grpc_core

@ -24,7 +24,6 @@
#include "src/core/ext/filters/client_channel/client_channel_channelz.h" #include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h" #include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/subchannel.h" #include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
#include "src/core/lib/gprpp/abstract.h" #include "src/core/lib/gprpp/abstract.h"
#include "src/core/lib/gprpp/orphanable.h" #include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h" #include "src/core/lib/gprpp/ref_counted_ptr.h"
@ -43,61 +42,192 @@ namespace grpc_core {
/// ///
/// Any I/O done by the LB policy should be done under the pollset_set /// Any I/O done by the LB policy should be done under the pollset_set
/// returned by \a interested_parties(). /// returned by \a interested_parties().
// TODO(roth): Once we move to EventManager-based polling, remove the
// interested_parties() hooks from the API.
class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> { class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
public: public:
struct Args {
/// The combiner under which all LB policy calls will be run.
/// Policy does NOT take ownership of the reference to the combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a smart pointer that does pass ownership
// of a reference.
grpc_combiner* combiner = nullptr;
/// Used to create channels and subchannels.
grpc_client_channel_factory* client_channel_factory = nullptr;
/// Subchannel pool.
RefCountedPtr<SubchannelPoolInterface> subchannel_pool;
/// Channel args from the resolver.
/// Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
grpc_channel_args* args = nullptr;
/// Load balancing config from the resolver.
grpc_json* lb_config = nullptr;
};
/// State used for an LB pick. /// State used for an LB pick.
struct PickState { struct PickState {
/// Initial metadata associated with the picking call. /// Initial metadata associated with the picking call.
/// This is both an input and output parameter; the LB policy may
/// use metadata here to influence its routing decision, and it may
/// add new metadata here to be sent with the call to the chosen backend.
grpc_metadata_batch* initial_metadata = nullptr; grpc_metadata_batch* initial_metadata = nullptr;
/// Pointer to bitmask used for selective cancelling. See
/// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
uint32_t* initial_metadata_flags = nullptr;
/// Storage for LB token in \a initial_metadata, or nullptr if not used. /// Storage for LB token in \a initial_metadata, or nullptr if not used.
// TODO(roth): Remove this from the API. Maybe have the LB policy
// allocate this on the arena instead?
grpc_linked_mdelem lb_token_mdelem_storage; grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously. /// Callback set by lb policy to be notified of trailing metadata.
/// If null, pick will fail if a result is not available synchronously. /// The callback must be scheduled on grpc_schedule_on_exec_ctx.
grpc_closure* on_complete = nullptr;
// Callback set by lb policy to be notified of trailing metadata.
// The callback must be scheduled on grpc_schedule_on_exec_ctx.
grpc_closure* recv_trailing_metadata_ready = nullptr; grpc_closure* recv_trailing_metadata_ready = nullptr;
// The address that will be set to point to the original /// The address that will be set to point to the original
// recv_trailing_metadata_ready callback, to be invoked by the LB /// recv_trailing_metadata_ready callback, to be invoked by the LB
// policy's recv_trailing_metadata_ready callback when complete. /// policy's recv_trailing_metadata_ready callback when complete.
// Must be non-null if recv_trailing_metadata_ready is non-null. /// Must be non-null if recv_trailing_metadata_ready is non-null.
grpc_closure** original_recv_trailing_metadata_ready = nullptr; grpc_closure** original_recv_trailing_metadata_ready = nullptr;
// If this is not nullptr, then the client channel will point it to the /// If this is not nullptr, then the client channel will point it to the
// call's trailing metadata before invoking recv_trailing_metadata_ready. /// call's trailing metadata before invoking recv_trailing_metadata_ready.
// If this is nullptr, then the callback will still be called. /// If this is nullptr, then the callback will still be called.
// The lb does not have ownership of the metadata. /// The lb does not have ownership of the metadata.
grpc_metadata_batch** recv_trailing_metadata = nullptr; grpc_metadata_batch** recv_trailing_metadata = nullptr;
/// Will be set to the selected subchannel, or nullptr on failure or when /// Will be set to the selected subchannel, or nullptr on failure or when
/// the LB policy decides to drop the call. /// the LB policy decides to drop the call.
RefCountedPtr<ConnectedSubchannel> connected_subchannel; RefCountedPtr<ConnectedSubchannel> connected_subchannel;
/// Will be populated with context to pass to the subchannel call, if /// Will be populated with context to pass to the subchannel call, if
/// needed. /// needed.
// TODO(roth): Remove this from the API, especially since it's not
// working properly anyway (see https://github.com/grpc/grpc/issues/15927).
grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {}; grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
/// Next pointer. For internal use by LB policy. };
PickState* next = nullptr;
/// A picker is the object used to actually perform picks.
///
/// Pickers are intended to encapsulate all of the state and logic
/// needed on the data plane (i.e., to actually process picks for
/// individual RPCs sent on the channel) while excluding all of the
/// state and logic needed on the control plane (i.e., resolver
/// updates, connectivity state notifications, etc); the latter should
/// live in the LB policy object itself.
///
/// Currently, pickers are always accessed from within the
/// client_channel combiner, so they do not have to be thread-safe.
// TODO(roth): In a subsequent PR, split the data plane work (i.e.,
// the interaction with the picker) and the control plane work (i.e.,
// the interaction with the LB policy) into two different
// synchronization mechanisms, to avoid lock contention between the two.
class SubchannelPicker {
public:
/// Possible outcomes of a call to Pick().
enum PickResult {
// Pick complete. If connected_subchannel is non-null, client channel
// can immediately proceed with the call on connected_subchannel;
// otherwise, call should be dropped.
PICK_COMPLETE,
// Pick cannot be completed until something changes on the control
// plane. Client channel will queue the pick and try again the
// next time the picker is updated.
PICK_QUEUE,
// LB policy is in transient failure. If the pick is wait_for_ready,
// client channel will wait for the next picker and try again;
// otherwise, the call will be failed immediately (although it may
// be retried if the client channel is configured to do so).
// The Pick() method will set its error parameter if this value is
// returned.
PICK_TRANSIENT_FAILURE,
};
SubchannelPicker() = default;
virtual ~SubchannelPicker() = default;
/// Attempts a pick for \a pick. On PICK_COMPLETE, the implementation
/// may populate \a pick (e.g., pick->connected_subchannel). On
/// PICK_TRANSIENT_FAILURE, sets \a *error; the caller takes ownership
/// of that error reference.
virtual PickResult Pick(PickState* pick, grpc_error** error) GRPC_ABSTRACT;
GRPC_ABSTRACT_BASE_CLASS
};
// A picker that returns PICK_QUEUE for all picks.
// Also calls the parent LB policy's ExitIdleLocked() method when the
// first pick is seen.
class QueuePicker : public SubchannelPicker {
public:
explicit QueuePicker(RefCountedPtr<LoadBalancingPolicy> parent)
: parent_(std::move(parent)) {}
PickResult Pick(PickState* pick, grpc_error** error) override {
// We invoke the parent's ExitIdleLocked() via a closure instead
// of doing it directly here, for two reasons:
// 1. ExitIdleLocked() may cause the policy's state to change and
// a new picker to be delivered to the channel. If that new
// picker is delivered before ExitIdleLocked() returns, then by
// the time this function returns, the pick will already have
// been processed, and we'll be trying to re-process the same
// pick again, leading to a crash.
// 2. In a subsequent PR, we will split the data plane and control
// plane synchronization into separate combiners, at which
// point this will need to hop from the data plane combiner into
// the control plane combiner.
if (!exit_idle_called_) {
exit_idle_called_ = true;
parent_->Ref().release(); // ref held by closure.
GRPC_CLOSURE_SCHED(
GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(),
grpc_combiner_scheduler(parent_->combiner())),
GRPC_ERROR_NONE);
}
return PICK_QUEUE;
}
private:
static void CallExitIdle(void* arg, grpc_error* error) {
LoadBalancingPolicy* parent = static_cast<LoadBalancingPolicy*>(arg);
parent->ExitIdleLocked();
parent->Unref();
}
RefCountedPtr<LoadBalancingPolicy> parent_;
bool exit_idle_called_ = false;
};
// A picker that returns PICK_TRANSIENT_FAILURE for all picks.
class TransientFailurePicker : public SubchannelPicker {
public:
explicit TransientFailurePicker(grpc_error* error) : error_(error) {}
~TransientFailurePicker() { GRPC_ERROR_UNREF(error_); }
PickResult Pick(PickState* pick, grpc_error** error) override {
*error = GRPC_ERROR_REF(error_);
return PICK_TRANSIENT_FAILURE;
}
private:
grpc_error* error_;
};
/// A proxy object used by the LB policy to communicate with the client
/// channel.
class ChannelControlHelper {
public:
ChannelControlHelper() = default;
virtual ~ChannelControlHelper() = default;
/// Creates a new subchannel with the specified channel args.
virtual Subchannel* CreateSubchannel(const grpc_channel_args& args)
GRPC_ABSTRACT;
/// Creates a channel with the specified target, type, and channel args.
virtual grpc_channel* CreateChannel(
const char* target, grpc_client_channel_type type,
const grpc_channel_args& args) GRPC_ABSTRACT;
/// Sets the connectivity state and passes a new picker to be used
/// by the client channel.  Ownership of \a picker is transferred to
/// the implementation.
// Note: This method has a hand-written "abstract" body instead of
// using GRPC_ABSTRACT, presumably so that the by-value UniquePtr<>
// parameter can be consumed to satisfy clang-tidy (see std::move()
// below) -- TODO confirm.
virtual void UpdateState(grpc_connectivity_state state,
grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) {
std::move(picker); // Suppress clang-tidy complaint.
// The rest of this is copied from the GRPC_ABSTRACT macro.
gpr_log(GPR_ERROR, "Function marked GRPC_ABSTRACT was not implemented");
GPR_ASSERT(false);
}
/// Requests that the resolver re-resolve.
virtual void RequestReresolution() GRPC_ABSTRACT;
GRPC_ABSTRACT_BASE_CLASS
};
/// Args used to instantiate an LB policy.
struct Args {
/// The combiner under which all LB policy calls will be run.
/// Policy does NOT take ownership of the reference to the combiner.
// TODO(roth): Once we have a C++-like interface for combiners, this
// API should change to take a smart pointer that does pass ownership
// of a reference.
grpc_combiner* combiner = nullptr;
/// Channel control helper.
/// Note: LB policies MUST NOT call any method on the helper from
/// their constructor.
UniquePtr<ChannelControlHelper> channel_control_helper;
/// Channel args.
const grpc_channel_args* args = nullptr;
}; };
// Not copyable nor movable. // Not copyable nor movable.
@ -108,57 +238,17 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
virtual const char* name() const GRPC_ABSTRACT; virtual const char* name() const GRPC_ABSTRACT;
/// Updates the policy with a new set of \a args and a new \a lb_config from /// Updates the policy with a new set of \a args and a new \a lb_config from
/// the resolver. Note that the LB policy gets the set of addresses from the /// the resolver. Will be invoked immediately after LB policy is constructed,
/// and then again whenever the resolver returns a new result.
/// Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_SERVER_ADDRESS_LIST channel arg. /// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
virtual void UpdateLocked(const grpc_channel_args& args, virtual void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) GRPC_ABSTRACT; grpc_json* lb_config) GRPC_ABSTRACT;
/// Finds an appropriate subchannel for a call, based on data in \a pick.
/// \a pick must remain alive until the pick is complete.
///
/// If a result is known immediately, returns true, setting \a *error
/// upon failure. Otherwise, \a pick->on_complete will be invoked once
/// the pick is complete with its error argument set to indicate success
/// or failure.
///
/// If \a pick->on_complete is null and no result is known immediately,
/// a synchronous failure will be returned (i.e., \a *error will be
/// set and true will be returned).
virtual bool PickLocked(PickState* pick, grpc_error** error) GRPC_ABSTRACT;
/// Cancels \a pick.
/// The \a on_complete callback of the pending pick will be invoked with
/// \a pick->connected_subchannel set to null.
virtual void CancelPickLocked(PickState* pick,
grpc_error* error) GRPC_ABSTRACT;
/// Cancels all pending picks for which their \a initial_metadata_flags (as
/// given in the call to \a PickLocked()) matches
/// \a initial_metadata_flags_eq when ANDed with
/// \a initial_metadata_flags_mask.
virtual void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) GRPC_ABSTRACT;
/// Requests a notification when the connectivity state of the policy
/// changes from \a *state. When that happens, sets \a *state to the
/// new state and schedules \a closure.
virtual void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
grpc_closure* closure) GRPC_ABSTRACT;
/// Returns the policy's current connectivity state. Sets \a error to
/// the associated error, if any.
virtual grpc_connectivity_state CheckConnectivityLocked(
grpc_error** connectivity_error) GRPC_ABSTRACT;
/// Hands off pending picks to \a new_policy.
virtual void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy)
GRPC_ABSTRACT;
/// Tries to enter a READY connectivity state. /// Tries to enter a READY connectivity state.
/// TODO(roth): As part of restructuring how we handle IDLE state, /// This is a no-op by default, since most LB policies never go into
/// consider whether this method is still needed. /// IDLE state.
virtual void ExitIdleLocked() GRPC_ABSTRACT; virtual void ExitIdleLocked() {}
/// Resets connection backoff. /// Resets connection backoff.
virtual void ResetBackoffLocked() GRPC_ABSTRACT; virtual void ResetBackoffLocked() GRPC_ABSTRACT;
@ -183,18 +273,11 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// given the JSON node of a LoadBalancingConfig array. /// given the JSON node of a LoadBalancingConfig array.
static grpc_json* ParseLoadBalancingConfig(const grpc_json* lb_config_array); static grpc_json* ParseLoadBalancingConfig(const grpc_json* lb_config_array);
/// Sets the re-resolution closure to \a request_reresolution.
void SetReresolutionClosureLocked(grpc_closure* request_reresolution) {
GPR_ASSERT(request_reresolution_ == nullptr);
request_reresolution_ = request_reresolution;
}
grpc_pollset_set* interested_parties() const { return interested_parties_; } grpc_pollset_set* interested_parties() const { return interested_parties_; }
// Callers that need their own reference can call the returned void set_channelz_node(
// object's Ref() method. RefCountedPtr<channelz::ClientChannelNode> channelz_node) {
SubchannelPoolInterface* subchannel_pool() const { channelz_node_ = std::move(channelz_node);
return subchannel_pool_.get();
} }
GRPC_ABSTRACT_BASE_CLASS GRPC_ABSTRACT_BASE_CLASS
@ -202,12 +285,20 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
protected: protected:
GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
explicit LoadBalancingPolicy(Args args); explicit LoadBalancingPolicy(Args args, intptr_t initial_refcount = 1);
virtual ~LoadBalancingPolicy(); virtual ~LoadBalancingPolicy();
grpc_combiner* combiner() const { return combiner_; } grpc_combiner* combiner() const { return combiner_; }
grpc_client_channel_factory* client_channel_factory() const {
return client_channel_factory_; // Note: LB policies MUST NOT call any method on the helper from
// their constructor.
// Note: This will return null after ShutdownLocked() has been called.
ChannelControlHelper* channel_control_helper() const {
return channel_control_helper_.get();
}
channelz::ClientChannelNode* channelz_node() const {
return channelz_node_.get();
} }
/// Shuts down the policy. Any pending picks that have not been /// Shuts down the policy. Any pending picks that have not been
@ -215,27 +306,22 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// failed. /// failed.
virtual void ShutdownLocked() GRPC_ABSTRACT; virtual void ShutdownLocked() GRPC_ABSTRACT;
/// Tries to request a re-resolution.
void TryReresolutionLocked(grpc_core::TraceFlag* grpc_lb_trace,
grpc_error* error);
private: private:
static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) { static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) {
LoadBalancingPolicy* policy = static_cast<LoadBalancingPolicy*>(arg); LoadBalancingPolicy* policy = static_cast<LoadBalancingPolicy*>(arg);
policy->ShutdownLocked(); policy->ShutdownLocked();
policy->channel_control_helper_.reset();
policy->Unref(); policy->Unref();
} }
/// Combiner under which LB policy actions take place. /// Combiner under which LB policy actions take place.
grpc_combiner* combiner_; grpc_combiner* combiner_;
/// Client channel factory, used to create channels and subchannels.
grpc_client_channel_factory* client_channel_factory_;
/// Subchannel pool.
RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;
/// Owned pointer to interested parties in load balancing decisions. /// Owned pointer to interested parties in load balancing decisions.
grpc_pollset_set* interested_parties_; grpc_pollset_set* interested_parties_;
/// Callback to force a re-resolution. /// Channel control helper.
grpc_closure* request_reresolution_; UniquePtr<ChannelControlHelper> channel_control_helper_;
/// Channelz node.
RefCountedPtr<channelz::ClientChannelNode> channelz_node_;
}; };
} // namespace grpc_core } // namespace grpc_core

@ -43,7 +43,7 @@ void GrpcLbClientStats::AddCallFinished(
} }
} }
void GrpcLbClientStats::AddCallDroppedLocked(char* token) { void GrpcLbClientStats::AddCallDroppedLocked(const char* token) {
// Increment num_calls_started and num_calls_finished. // Increment num_calls_started and num_calls_finished.
gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1); gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1);
gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1); gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1);

@ -48,7 +48,7 @@ class GrpcLbClientStats : public RefCounted<GrpcLbClientStats> {
bool finished_known_received); bool finished_known_received);
// This method is not thread-safe; caller must synchronize. // This method is not thread-safe; caller must synchronize.
void AddCallDroppedLocked(char* token); void AddCallDroppedLocked(const char* token);
// This method is not thread-safe; caller must synchronize. // This method is not thread-safe; caller must synchronize.
void GetLocked(int64_t* num_calls_started, int64_t* num_calls_finished, void GetLocked(int64_t* num_calls_started, int64_t* num_calls_finished,

@ -52,16 +52,6 @@ class PickFirst : public LoadBalancingPolicy {
void UpdateLocked(const grpc_channel_args& args, void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override; grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) override;
void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
grpc_closure* closure) override;
grpc_connectivity_state CheckConnectivityLocked(
grpc_error** connectivity_error) override;
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void ExitIdleLocked() override; void ExitIdleLocked() override;
void ResetBackoffLocked() override; void ResetBackoffLocked() override;
void FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels, void FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
@ -99,10 +89,9 @@ class PickFirst : public LoadBalancingPolicy {
PickFirstSubchannelList(PickFirst* policy, TraceFlag* tracer, PickFirstSubchannelList(PickFirst* policy, TraceFlag* tracer,
const ServerAddressList& addresses, const ServerAddressList& addresses,
grpc_combiner* combiner, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
const grpc_channel_args& args) const grpc_channel_args& args)
: SubchannelList(policy, tracer, addresses, combiner, : SubchannelList(policy, tracer, addresses, combiner,
client_channel_factory, args) { policy->channel_control_helper(), args) {
// Need to maintain a ref to the LB policy as long as we maintain // Need to maintain a ref to the LB policy as long as we maintain
// any references to subchannels, since the subchannels' // any references to subchannels, since the subchannels'
// pollset_sets will include the LB policy's pollset_set. // pollset_sets will include the LB policy's pollset_set.
@ -115,6 +104,20 @@ class PickFirst : public LoadBalancingPolicy {
} }
}; };
class Picker : public SubchannelPicker {
public:
explicit Picker(RefCountedPtr<ConnectedSubchannel> connected_subchannel)
: connected_subchannel_(std::move(connected_subchannel)) {}
PickResult Pick(PickState* pick, grpc_error** error) override {
pick->connected_subchannel = connected_subchannel_;
return PICK_COMPLETE;
}
private:
RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
};
// Helper class to ensure that any function that modifies the child refs // Helper class to ensure that any function that modifies the child refs
// data structures will update the channelz snapshot data structures before // data structures will update the channelz snapshot data structures before
// returning. // returning.
@ -129,7 +132,6 @@ class PickFirst : public LoadBalancingPolicy {
void ShutdownLocked() override; void ShutdownLocked() override;
void StartPickingLocked();
void UpdateChildRefsLocked(); void UpdateChildRefsLocked();
// All our subchannels. // All our subchannels.
@ -138,14 +140,10 @@ class PickFirst : public LoadBalancingPolicy {
OrphanablePtr<PickFirstSubchannelList> latest_pending_subchannel_list_; OrphanablePtr<PickFirstSubchannelList> latest_pending_subchannel_list_;
// Selected subchannel in \a subchannel_list_. // Selected subchannel in \a subchannel_list_.
PickFirstSubchannelData* selected_ = nullptr; PickFirstSubchannelData* selected_ = nullptr;
// Have we started picking? // Are we in IDLE state?
bool started_picking_ = false; bool idle_ = false;
// Are we shut down? // Are we shut down?
bool shutdown_ = false; bool shutdown_ = false;
// List of picks that are waiting on connectivity.
PickState* pending_picks_ = nullptr;
// Our connectivity state tracker.
grpc_connectivity_state_tracker state_tracker_;
/// Lock and data used to capture snapshots of this channels child /// Lock and data used to capture snapshots of this channels child
/// channels and subchannels. This data is consumed by channelz. /// channels and subchannels. This data is consumed by channelz.
@ -155,14 +153,10 @@ class PickFirst : public LoadBalancingPolicy {
}; };
PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) { PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
GPR_ASSERT(args.client_channel_factory != nullptr);
gpr_mu_init(&child_refs_mu_); gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"pick_first");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p created.", this); gpr_log(GPR_INFO, "Pick First %p created.", this);
} }
UpdateLocked(*args.args, args.lb_config);
} }
PickFirst::~PickFirst() { PickFirst::~PickFirst() {
@ -172,94 +166,26 @@ PickFirst::~PickFirst() {
gpr_mu_destroy(&child_refs_mu_); gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
GPR_ASSERT(pending_picks_ == nullptr);
grpc_connectivity_state_destroy(&state_tracker_);
}
void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
pending_picks_ = pick->next;
grpc_error* error = GRPC_ERROR_NONE;
if (new_policy->PickLocked(pick, &error)) {
// Synchronous return, schedule closure.
GRPC_CLOSURE_SCHED(pick->on_complete, error);
}
}
} }
void PickFirst::ShutdownLocked() { void PickFirst::ShutdownLocked() {
AutoChildRefsUpdater guard(this); AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p Shutting down", this); gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
} }
shutdown_ = true; shutdown_ = true;
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
pending_picks_ = pick->next;
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "shutdown");
subchannel_list_.reset(); subchannel_list_.reset();
latest_pending_subchannel_list_.reset(); latest_pending_subchannel_list_.reset();
TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
void PickFirst::CancelPickLocked(PickState* pick, grpc_error* error) {
PickState* pp = pending_picks_;
pending_picks_ = nullptr;
while (pp != nullptr) {
PickState* next = pp->next;
if (pp == pick) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pp->next = pending_picks_;
pending_picks_ = pp;
}
pp = next;
}
GRPC_ERROR_UNREF(error);
}
void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
PickState* pick = pending_picks_;
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pick->next = pending_picks_;
pending_picks_ = pick;
}
pick = next;
}
GRPC_ERROR_UNREF(error);
}
void PickFirst::StartPickingLocked() {
started_picking_ = true;
if (subchannel_list_ != nullptr && subchannel_list_->num_subchannels() > 0) {
subchannel_list_->subchannel(0)
->CheckConnectivityStateAndStartWatchingLocked();
}
} }
void PickFirst::ExitIdleLocked() { void PickFirst::ExitIdleLocked() {
if (!started_picking_) { if (idle_) {
StartPickingLocked(); idle_ = false;
if (subchannel_list_ != nullptr &&
subchannel_list_->num_subchannels() > 0) {
subchannel_list_->subchannel(0)
->CheckConnectivityStateAndStartWatchingLocked();
}
} }
} }
@ -270,36 +196,6 @@ void PickFirst::ResetBackoffLocked() {
} }
} }
bool PickFirst::PickLocked(PickState* pick, grpc_error** error) {
// If we have a selected subchannel already, return synchronously.
if (selected_ != nullptr) {
pick->connected_subchannel = selected_->connected_subchannel()->Ref();
return true;
}
// No subchannel selected yet, so handle asynchronously.
if (pick->on_complete == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No pick result available but synchronous result required.");
return true;
}
pick->next = pending_picks_;
pending_picks_ = pick;
if (!started_picking_) {
StartPickingLocked();
}
return false;
}
grpc_connectivity_state PickFirst::CheckConnectivityLocked(grpc_error** error) {
return grpc_connectivity_state_get(&state_tracker_, error);
}
void PickFirst::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
grpc_closure* notify) {
grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
notify);
}
void PickFirst::FillChildRefsForChannelz( void PickFirst::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels_to_fill, channelz::ChildRefsList* child_subchannels_to_fill,
channelz::ChildRefsList* ignored) { channelz::ChildRefsList* ignored) {
@ -341,10 +237,11 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
if (addresses == nullptr) { if (addresses == nullptr) {
if (subchannel_list_ == nullptr) { if (subchannel_list_ == nullptr) {
// If we don't have a current subchannel list, go into TRANSIENT FAILURE. // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
grpc_connectivity_state_set( grpc_error* error =
&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args");
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), channel_control_helper()->UpdateState(
"pf_update_missing"); GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(error),
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error)));
} else { } else {
// otherwise, keep using the current subchannel list (ignore this update). // otherwise, keep using the current subchannel list (ignore this update).
gpr_log(GPR_ERROR, gpr_log(GPR_ERROR,
@ -364,25 +261,26 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
grpc_channel_args* new_args = grpc_channel_args* new_args =
grpc_channel_args_copy_and_add(&args, &new_arg, 1); grpc_channel_args_copy_and_add(&args, &new_arg, 1);
auto subchannel_list = MakeOrphanable<PickFirstSubchannelList>( auto subchannel_list = MakeOrphanable<PickFirstSubchannelList>(
this, &grpc_lb_pick_first_trace, *addresses, combiner(), this, &grpc_lb_pick_first_trace, *addresses, combiner(), *new_args);
client_channel_factory(), *new_args);
grpc_channel_args_destroy(new_args); grpc_channel_args_destroy(new_args);
if (subchannel_list->num_subchannels() == 0) { if (subchannel_list->num_subchannels() == 0) {
// Empty update or no valid subchannels. Unsubscribe from all current // Empty update or no valid subchannels. Unsubscribe from all current
// subchannels and put the channel in TRANSIENT_FAILURE. // subchannels and put the channel in TRANSIENT_FAILURE.
grpc_connectivity_state_set(
&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
"pf_update_empty");
subchannel_list_ = std::move(subchannel_list); // Empty list. subchannel_list_ = std::move(subchannel_list); // Empty list.
selected_ = nullptr; selected_ = nullptr;
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update");
channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(error),
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error)));
return; return;
} }
// If one of the subchannels in the new list is already in state // If one of the subchannels in the new list is already in state
// READY, then select it immediately. This can happen when the // READY, then select it immediately. This can happen when the
// currently selected subchannel is also present in the update. It // currently selected subchannel is also present in the update. It
// can also happen if one of the subchannels in the update is already // can also happen if one of the subchannels in the update is already
// in the subchannel index because it's in use by another channel. // in the global subchannel pool because it's in use by another channel.
// TODO(roth): If we're in IDLE state, we should probably defer this
// check and instead do it in ExitIdleLocked().
for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) { for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
PickFirstSubchannelData* sd = subchannel_list->subchannel(i); PickFirstSubchannelData* sd = subchannel_list->subchannel(i);
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
@ -390,8 +288,8 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
if (state == GRPC_CHANNEL_READY) { if (state == GRPC_CHANNEL_READY) {
subchannel_list_ = std::move(subchannel_list); subchannel_list_ = std::move(subchannel_list);
sd->ProcessUnselectedReadyLocked();
sd->StartConnectivityWatchLocked(); sd->StartConnectivityWatchLocked();
sd->ProcessUnselectedReadyLocked();
// If there was a previously pending update (which may or may // If there was a previously pending update (which may or may
// not have contained the currently selected subchannel), drop // not have contained the currently selected subchannel), drop
// it, so that it doesn't override what we've done here. // it, so that it doesn't override what we've done here.
@ -399,7 +297,7 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
// Make sure that subsequent calls to ExitIdleLocked() don't cause // Make sure that subsequent calls to ExitIdleLocked() don't cause
// us to start watching a subchannel other than the one we've // us to start watching a subchannel other than the one we've
// selected. // selected.
started_picking_ = true; idle_ = false;
return; return;
} }
} }
@ -407,17 +305,17 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
// We don't yet have a selected subchannel, so replace the current // We don't yet have a selected subchannel, so replace the current
// subchannel list immediately. // subchannel list immediately.
subchannel_list_ = std::move(subchannel_list); subchannel_list_ = std::move(subchannel_list);
// If we've started picking, start trying to connect to the first // If we're not in IDLE state, start trying to connect to the first
// subchannel in the new list. // subchannel in the new list.
if (started_picking_) { if (!idle_) {
// Note: No need to use CheckConnectivityStateAndStartWatchingLocked() // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
// here, since we've already checked the initial connectivity // here, since we've already checked the initial connectivity
// state of all subchannels above. // state of all subchannels above.
subchannel_list_->subchannel(0)->StartConnectivityWatchLocked(); subchannel_list_->subchannel(0)->StartConnectivityWatchLocked();
} }
} else { } else {
// We do have a selected subchannel, so keep using it until one of // We do have a selected subchannel (which means it's READY), so keep
// the subchannels in the new list reports READY. // using it until one of the subchannels in the new list reports READY.
if (latest_pending_subchannel_list_ != nullptr) { if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
@ -428,9 +326,9 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args,
} }
} }
latest_pending_subchannel_list_ = std::move(subchannel_list); latest_pending_subchannel_list_ = std::move(subchannel_list);
// If we've started picking, start trying to connect to the first // If we're not in IDLE state, start trying to connect to the first
// subchannel in the new list. // subchannel in the new list.
if (started_picking_) { if (!idle_) {
// Note: No need to use CheckConnectivityStateAndStartWatchingLocked() // Note: No need to use CheckConnectivityStateAndStartWatchingLocked()
// here, since we've already checked the initial connectivity // here, since we've already checked the initial connectivity
// state of all subchannels above. // state of all subchannels above.
@ -453,7 +351,8 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
if (p->selected_ == this) { if (p->selected_ == this) {
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO,
"Pick First %p connectivity changed for selected subchannel", p); "Pick First %p selected subchannel connectivity changed to %s", p,
grpc_connectivity_state_name(connectivity_state));
} }
// If the new state is anything other than READY and there is a // If the new state is anything other than READY and there is a
// pending update, switch to the pending update. // pending update, switch to the pending update.
@ -469,32 +368,40 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
p->selected_ = nullptr; p->selected_ = nullptr;
StopConnectivityWatchLocked(); StopConnectivityWatchLocked();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
grpc_connectivity_state_set( grpc_error* new_error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
&p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, "selected subchannel not ready; switching to pending update", &error,
error != GRPC_ERROR_NONE 1);
? GRPC_ERROR_REF(error) p->channel_control_helper()->UpdateState(
: GRPC_ERROR_CREATE_FROM_STATIC_STRING( GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
"selected subchannel not ready; switching to pending " UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(new_error)));
"update"),
"selected_not_ready+switch_to_update");
} else { } else {
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) { if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// If the selected subchannel goes bad, request a re-resolution. We also // If the selected subchannel goes bad, request a re-resolution. We
// set the channel state to IDLE and reset started_picking_. The reason // also set the channel state to IDLE. The reason is that if the new
// is that if the new state is TRANSIENT_FAILURE due to a GOAWAY // state is TRANSIENT_FAILURE due to a GOAWAY reception we don't want
// reception we don't want to connect to the re-resolved backends until // to connect to the re-resolved backends until we leave IDLE state.
// we leave the IDLE state. p->idle_ = true;
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_IDLE, p->channel_control_helper()->RequestReresolution();
GRPC_ERROR_NONE,
"selected_changed+reresolve");
p->started_picking_ = false;
p->TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_NONE);
// In transient failure. Rely on re-resolution to recover. // In transient failure. Rely on re-resolution to recover.
p->selected_ = nullptr; p->selected_ = nullptr;
StopConnectivityWatchLocked(); StopConnectivityWatchLocked();
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_IDLE, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(New<QueuePicker>(p->Ref())));
} else { } else {
grpc_connectivity_state_set(&p->state_tracker_, connectivity_state, // This is unlikely but can happen when a subchannel has been asked
GRPC_ERROR_REF(error), "selected_changed"); // to reconnect by a different channel and this channel has dropped
// some connectivity state notifications.
if (connectivity_state == GRPC_CHANNEL_READY) {
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(
New<Picker>(connected_subchannel()->Ref())));
} else { // CONNECTING
p->channel_control_helper()->UpdateState(
connectivity_state, GRPC_ERROR_REF(error),
UniquePtr<SubchannelPicker>(New<QueuePicker>(p->Ref())));
}
// Renew notification. // Renew notification.
RenewConnectivityWatchLocked(); RenewConnectivityWatchLocked();
} }
@ -513,9 +420,9 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
// select in place of the current one. // select in place of the current one.
switch (connectivity_state) { switch (connectivity_state) {
case GRPC_CHANNEL_READY: { case GRPC_CHANNEL_READY: {
ProcessUnselectedReadyLocked();
// Renew notification. // Renew notification.
RenewConnectivityWatchLocked(); RenewConnectivityWatchLocked();
ProcessUnselectedReadyLocked();
break; break;
} }
case GRPC_CHANNEL_TRANSIENT_FAILURE: { case GRPC_CHANNEL_TRANSIENT_FAILURE: {
@ -527,10 +434,14 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
// Case 1: Only set state to TRANSIENT_FAILURE if we've tried // Case 1: Only set state to TRANSIENT_FAILURE if we've tried
// all subchannels. // all subchannels.
if (sd->Index() == 0 && subchannel_list() == p->subchannel_list_.get()) { if (sd->Index() == 0 && subchannel_list() == p->subchannel_list_.get()) {
p->TryReresolutionLocked(&grpc_lb_pick_first_trace, GRPC_ERROR_NONE); p->channel_control_helper()->RequestReresolution();
grpc_connectivity_state_set( grpc_error* new_error =
&p->state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
GRPC_ERROR_REF(error), "exhausted_subchannels"); "failed to connect to all addresses", &error, 1);
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(new_error),
UniquePtr<SubchannelPicker>(
New<TransientFailurePicker>(new_error)));
} }
sd->CheckConnectivityStateAndStartWatchingLocked(); sd->CheckConnectivityStateAndStartWatchingLocked();
break; break;
@ -539,9 +450,9 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
case GRPC_CHANNEL_IDLE: { case GRPC_CHANNEL_IDLE: {
// Only update connectivity state in case 1. // Only update connectivity state in case 1.
if (subchannel_list() == p->subchannel_list_.get()) { if (subchannel_list() == p->subchannel_list_.get()) {
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING, p->channel_control_helper()->UpdateState(
GRPC_ERROR_REF(error), GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
"connecting_changed"); UniquePtr<SubchannelPicker>(New<QueuePicker>(p->Ref())));
} }
// Renew notification. // Renew notification.
RenewConnectivityWatchLocked(); RenewConnectivityWatchLocked();
@ -578,38 +489,30 @@ void PickFirst::PickFirstSubchannelData::ProcessUnselectedReadyLocked() {
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
} }
// Cases 1 and 2. // Cases 1 and 2.
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY,
GRPC_ERROR_NONE, "subchannel_ready");
p->selected_ = this; p->selected_ = this;
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(New<Picker>(connected_subchannel()->Ref())));
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p, subchannel()); gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p, subchannel());
} }
// Update any calls that were waiting for a pick.
PickState* pick;
while ((pick = p->pending_picks_)) {
p->pending_picks_ = pick->next;
pick->connected_subchannel = p->selected_->connected_subchannel()->Ref();
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_INFO, "Servicing pending pick with selected subchannel %p",
p->selected_->subchannel());
}
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
} }
void PickFirst::PickFirstSubchannelData:: void PickFirst::PickFirstSubchannelData::
CheckConnectivityStateAndStartWatchingLocked() { CheckConnectivityStateAndStartWatchingLocked() {
PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy()); PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy());
// Check current state.
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
if (p->selected_ != this && grpc_connectivity_state current_state = CheckConnectivityStateLocked(&error);
CheckConnectivityStateLocked(&error) == GRPC_CHANNEL_READY) {
// We must process the READY subchannel before we start watching it.
// Otherwise, we won't know it's READY because we will be waiting for its
// connectivity state to change from READY.
ProcessUnselectedReadyLocked();
}
GRPC_ERROR_UNREF(error); GRPC_ERROR_UNREF(error);
// Start watch.
StartConnectivityWatchLocked(); StartConnectivityWatchLocked();
// If current state is READY, select the subchannel now, since we started
// watching from this state and will not get a notification of it
// transitioning into this state.
if (p->selected_ != this && current_state == GRPC_CHANNEL_READY) {
ProcessUnselectedReadyLocked();
}
} }
// //

@ -26,6 +26,7 @@
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include <stdlib.h>
#include <string.h> #include <string.h>
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
@ -62,17 +63,6 @@ class RoundRobin : public LoadBalancingPolicy {
void UpdateLocked(const grpc_channel_args& args, void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override; grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) override;
void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
grpc_closure* closure) override;
grpc_connectivity_state CheckConnectivityLocked(
grpc_error** connectivity_error) override;
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void ExitIdleLocked() override;
void ResetBackoffLocked() override; void ResetBackoffLocked() override;
void FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels, void FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* ignored) override; channelz::ChildRefsList* ignored) override;
@ -117,14 +107,12 @@ class RoundRobin : public LoadBalancingPolicy {
: public SubchannelList<RoundRobinSubchannelList, : public SubchannelList<RoundRobinSubchannelList,
RoundRobinSubchannelData> { RoundRobinSubchannelData> {
public: public:
RoundRobinSubchannelList( RoundRobinSubchannelList(RoundRobin* policy, TraceFlag* tracer,
RoundRobin* policy, TraceFlag* tracer, const ServerAddressList& addresses,
const ServerAddressList& addresses, grpc_combiner* combiner, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory, const grpc_channel_args& args)
const grpc_channel_args& args)
: SubchannelList(policy, tracer, addresses, combiner, : SubchannelList(policy, tracer, addresses, combiner,
client_channel_factory, args), policy->channel_control_helper(), args) {
last_ready_index_(num_subchannels() - 1) {
// Need to maintain a ref to the LB policy as long as we maintain // Need to maintain a ref to the LB policy as long as we maintain
// any references to subchannels, since the subchannels' // any references to subchannels, since the subchannels'
// pollset_sets will include the LB policy's pollset_set. // pollset_sets will include the LB policy's pollset_set.
@ -157,15 +145,25 @@ class RoundRobin : public LoadBalancingPolicy {
// subchannels in each state. // subchannels in each state.
void UpdateRoundRobinStateFromSubchannelStateCountsLocked(); void UpdateRoundRobinStateFromSubchannelStateCountsLocked();
size_t GetNextReadySubchannelIndexLocked();
void UpdateLastReadySubchannelIndexLocked(size_t last_ready_index);
private: private:
size_t num_ready_ = 0; size_t num_ready_ = 0;
size_t num_connecting_ = 0; size_t num_connecting_ = 0;
size_t num_transient_failure_ = 0; size_t num_transient_failure_ = 0;
grpc_error* last_transient_failure_error_ = GRPC_ERROR_NONE; grpc_error* last_transient_failure_error_ = GRPC_ERROR_NONE;
size_t last_ready_index_; // Index into list of last pick. };
class Picker : public SubchannelPicker {
public:
Picker(RoundRobin* parent, RoundRobinSubchannelList* subchannel_list);
PickResult Pick(PickState* pick, grpc_error** error) override;
private:
// Using pointer value only, no ref held -- do not dereference!
RoundRobin* parent_;
size_t last_picked_index_;
InlinedVector<RefCountedPtr<ConnectedSubchannel>, 10> subchannels_;
}; };
// Helper class to ensure that any function that modifies the child refs // Helper class to ensure that any function that modifies the child refs
@ -182,9 +180,6 @@ class RoundRobin : public LoadBalancingPolicy {
void ShutdownLocked() override; void ShutdownLocked() override;
void StartPickingLocked();
bool DoPickLocked(PickState* pick);
void DrainPendingPicksLocked();
void UpdateChildRefsLocked(); void UpdateChildRefsLocked();
/** list of subchannels */ /** list of subchannels */
@ -195,14 +190,8 @@ class RoundRobin : public LoadBalancingPolicy {
* racing callbacks that reference outdated subchannel lists won't perform any * racing callbacks that reference outdated subchannel lists won't perform any
* update. */ * update. */
OrphanablePtr<RoundRobinSubchannelList> latest_pending_subchannel_list_; OrphanablePtr<RoundRobinSubchannelList> latest_pending_subchannel_list_;
/** have we started picking? */
bool started_picking_ = false;
/** are we shutting down? */ /** are we shutting down? */
bool shutdown_ = false; bool shutdown_ = false;
/** List of picks that are waiting on connectivity */
PickState* pending_picks_ = nullptr;
/** our connectivity state tracker */
grpc_connectivity_state_tracker state_tracker_;
/// Lock and data used to capture snapshots of this channel's child /// Lock and data used to capture snapshots of this channel's child
/// channels and subchannels. This data is consumed by channelz. /// channels and subchannels. This data is consumed by channelz.
gpr_mu child_refs_mu_; gpr_mu child_refs_mu_;
@ -210,15 +199,56 @@ class RoundRobin : public LoadBalancingPolicy {
channelz::ChildRefsList child_channels_; channelz::ChildRefsList child_channels_;
}; };
//
// RoundRobin::Picker
//
RoundRobin::Picker::Picker(RoundRobin* parent,
RoundRobinSubchannelList* subchannel_list)
: parent_(parent) {
for (size_t i = 0; i < subchannel_list->num_subchannels(); ++i) {
auto* connected_subchannel =
subchannel_list->subchannel(i)->connected_subchannel();
if (connected_subchannel != nullptr) {
subchannels_.push_back(connected_subchannel->Ref());
}
}
// For discussion on why we generate a random starting index for
// the picker, see https://github.com/grpc/grpc-go/issues/2580.
// TODO(roth): rand(3) is not thread-safe. This should be replaced with
// something better as part of https://github.com/grpc/grpc/issues/17891.
last_picked_index_ = rand() % subchannels_.size();
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p picker %p] created picker from subchannel_list=%p "
"with %" PRIuPTR " READY subchannels; last_picked_index_=%" PRIuPTR,
parent_, this, subchannel_list, subchannels_.size(),
last_picked_index_);
}
}
RoundRobin::Picker::PickResult RoundRobin::Picker::Pick(PickState* pick,
grpc_error** error) {
last_picked_index_ = (last_picked_index_ + 1) % subchannels_.size();
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p picker %p] returning index %" PRIuPTR
", connected_subchannel=%p",
parent_, this, last_picked_index_,
subchannels_[last_picked_index_].get());
}
pick->connected_subchannel = subchannels_[last_picked_index_];
return PICK_COMPLETE;
}
//
// RoundRobin
//
RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) { RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
GPR_ASSERT(args.client_channel_factory != nullptr);
gpr_mu_init(&child_refs_mu_); gpr_mu_init(&child_refs_mu_);
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"round_robin");
UpdateLocked(*args.args, args.lb_config);
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this, gpr_log(GPR_INFO, "[RR %p] Created", this);
subchannel_list_->num_subchannels());
} }
} }
@ -229,93 +259,16 @@ RoundRobin::~RoundRobin() {
gpr_mu_destroy(&child_refs_mu_); gpr_mu_destroy(&child_refs_mu_);
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
GPR_ASSERT(pending_picks_ == nullptr);
grpc_connectivity_state_destroy(&state_tracker_);
}
void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
pending_picks_ = pick->next;
grpc_error* error = GRPC_ERROR_NONE;
if (new_policy->PickLocked(pick, &error)) {
// Synchronous return, schedule closure.
GRPC_CLOSURE_SCHED(pick->on_complete, error);
}
}
} }
void RoundRobin::ShutdownLocked() { void RoundRobin::ShutdownLocked() {
AutoChildRefsUpdater guard(this); AutoChildRefsUpdater guard(this);
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Shutting down", this); gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
} }
shutdown_ = true; shutdown_ = true;
PickState* pick;
while ((pick = pending_picks_) != nullptr) {
pending_picks_ = pick->next;
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_REF(error));
}
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "rr_shutdown");
subchannel_list_.reset(); subchannel_list_.reset();
latest_pending_subchannel_list_.reset(); latest_pending_subchannel_list_.reset();
TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_CANCELLED);
GRPC_ERROR_UNREF(error);
}
void RoundRobin::CancelPickLocked(PickState* pick, grpc_error* error) {
PickState* pp = pending_picks_;
pending_picks_ = nullptr;
while (pp != nullptr) {
PickState* next = pp->next;
if (pp == pick) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pp->next = pending_picks_;
pending_picks_ = pp;
}
pp = next;
}
GRPC_ERROR_UNREF(error);
}
void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
PickState* pick = pending_picks_;
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pick->next = pending_picks_;
pending_picks_ = pick;
}
pick = next;
}
GRPC_ERROR_UNREF(error);
}
void RoundRobin::StartPickingLocked() {
started_picking_ = true;
subchannel_list_->StartWatchingLocked();
}
void RoundRobin::ExitIdleLocked() {
if (!started_picking_) {
StartPickingLocked();
}
} }
void RoundRobin::ResetBackoffLocked() { void RoundRobin::ResetBackoffLocked() {
@ -325,60 +278,6 @@ void RoundRobin::ResetBackoffLocked() {
} }
} }
bool RoundRobin::DoPickLocked(PickState* pick) {
const size_t next_ready_index =
subchannel_list_->GetNextReadySubchannelIndexLocked();
if (next_ready_index < subchannel_list_->num_subchannels()) {
/* readily available, report right away */
RoundRobinSubchannelData* sd =
subchannel_list_->subchannel(next_ready_index);
GPR_ASSERT(sd->connected_subchannel() != nullptr);
pick->connected_subchannel = sd->connected_subchannel()->Ref();
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %" PRIuPTR ")",
this, sd->subchannel(), pick->connected_subchannel.get(),
sd->subchannel_list(), next_ready_index);
}
/* only advance the last picked pointer if the selection was used */
subchannel_list_->UpdateLastReadySubchannelIndexLocked(next_ready_index);
return true;
}
return false;
}
void RoundRobin::DrainPendingPicksLocked() {
PickState* pick;
while ((pick = pending_picks_)) {
pending_picks_ = pick->next;
GPR_ASSERT(DoPickLocked(pick));
GRPC_CLOSURE_SCHED(pick->on_complete, GRPC_ERROR_NONE);
}
}
bool RoundRobin::PickLocked(PickState* pick, grpc_error** error) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_);
}
GPR_ASSERT(!shutdown_);
if (subchannel_list_ != nullptr) {
if (DoPickLocked(pick)) return true;
}
if (pick->on_complete == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No pick result available but synchronous result required.");
return true;
}
/* no pick currently available. Save for later in list of pending picks */
pick->next = pending_picks_;
pending_picks_ = pick;
if (!started_picking_) {
StartPickingLocked();
}
return false;
}
void RoundRobin::FillChildRefsForChannelz( void RoundRobin::FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels_to_fill, channelz::ChildRefsList* child_subchannels_to_fill,
channelz::ChildRefsList* ignored) { channelz::ChildRefsList* ignored) {
@ -426,14 +325,14 @@ void RoundRobin::RoundRobinSubchannelList::StartWatchingLocked() {
subchannel(i)->UpdateConnectivityStateLocked(state, error); subchannel(i)->UpdateConnectivityStateLocked(state, error);
} }
} }
// Now set the LB policy's state based on the subchannels' states.
UpdateRoundRobinStateFromSubchannelStateCountsLocked();
// Start connectivity watch for each subchannel. // Start connectivity watch for each subchannel.
for (size_t i = 0; i < num_subchannels(); i++) { for (size_t i = 0; i < num_subchannels(); i++) {
if (subchannel(i)->subchannel() != nullptr) { if (subchannel(i)->subchannel() != nullptr) {
subchannel(i)->StartConnectivityWatchLocked(); subchannel(i)->StartConnectivityWatchLocked();
} }
} }
// Now set the LB policy's state based on the subchannels' states.
UpdateRoundRobinStateFromSubchannelStateCountsLocked();
} }
void RoundRobin::RoundRobinSubchannelList::UpdateStateCountersLocked( void RoundRobin::RoundRobinSubchannelList::UpdateStateCountersLocked(
@ -462,8 +361,8 @@ void RoundRobin::RoundRobinSubchannelList::UpdateStateCountersLocked(
last_transient_failure_error_ = transient_failure_error; last_transient_failure_error_ = transient_failure_error;
} }
// Sets the RR policy's connectivity state based on the current // Sets the RR policy's connectivity state and generates a new picker based
// subchannel list. // on the current subchannel list.
void RoundRobin::RoundRobinSubchannelList:: void RoundRobin::RoundRobinSubchannelList::
MaybeUpdateRoundRobinConnectivityStateLocked() { MaybeUpdateRoundRobinConnectivityStateLocked() {
RoundRobin* p = static_cast<RoundRobin*>(policy()); RoundRobin* p = static_cast<RoundRobin*>(policy());
@ -485,18 +384,21 @@ void RoundRobin::RoundRobinSubchannelList::
*/ */
if (num_ready_ > 0) { if (num_ready_ > 0) {
/* 1) READY */ /* 1) READY */
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_READY, p->channel_control_helper()->UpdateState(
GRPC_ERROR_NONE, "rr_ready"); GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(New<Picker>(p, this)));
} else if (num_connecting_ > 0) { } else if (num_connecting_ > 0) {
/* 2) CONNECTING */ /* 2) CONNECTING */
grpc_connectivity_state_set(&p->state_tracker_, GRPC_CHANNEL_CONNECTING, p->channel_control_helper()->UpdateState(
GRPC_ERROR_NONE, "rr_connecting"); GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
UniquePtr<SubchannelPicker>(New<QueuePicker>(p->Ref())));
} else if (num_transient_failure_ == num_subchannels()) { } else if (num_transient_failure_ == num_subchannels()) {
/* 3) TRANSIENT_FAILURE */ /* 3) TRANSIENT_FAILURE */
grpc_connectivity_state_set(&p->state_tracker_, p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(last_transient_failure_error_), GRPC_ERROR_REF(last_transient_failure_error_),
"rr_exhausted_subchannels"); UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(
GRPC_ERROR_REF(last_transient_failure_error_))));
} }
} }
@ -525,8 +427,6 @@ void RoundRobin::RoundRobinSubchannelList::
} }
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_); p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
} }
// Drain pending picks.
p->DrainPendingPicksLocked();
} }
// Update the RR policy's connectivity state if needed. // Update the RR policy's connectivity state if needed.
MaybeUpdateRoundRobinConnectivityStateLocked(); MaybeUpdateRoundRobinConnectivityStateLocked();
@ -566,80 +466,14 @@ void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
"Requesting re-resolution", "Requesting re-resolution",
p, subchannel()); p, subchannel());
} }
p->TryReresolutionLocked(&grpc_lb_round_robin_trace, GRPC_ERROR_NONE); p->channel_control_helper()->RequestReresolution();
} }
// Renew connectivity watch.
RenewConnectivityWatchLocked();
// Update state counters. // Update state counters.
UpdateConnectivityStateLocked(connectivity_state, error); UpdateConnectivityStateLocked(connectivity_state, error);
// Update overall state and renew notification. // Update overall state and renew notification.
subchannel_list()->UpdateRoundRobinStateFromSubchannelStateCountsLocked(); subchannel_list()->UpdateRoundRobinStateFromSubchannelStateCountsLocked();
RenewConnectivityWatchLocked();
}
/** Returns the index into p->subchannel_list->subchannels of the next
* subchannel in READY state, or p->subchannel_list->num_subchannels if no
* subchannel is READY.
*
* Note that this function does *not* update p->last_ready_subchannel_index.
* The caller must do that if it returns a pick. */
size_t
RoundRobin::RoundRobinSubchannelList::GetNextReadySubchannelIndexLocked() {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] getting next ready subchannel (out of %" PRIuPTR
"), last_ready_index=%" PRIuPTR,
policy(), num_subchannels(), last_ready_index_);
}
for (size_t i = 0; i < num_subchannels(); ++i) {
const size_t index = (i + last_ready_index_ + 1) % num_subchannels();
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_INFO,
"[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
": state=%s",
policy(), subchannel(index)->subchannel(), this, index,
grpc_connectivity_state_name(
subchannel(index)->connectivity_state()));
}
if (subchannel(index)->connectivity_state() == GRPC_CHANNEL_READY) {
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
" of subchannel_list %p",
policy(), subchannel(index)->subchannel(), index, this);
}
return index;
}
}
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO, "[RR %p] no subchannels in ready state", this);
}
return num_subchannels();
}
// Sets last_ready_index_ to last_ready_index.
void RoundRobin::RoundRobinSubchannelList::UpdateLastReadySubchannelIndexLocked(
size_t last_ready_index) {
GPR_ASSERT(last_ready_index < num_subchannels());
last_ready_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_INFO,
"[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
" (SC %p, CSC %p)",
policy(), last_ready_index,
subchannel(last_ready_index)->subchannel(),
subchannel(last_ready_index)->connected_subchannel());
}
}
grpc_connectivity_state RoundRobin::CheckConnectivityLocked(
grpc_error** error) {
return grpc_connectivity_state_get(&state_tracker_, error);
}
void RoundRobin::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
grpc_closure* notify) {
grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
notify);
} }
void RoundRobin::UpdateLocked(const grpc_channel_args& args, void RoundRobin::UpdateLocked(const grpc_channel_args& args,
@ -651,10 +485,11 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args,
// If we don't have a current subchannel list, go into TRANSIENT_FAILURE. // If we don't have a current subchannel list, go into TRANSIENT_FAILURE.
// Otherwise, keep using the current subchannel list (ignore this update). // Otherwise, keep using the current subchannel list (ignore this update).
if (subchannel_list_ == nullptr) { if (subchannel_list_ == nullptr) {
grpc_connectivity_state_set( grpc_error* error =
&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args");
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"), channel_control_helper()->UpdateState(
"rr_update_missing"); GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(error),
UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error)));
} }
return; return;
} }
@ -671,21 +506,23 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args,
} }
} }
latest_pending_subchannel_list_ = MakeOrphanable<RoundRobinSubchannelList>( latest_pending_subchannel_list_ = MakeOrphanable<RoundRobinSubchannelList>(
this, &grpc_lb_round_robin_trace, *addresses, combiner(), this, &grpc_lb_round_robin_trace, *addresses, combiner(), args);
client_channel_factory(), args); if (latest_pending_subchannel_list_->num_subchannels() == 0) {
// If we haven't started picking yet or the new list is empty, // If the new list is empty, immediately promote the new list to the
// immediately promote the new list to the current list. // current list and transition to TRANSIENT_FAILURE.
if (!started_picking_ || grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update");
latest_pending_subchannel_list_->num_subchannels() == 0) { channel_control_helper()->UpdateState(
if (latest_pending_subchannel_list_->num_subchannels() == 0) { GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(error),
grpc_connectivity_state_set( UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error)));
&state_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE, subchannel_list_ = std::move(latest_pending_subchannel_list_);
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"), } else if (subchannel_list_ == nullptr) {
"rr_update_empty"); // If there is no current list, immediately promote the new list to
} // the current list and start watching it.
subchannel_list_ = std::move(latest_pending_subchannel_list_); subchannel_list_ = std::move(latest_pending_subchannel_list_);
subchannel_list_->StartWatchingLocked();
} else { } else {
// If we've started picking, start watching the new list. // Start watching the pending list. It will get swapped into the
// current list when it reports READY.
latest_pending_subchannel_list_->StartWatchingLocked(); latest_pending_subchannel_list_->StartWatchingLocked();
} }
} }

@ -232,7 +232,7 @@ class SubchannelList : public InternallyRefCounted<SubchannelListType> {
protected: protected:
SubchannelList(LoadBalancingPolicy* policy, TraceFlag* tracer, SubchannelList(LoadBalancingPolicy* policy, TraceFlag* tracer,
const ServerAddressList& addresses, grpc_combiner* combiner, const ServerAddressList& addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory, LoadBalancingPolicy::ChannelControlHelper* helper,
const grpc_channel_args& args); const grpc_channel_args& args);
virtual ~SubchannelList(); virtual ~SubchannelList();
@ -486,7 +486,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList( SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
LoadBalancingPolicy* policy, TraceFlag* tracer, LoadBalancingPolicy* policy, TraceFlag* tracer,
const ServerAddressList& addresses, grpc_combiner* combiner, const ServerAddressList& addresses, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory, LoadBalancingPolicy::ChannelControlHelper* helper,
const grpc_channel_args& args) const grpc_channel_args& args)
: InternallyRefCounted<SubchannelListType>(tracer), : InternallyRefCounted<SubchannelListType>(tracer),
policy_(policy), policy_(policy),
@ -509,12 +509,8 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
GRPC_ARG_INHIBIT_HEALTH_CHECKING}; GRPC_ARG_INHIBIT_HEALTH_CHECKING};
// Create a subchannel for each address. // Create a subchannel for each address.
for (size_t i = 0; i < addresses.size(); i++) { for (size_t i = 0; i < addresses.size(); i++) {
// If there were any balancer addresses, we would have chosen grpclb
// policy, which does not use a SubchannelList.
GPR_ASSERT(!addresses[i].IsBalancer()); GPR_ASSERT(!addresses[i].IsBalancer());
InlinedVector<grpc_arg, 4> args_to_add; InlinedVector<grpc_arg, 3> args_to_add;
args_to_add.emplace_back(
SubchannelPoolInterface::CreateChannelArg(policy_->subchannel_pool()));
const size_t subchannel_address_arg_index = args_to_add.size(); const size_t subchannel_address_arg_index = args_to_add.size();
args_to_add.emplace_back( args_to_add.emplace_back(
Subchannel::CreateSubchannelAddressArg(&addresses[i].address())); Subchannel::CreateSubchannelAddressArg(&addresses[i].address()));
@ -527,8 +523,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
&args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove),
args_to_add.data(), args_to_add.size()); args_to_add.data(), args_to_add.size());
gpr_free(args_to_add[subchannel_address_arg_index].value.string); gpr_free(args_to_add[subchannel_address_arg_index].value.string);
Subchannel* subchannel = grpc_client_channel_factory_create_subchannel( Subchannel* subchannel = helper->CreateSubchannel(*new_args);
client_channel_factory, new_args);
grpc_channel_args_destroy(new_args); grpc_channel_args_destroy(new_args);
if (subchannel == nullptr) { if (subchannel == nullptr) {
// Subchannel could not be created. // Subchannel could not be created.

@ -26,14 +26,13 @@
/// channel that uses pick_first to select from the list of balancer /// channel that uses pick_first to select from the list of balancer
/// addresses. /// addresses.
/// ///
/// The first time the xDS policy gets a request for a pick or to exit the idle /// When we get our initial update, we instantiate the internal *streaming*
/// state, \a StartPickingLocked() is called. This method is responsible for /// call to the LB server (whichever address pick_first chose). The call
/// instantiating the internal *streaming* call to the LB server (whichever /// will be complete when either the balancer sends status or when we cancel
/// address pick_first chose). The call will be complete when either the /// the call (e.g., because we are shutting down). In needed, we retry the
/// balancer sends status or when we cancel the call (e.g., because we are /// call. If we received at least one valid message from the server, a new
/// shutting down). In needed, we retry the call. If we received at least one /// call attempt will be made immediately; otherwise, we apply back-off
/// valid message from the server, a new call attempt will be made immediately; /// delays between attempts.
/// otherwise, we apply back-off delays between attempts.
/// ///
/// We maintain an internal child policy (round_robin) instance for distributing /// We maintain an internal child policy (round_robin) instance for distributing
/// requests across backends. Whenever we receive a new serverlist from /// requests across backends. Whenever we receive a new serverlist from
@ -70,7 +69,6 @@
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include "src/core/ext/filters/client_channel/client_channel.h" #include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h" #include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h" #include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h" #include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
@ -125,48 +123,12 @@ class XdsLb : public LoadBalancingPolicy {
void UpdateLocked(const grpc_channel_args& args, void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override; grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
void CancelPickLocked(PickState* pick, grpc_error* error) override;
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) override;
void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
grpc_closure* closure) override;
grpc_connectivity_state CheckConnectivityLocked(
grpc_error** connectivity_error) override;
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override;
void ExitIdleLocked() override;
void ResetBackoffLocked() override; void ResetBackoffLocked() override;
void FillChildRefsForChannelz( void FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels, channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) override; channelz::ChildRefsList* child_channels) override;
private: private:
/// Linked list of pending pick requests. It stores all information needed to
/// eventually call pick() on them. They mainly stay pending waiting for the
/// child policy to be created.
///
/// Note that when a pick is sent to the child policy, we inject our own
/// on_complete callback, so that we can intercept the result before
/// invoking the original on_complete callback. This allows us to set the
/// LB token metadata and add client_stats to the call context.
/// See \a pending_pick_complete() for details.
struct PendingPick {
// The xds lb instance that created the wrapping. This instance is not
// owned; reference counts are untouched. It's used only for logging
// purposes.
XdsLb* xdslb_policy;
// The original pick.
PickState* pick;
// Our on_complete closure and the original one.
grpc_closure on_complete;
grpc_closure* original_on_complete;
// Stats for client-side load reporting.
RefCountedPtr<XdsLbClientStats> client_stats;
// Next pending pick.
PendingPick* next = nullptr;
};
/// Contains a call to the LB server and all the data related to the call. /// Contains a call to the LB server and all the data related to the call.
class BalancerCallState : public InternallyRefCounted<BalancerCallState> { class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
public: public:
@ -241,11 +203,41 @@ class XdsLb : public LoadBalancingPolicy {
grpc_closure client_load_report_closure_; grpc_closure client_load_report_closure_;
}; };
class Picker : public SubchannelPicker {
public:
Picker(UniquePtr<SubchannelPicker> child_picker,
RefCountedPtr<XdsLbClientStats> client_stats)
: child_picker_(std::move(child_picker)),
client_stats_(std::move(client_stats)) {}
PickResult Pick(PickState* pick, grpc_error** error) override;
private:
UniquePtr<SubchannelPicker> child_picker_;
RefCountedPtr<XdsLbClientStats> client_stats_;
};
class Helper : public ChannelControlHelper {
public:
explicit Helper(RefCountedPtr<XdsLb> parent) : parent_(std::move(parent)) {}
Subchannel* CreateSubchannel(const grpc_channel_args& args) override;
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
private:
RefCountedPtr<XdsLb> parent_;
};
~XdsLb(); ~XdsLb();
void ShutdownLocked() override; void ShutdownLocked() override;
// Helper function used in ctor and UpdateLocked(). // Helper function used in UpdateLocked().
void ProcessChannelArgsLocked(const grpc_channel_args& args); void ProcessChannelArgsLocked(const grpc_channel_args& args);
// Parses the xds config given the JSON node of the first child of XdsConfig. // Parses the xds config given the JSON node of the first child of XdsConfig.
@ -255,7 +247,6 @@ class XdsLb : public LoadBalancingPolicy {
void ParseLbConfig(grpc_json* xds_config_json); void ParseLbConfig(grpc_json* xds_config_json);
// Methods for dealing with the balancer channel and call. // Methods for dealing with the balancer channel and call.
void StartPickingLocked();
void StartBalancerCallLocked(); void StartBalancerCallLocked();
static void OnFallbackTimerLocked(void* arg, grpc_error* error); static void OnFallbackTimerLocked(void* arg, grpc_error* error);
void StartBalancerCallRetryTimerLocked(); void StartBalancerCallRetryTimerLocked();
@ -263,24 +254,10 @@ class XdsLb : public LoadBalancingPolicy {
static void OnBalancerChannelConnectivityChangedLocked(void* arg, static void OnBalancerChannelConnectivityChangedLocked(void* arg,
grpc_error* error); grpc_error* error);
// Pending pick methods.
static void PendingPickCleanup(PendingPick* pp);
PendingPick* PendingPickCreate(PickState* pick);
void AddPendingPick(PendingPick* pp);
static void OnPendingPickComplete(void* arg, grpc_error* error);
// Methods for dealing with the child policy. // Methods for dealing with the child policy.
void CreateOrUpdateChildPolicyLocked(); void CreateOrUpdateChildPolicyLocked();
grpc_channel_args* CreateChildPolicyArgsLocked(); grpc_channel_args* CreateChildPolicyArgsLocked();
void CreateChildPolicyLocked(const char* name, Args args); void CreateChildPolicyLocked(const char* name, Args args);
bool PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
grpc_error** error);
void UpdateConnectivityStateFromChildPolicyLocked(
grpc_error* child_state_error);
static void OnChildPolicyConnectivityChangedLocked(void* arg,
grpc_error* error);
static void OnChildPolicyRequestReresolutionLocked(void* arg,
grpc_error* error);
// Who the client is trying to communicate with. // Who the client is trying to communicate with.
const char* server_name_ = nullptr; const char* server_name_ = nullptr;
@ -292,9 +269,7 @@ class XdsLb : public LoadBalancingPolicy {
grpc_channel_args* args_ = nullptr; grpc_channel_args* args_ = nullptr;
// Internal state. // Internal state.
bool started_picking_ = false;
bool shutting_down_ = false; bool shutting_down_ = false;
grpc_connectivity_state_tracker state_tracker_;
// The channel for communicating with the LB server. // The channel for communicating with the LB server.
grpc_channel* lb_channel_ = nullptr; grpc_channel* lb_channel_ = nullptr;
@ -337,17 +312,91 @@ class XdsLb : public LoadBalancingPolicy {
grpc_timer lb_fallback_timer_; grpc_timer lb_fallback_timer_;
grpc_closure lb_on_fallback_; grpc_closure lb_on_fallback_;
// Pending picks that are waiting on the xDS policy's connectivity.
PendingPick* pending_picks_ = nullptr;
// The policy to use for the backends. // The policy to use for the backends.
OrphanablePtr<LoadBalancingPolicy> child_policy_; OrphanablePtr<LoadBalancingPolicy> child_policy_;
UniquePtr<char> child_policy_json_string_; UniquePtr<char> child_policy_json_string_;
grpc_connectivity_state child_connectivity_state_;
grpc_closure on_child_connectivity_changed_;
grpc_closure on_child_request_reresolution_;
}; };
//
// XdsLb::Picker
//
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
static_cast<XdsLbClientStats*>(arg)->Unref();
}
XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
grpc_error** error) {
// TODO(roth): Add support for drop handling.
// Forward pick to child policy.
PickResult result = child_picker_->Pick(pick, error);
// If pick succeeded, add client stats.
if (result == PickResult::PICK_COMPLETE &&
pick->connected_subchannel != nullptr && client_stats_ != nullptr) {
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
client_stats_->Ref().release();
pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
DestroyClientStats;
}
return result;
}
//
// XdsLb::Helper
//
Subchannel* XdsLb::Helper::CreateSubchannel(const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* XdsLb::Helper::CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) {
if (parent_->shutting_down_) return nullptr;
return parent_->channel_control_helper()->CreateChannel(target, type, args);
}
void XdsLb::Helper::UpdateState(grpc_connectivity_state state,
grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) {
if (parent_->shutting_down_) {
GRPC_ERROR_UNREF(state_error);
return;
}
// TODO(juanlishen): When in fallback mode, pass the child picker
// through without wrapping it. (Or maybe use a different helper for
// the fallback policy?)
RefCountedPtr<XdsLbClientStats> client_stats;
if (parent_->lb_calld_ != nullptr &&
parent_->lb_calld_->client_stats() != nullptr) {
client_stats = parent_->lb_calld_->client_stats()->Ref();
}
parent_->channel_control_helper()->UpdateState(
state, state_error,
UniquePtr<SubchannelPicker>(
New<Picker>(std::move(picker), std::move(client_stats))));
}
void XdsLb::Helper::RequestReresolution() {
if (parent_->shutting_down_) return;
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Re-resolution requested from the internal RR policy "
"(%p).",
parent_.get(), parent_->child_policy_.get());
}
// If we are talking to a balancer, we expect to get updated addresses
// from the balancer, so we can ignore the re-resolution request from
// the RR policy. Otherwise, pass the re-resolution request up to the
// channel.
if (parent_->lb_calld_ == nullptr ||
!parent_->lb_calld_->seen_initial_response()) {
parent_->channel_control_helper()->RequestReresolution();
}
}
// //
// serverlist parsing code // serverlist parsing code
// //
@ -709,7 +758,7 @@ void XdsLb::BalancerCallState::OnBalancerMessageReceivedLocked(
// serverlist returned from the current LB call. // serverlist returned from the current LB call.
if (lb_calld->client_stats_report_interval_ > 0 && if (lb_calld->client_stats_report_interval_ > 0 &&
lb_calld->client_stats_ == nullptr) { lb_calld->client_stats_ == nullptr) {
lb_calld->client_stats_.reset(New<XdsLbClientStats>()); lb_calld->client_stats_ = MakeRefCounted<XdsLbClientStats>();
// TODO(roth): We currently track this ref manually. Once the // TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along // ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback. // with the callback.
@ -792,13 +841,13 @@ void XdsLb::BalancerCallState::OnBalancerStatusReceivedLocked(
lb_calld->lb_call_, grpc_error_string(error)); lb_calld->lb_call_, grpc_error_string(error));
gpr_free(status_details); gpr_free(status_details);
} }
xdslb_policy->TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_NONE);
// If this lb_calld is still in use, this call ended because of a failure so // If this lb_calld is still in use, this call ended because of a failure so
// we want to retry connecting. Otherwise, we have deliberately ended this // we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required. // call and no further action is required.
if (lb_calld == xdslb_policy->lb_calld_.get()) { if (lb_calld == xdslb_policy->lb_calld_.get()) {
xdslb_policy->lb_calld_.reset(); xdslb_policy->lb_calld_.reset();
GPR_ASSERT(!xdslb_policy->shutting_down_); GPR_ASSERT(!xdslb_policy->shutting_down_);
xdslb_policy->channel_control_helper()->RequestReresolution();
if (lb_calld->seen_initial_response_) { if (lb_calld->seen_initial_response_) {
// If we lose connection to the LB server, reset the backoff and restart // If we lose connection to the LB server, reset the backoff and restart
// the LB call immediately. // the LB call immediately.
@ -919,13 +968,6 @@ XdsLb::XdsLb(LoadBalancingPolicy::Args args)
GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_, GRPC_CLOSURE_INIT(&lb_channel_on_connectivity_changed_,
&XdsLb::OnBalancerChannelConnectivityChangedLocked, this, &XdsLb::OnBalancerChannelConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner)); grpc_combiner_scheduler(args.combiner));
GRPC_CLOSURE_INIT(&on_child_connectivity_changed_,
&XdsLb::OnChildPolicyConnectivityChangedLocked, this,
grpc_combiner_scheduler(args.combiner));
GRPC_CLOSURE_INIT(&on_child_request_reresolution_,
&XdsLb::OnChildPolicyRequestReresolutionLocked, this,
grpc_combiner_scheduler(args.combiner));
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, "xds");
// Record server name. // Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
const char* server_uri = grpc_channel_arg_get_string(arg); const char* server_uri = grpc_channel_arg_get_string(arg);
@ -946,25 +988,18 @@ XdsLb::XdsLb(LoadBalancingPolicy::Args args)
arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS); arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS);
lb_fallback_timeout_ms_ = grpc_channel_arg_get_integer( lb_fallback_timeout_ms_ = grpc_channel_arg_get_integer(
arg, {GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX}); arg, {GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX});
// Parse the LB config.
ParseLbConfig(args.lb_config);
// Process channel args.
ProcessChannelArgsLocked(*args.args);
} }
XdsLb::~XdsLb() { XdsLb::~XdsLb() {
GPR_ASSERT(pending_picks_ == nullptr);
gpr_mu_destroy(&lb_channel_mu_); gpr_mu_destroy(&lb_channel_mu_);
gpr_free((void*)server_name_); gpr_free((void*)server_name_);
grpc_channel_args_destroy(args_); grpc_channel_args_destroy(args_);
grpc_connectivity_state_destroy(&state_tracker_);
if (serverlist_ != nullptr) { if (serverlist_ != nullptr) {
xds_grpclb_destroy_serverlist(serverlist_); xds_grpclb_destroy_serverlist(serverlist_);
} }
} }
void XdsLb::ShutdownLocked() { void XdsLb::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
shutting_down_ = true; shutting_down_ = true;
lb_calld_.reset(); lb_calld_.reset();
if (retry_timer_callback_pending_) { if (retry_timer_callback_pending_) {
@ -974,7 +1009,6 @@ void XdsLb::ShutdownLocked() {
grpc_timer_cancel(&lb_fallback_timer_); grpc_timer_cancel(&lb_fallback_timer_);
} }
child_policy_.reset(); child_policy_.reset();
TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_CANCELLED);
// We destroy the LB channel here instead of in our destructor because // We destroy the LB channel here instead of in our destructor because
// destroying the channel triggers a last callback to // destroying the channel triggers a last callback to
// OnBalancerChannelConnectivityChangedLocked(), and we need to be // OnBalancerChannelConnectivityChangedLocked(), and we need to be
@ -985,115 +1019,12 @@ void XdsLb::ShutdownLocked() {
lb_channel_ = nullptr; lb_channel_ = nullptr;
gpr_mu_unlock(&lb_channel_mu_); gpr_mu_unlock(&lb_channel_mu_);
} }
grpc_connectivity_state_set(&state_tracker_, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_REF(error), "xds_shutdown");
// Clear pending picks.
PendingPick* pp;
while ((pp = pending_picks_) != nullptr) {
pending_picks_ = pp->next;
pp->pick->connected_subchannel.reset();
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
} }
// //
// public methods // public methods
// //
void XdsLb::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
PendingPick* pp;
while ((pp = pending_picks_) != nullptr) {
pending_picks_ = pp->next;
pp->pick->on_complete = pp->original_on_complete;
grpc_error* error = GRPC_ERROR_NONE;
if (new_policy->PickLocked(pp->pick, &error)) {
// Synchronous return; schedule closure.
GRPC_CLOSURE_SCHED(pp->pick->on_complete, error);
}
Delete(pp);
}
}
// Cancel a specific pending pick.
//
// A pick progresses as follows:
// - If there's a child policy available, it'll be handed over to child policy
// (in CreateChildPolicyLocked()). From that point onwards, it'll be the
// child policy's responsibility. For cancellations, that implies the pick
// needs to be also cancelled by the child policy instance.
// - Otherwise, without a child policy instance, picks stay pending at this
// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelPickLocked(PickState* pick, grpc_error* error) {
PendingPick* pp = pending_picks_;
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
if (pp->pick == pick) {
pick->connected_subchannel.reset();
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pp->next = pending_picks_;
pending_picks_ = pp;
}
pp = next;
}
if (child_policy_ != nullptr) {
child_policy_->CancelPickLocked(pick, GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
// Cancel all pending picks.
//
// A pick progresses as follows:
// - If there's a child policy available, it'll be handed over to child policy
// (in CreateChildPolicyLocked()). From that point onwards, it'll be the
// child policy's responsibility. For cancellations, that implies the pick
// needs to be also cancelled by the child policy instance.
// - Otherwise, without a child policy instance, picks stay pending at this
// policy's level (xds), inside the pending_picks_ list. To cancel these,
// we invoke the completion closure and set the pick's connected
// subchannel to nullptr right here.
void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) {
PendingPick* pp = pending_picks_;
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
if ((*pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick Cancelled", &error, 1));
} else {
pp->next = pending_picks_;
pending_picks_ = pp;
}
pp = next;
}
if (child_policy_ != nullptr) {
child_policy_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
initial_metadata_flags_eq,
GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
}
void XdsLb::ExitIdleLocked() {
if (!started_picking_) {
StartPickingLocked();
}
}
void XdsLb::ResetBackoffLocked() { void XdsLb::ResetBackoffLocked() {
if (lb_channel_ != nullptr) { if (lb_channel_ != nullptr) {
grpc_channel_reset_connect_backoff(lb_channel_); grpc_channel_reset_connect_backoff(lb_channel_);
@ -1103,36 +1034,6 @@ void XdsLb::ResetBackoffLocked() {
} }
} }
bool XdsLb::PickLocked(PickState* pick, grpc_error** error) {
PendingPick* pp = PendingPickCreate(pick);
bool pick_done = false;
if (child_policy_ != nullptr) {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] about to PICK from policy %p", this,
child_policy_.get());
}
pick_done = PickFromChildPolicyLocked(false /* force_async */, pp, error);
} else { // child_policy_ == NULL
if (pick->on_complete == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"No pick result available but synchronous result required.");
pick_done = true;
} else {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] No child policy. Adding to xds's pending picks",
this);
}
AddPendingPick(pp);
if (!started_picking_) {
StartPickingLocked();
}
pick_done = false;
}
}
return pick_done;
}
void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels, void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) { channelz::ChildRefsList* child_channels) {
// delegate to the child_policy_ to fill the children subchannels. // delegate to the child_policy_ to fill the children subchannels.
@ -1147,17 +1048,6 @@ void XdsLb::FillChildRefsForChannelz(channelz::ChildRefsList* child_subchannels,
} }
} }
grpc_connectivity_state XdsLb::CheckConnectivityLocked(
grpc_error** connectivity_error) {
return grpc_connectivity_state_get(&state_tracker_, connectivity_error);
}
void XdsLb::NotifyOnStateChangeLocked(grpc_connectivity_state* current,
grpc_closure* closure) {
grpc_connectivity_state_notify_on_state_change(&state_tracker_, current,
closure);
}
void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) { void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
const ServerAddressList* addresses = FindServerAddressListChannelArg(&args); const ServerAddressList* addresses = FindServerAddressListChannelArg(&args);
if (addresses == nullptr) { if (addresses == nullptr) {
@ -1185,9 +1075,8 @@ void XdsLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
char* uri_str; char* uri_str;
gpr_asprintf(&uri_str, "fake:///%s", server_name_); gpr_asprintf(&uri_str, "fake:///%s", server_name_);
gpr_mu_lock(&lb_channel_mu_); gpr_mu_lock(&lb_channel_mu_);
lb_channel_ = grpc_client_channel_factory_create_channel( lb_channel_ = channel_control_helper()->CreateChannel(
client_channel_factory(), uri_str, uri_str, GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, *lb_channel_args);
GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, lb_channel_args);
gpr_mu_unlock(&lb_channel_mu_); gpr_mu_unlock(&lb_channel_mu_);
GPR_ASSERT(lb_channel_ != nullptr); GPR_ASSERT(lb_channel_ != nullptr);
gpr_free(uri_str); gpr_free(uri_str);
@ -1230,6 +1119,7 @@ void XdsLb::ParseLbConfig(grpc_json* xds_config_json) {
} }
void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) { void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
const bool is_initial_update = lb_channel_ == nullptr;
ParseLbConfig(lb_config); ParseLbConfig(lb_config);
// TODO(juanlishen): Pass fallback policy config update after fallback policy // TODO(juanlishen): Pass fallback policy config update after fallback policy
// is added. // is added.
@ -1243,20 +1133,30 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
// TODO(vpowar): Handle the fallback_address changes when we add support for // TODO(vpowar): Handle the fallback_address changes when we add support for
// fallback in xDS. // fallback in xDS.
if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked(); if (child_policy_ != nullptr) CreateOrUpdateChildPolicyLocked();
// Start watching the LB channel connectivity for connection, if not // If this is the initial update, start the fallback timer.
// already doing so. if (is_initial_update) {
if (!watching_lb_channel_) { if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
Ref(DEBUG_LOCATION, "on_fallback_timer").release(); // Held by closure
GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
StartBalancerCallLocked();
} else if (!watching_lb_channel_) {
// If this is not the initial update and we're not already watching
// the LB channel's connectivity state, start a watch now. This
// ensures that we'll know when to switch to a new balancer call.
lb_channel_connectivity_ = grpc_channel_check_connectivity_state( lb_channel_connectivity_ = grpc_channel_check_connectivity_state(
lb_channel_, true /* try to connect */); lb_channel_, true /* try to connect */);
grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element( grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(lb_channel_)); grpc_channel_get_channel_stack(lb_channel_));
GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter); GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
watching_lb_channel_ = true; watching_lb_channel_ = true;
// TODO(roth): We currently track this ref manually. Once the // Ref held by closure.
// ClosureRef API is ready, we should pass the RefCountedPtr<> along Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release();
// with the callback.
auto self = Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
self.release();
grpc_client_channel_watch_connectivity_state( grpc_client_channel_watch_connectivity_state(
client_channel_elem, client_channel_elem,
grpc_polling_entity_create_from_pollset_set(interested_parties()), grpc_polling_entity_create_from_pollset_set(interested_parties()),
@ -1269,25 +1169,6 @@ void XdsLb::UpdateLocked(const grpc_channel_args& args, grpc_json* lb_config) {
// code for balancer channel and call // code for balancer channel and call
// //
void XdsLb::StartPickingLocked() {
// Start a timer to fall back.
if (lb_fallback_timeout_ms_ > 0 && serverlist_ == nullptr &&
!fallback_timer_callback_pending_) {
grpc_millis deadline = ExecCtx::Get()->Now() + lb_fallback_timeout_ms_;
// TODO(roth): We currently track this ref manually. Once the
// ClosureRef API is ready, we should pass the RefCountedPtr<> along
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_fallback_timer");
self.release();
GRPC_CLOSURE_INIT(&lb_on_fallback_, &XdsLb::OnFallbackTimerLocked, this,
grpc_combiner_scheduler(combiner()));
fallback_timer_callback_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
}
started_picking_ = true;
StartBalancerCallLocked();
}
void XdsLb::StartBalancerCallLocked() { void XdsLb::StartBalancerCallLocked() {
GPR_ASSERT(lb_channel_ != nullptr); GPR_ASSERT(lb_channel_ != nullptr);
if (shutting_down_) return; if (shutting_down_) return;
@ -1386,13 +1267,11 @@ void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
case GRPC_CHANNEL_IDLE: case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_READY: case GRPC_CHANNEL_READY:
xdslb_policy->lb_calld_.reset(); xdslb_policy->lb_calld_.reset();
if (xdslb_policy->started_picking_) { if (xdslb_policy->retry_timer_callback_pending_) {
if (xdslb_policy->retry_timer_callback_pending_) { grpc_timer_cancel(&xdslb_policy->lb_call_retry_timer_);
grpc_timer_cancel(&xdslb_policy->lb_call_retry_timer_);
}
xdslb_policy->lb_call_backoff_.Reset();
xdslb_policy->StartBalancerCallLocked();
} }
xdslb_policy->lb_call_backoff_.Reset();
xdslb_policy->StartBalancerCallLocked();
// Fall through. // Fall through.
case GRPC_CHANNEL_SHUTDOWN: case GRPC_CHANNEL_SHUTDOWN:
done: done:
@ -1402,90 +1281,10 @@ void XdsLb::OnBalancerChannelConnectivityChangedLocked(void* arg,
} }
} }
//
// PendingPick
//
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
static_cast<XdsLbClientStats*>(arg)->Unref();
}
void XdsLb::PendingPickCleanup(PendingPick* pp) {
// If connected_subchannel is nullptr, no pick has been made by the
// child policy (e.g., all addresses failed to connect).
if (pp->pick->connected_subchannel != nullptr) {
// Pass on client stats via context. Passes ownership of the reference.
if (pp->client_stats != nullptr) {
pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].value =
pp->client_stats.release();
pp->pick->subchannel_call_context[GRPC_GRPCLB_CLIENT_STATS].destroy =
DestroyClientStats;
}
} else {
pp->client_stats.reset();
}
}
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated child policy instance. We wrap this closure in
 * order to unref the child policy instance upon its invocation */
// Wrapper callback installed by PendingPickCreate(): finalizes the pick,
// forwards the result to the caller's original on_complete closure, and
// frees the PendingPick wrapper.
void XdsLb::OnPendingPickComplete(void* arg, grpc_error* error) {
  PendingPick* pp = static_cast<PendingPick*>(arg);
  PendingPickCleanup(pp);
  GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
  Delete(pp);
}
// Allocates a PendingPick wrapper for \a pick and interposes
// OnPendingPickComplete() in front of the caller's on_complete closure,
// saving the original so it can be invoked once the pick finishes.
// Ownership of the returned PendingPick passes to the completion path
// (OnPendingPickComplete() / PickFromChildPolicyLocked() call Delete()).
XdsLb::PendingPick* XdsLb::PendingPickCreate(PickState* pick) {
  PendingPick* pp = New<PendingPick>();
  pp->xdslb_policy = this;
  pp->pick = pick;
  GRPC_CLOSURE_INIT(&pp->on_complete, &XdsLb::OnPendingPickComplete, pp,
                    grpc_schedule_on_exec_ctx);
  pp->original_on_complete = pick->on_complete;
  pick->on_complete = &pp->on_complete;
  return pp;
}
// Pushes \a pp onto the head of the singly-linked pending_picks_ list,
// to be drained later (e.g. when a child policy becomes available).
void XdsLb::AddPendingPick(PendingPick* pp) {
  pp->next = pending_picks_;
  pending_picks_ = pp;
}
// //
// code for interacting with the child policy // code for interacting with the child policy
// //
// Performs a pick over \a child_policy_. Given that a pick can return
// immediately (ignoring its completion callback), we need to perform the
// cleanups this callback would otherwise be responsible for.
// If \a force_async is true, then we will manually schedule the
// completion callback even if the pick is available immediately.
// Returns true iff the pick completed synchronously (and was not forced
// async); on synchronous completion *error holds the pick result.
bool XdsLb::PickFromChildPolicyLocked(bool force_async, PendingPick* pp,
                                      grpc_error** error) {
  // Set client_stats.
  if (lb_calld_ != nullptr && lb_calld_->client_stats() != nullptr) {
    pp->client_stats = lb_calld_->client_stats()->Ref();
  }
  // Pick via the child policy.
  bool pick_done = child_policy_->PickLocked(pp->pick, error);
  if (pick_done) {
    // Pick completed synchronously, so OnPendingPickComplete() will never
    // run; do its cleanup work here instead.
    PendingPickCleanup(pp);
    if (force_async) {
      // Hand the (already-available) result to the caller asynchronously;
      // ownership of *error transfers to the scheduled closure.
      GRPC_CLOSURE_SCHED(pp->original_on_complete, *error);
      *error = GRPC_ERROR_NONE;
      pick_done = false;
    }
    Delete(pp);
  }
  // else, the pending pick will be registered and taken care of by the
  // pending pick list inside the child policy. Eventually,
  // OnPendingPickComplete() will be called, which will (among other
  // things) add the LB token to the call's initial metadata.
  return pick_done;
}
void XdsLb::CreateChildPolicyLocked(const char* name, Args args) { void XdsLb::CreateChildPolicyLocked(const char* name, Args args) {
GPR_ASSERT(child_policy_ == nullptr); GPR_ASSERT(child_policy_ == nullptr);
child_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( child_policy_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
@ -1494,42 +1293,11 @@ void XdsLb::CreateChildPolicyLocked(const char* name, Args args) {
gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a child policy", this); gpr_log(GPR_ERROR, "[xdslb %p] Failure creating a child policy", this);
return; return;
} }
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
auto self = Ref(DEBUG_LOCATION, "on_child_reresolution_requested");
self.release();
child_policy_->SetReresolutionClosureLocked(&on_child_request_reresolution_);
grpc_error* child_state_error = nullptr;
child_connectivity_state_ =
child_policy_->CheckConnectivityLocked(&child_state_error);
// Connectivity state is a function of the child policy updated/created.
UpdateConnectivityStateFromChildPolicyLocked(child_state_error);
// Add the xDS's interested_parties pollset_set to that of the newly created // Add the xDS's interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on // child policy. This will make the child policy progress upon activity on
// xDS LB, which in turn is tied to the application's call. // xDS LB, which in turn is tied to the application's call.
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(), grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties()); interested_parties());
// Subscribe to changes to the connectivity of the new child policy.
// TODO(roth): We currently track this ref manually. Once the new
// ClosureRef API is done, pass the RefCountedPtr<> along with the closure.
self = Ref(DEBUG_LOCATION, "on_child_connectivity_changed");
self.release();
child_policy_->NotifyOnStateChangeLocked(&child_connectivity_state_,
&on_child_connectivity_changed_);
child_policy_->ExitIdleLocked();
// Send pending picks to child policy.
PendingPick* pp;
while ((pp = pending_picks_)) {
pending_picks_ = pp->next;
if (grpc_lb_xds_trace.enabled()) {
gpr_log(
GPR_INFO,
"[xdslb %p] Pending pick about to (async) PICK from child policy %p",
this, child_policy_.get());
}
grpc_error* error = GRPC_ERROR_NONE;
PickFromChildPolicyLocked(true /* force_async */, pp, &error);
}
} }
grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() { grpc_channel_args* XdsLb::CreateChildPolicyArgsLocked() {
@ -1578,123 +1346,25 @@ void XdsLb::CreateOrUpdateChildPolicyLocked() {
child_policy_name = "round_robin"; child_policy_name = "round_robin";
} }
// TODO(juanlishen): Switch policy according to child_policy_config->key. // TODO(juanlishen): Switch policy according to child_policy_config->key.
if (child_policy_ != nullptr) { if (child_policy_ == nullptr) {
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] Updating the child policy %p", this,
child_policy_.get());
}
child_policy_->UpdateLocked(*args, child_policy_config);
} else {
LoadBalancingPolicy::Args lb_policy_args; LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner(); lb_policy_args.combiner = combiner();
lb_policy_args.client_channel_factory = client_channel_factory();
lb_policy_args.subchannel_pool = subchannel_pool()->Ref();
lb_policy_args.args = args; lb_policy_args.args = args;
lb_policy_args.lb_config = child_policy_config; lb_policy_args.channel_control_helper =
UniquePtr<ChannelControlHelper>(New<Helper>(Ref()));
CreateChildPolicyLocked(child_policy_name, std::move(lb_policy_args)); CreateChildPolicyLocked(child_policy_name, std::move(lb_policy_args));
if (grpc_lb_xds_trace.enabled()) { if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, "[xdslb %p] Created a new child policy %p", this, gpr_log(GPR_INFO, "[xdslb %p] Created a new child policy %p", this,
child_policy_.get()); child_policy_.get());
} }
} }
grpc_channel_args_destroy(args);
grpc_json_destroy(child_policy_json);
}
void XdsLb::OnChildPolicyRequestReresolutionLocked(void* arg,
grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
xdslb_policy->Unref(DEBUG_LOCATION, "on_child_reresolution_requested");
return;
}
if (grpc_lb_xds_trace.enabled()) { if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO, gpr_log(GPR_INFO, "[xdslb %p] Updating child policy %p", this,
"[xdslb %p] Re-resolution requested from child policy "
"(%p).",
xdslb_policy, xdslb_policy->child_policy_.get());
}
// If we are talking to a balancer, we expect to get updated addresses form
// the balancer, so we can ignore the re-resolution request from the child
// policy.
// Otherwise, handle the re-resolution request using the xds policy's
// original re-resolution closure.
if (xdslb_policy->lb_calld_ == nullptr ||
!xdslb_policy->lb_calld_->seen_initial_response()) {
xdslb_policy->TryReresolutionLocked(&grpc_lb_xds_trace, GRPC_ERROR_NONE);
}
// Give back the wrapper closure to the child policy.
xdslb_policy->child_policy_->SetReresolutionClosureLocked(
&xdslb_policy->on_child_request_reresolution_);
}
void XdsLb::UpdateConnectivityStateFromChildPolicyLocked(
grpc_error* child_state_error) {
const grpc_connectivity_state curr_glb_state =
grpc_connectivity_state_check(&state_tracker_);
/* The new connectivity status is a function of the previous one and the new
* input coming from the status of the child policy.
*
* current state (xds's)
* |
* v || I | C | R | TF | SD | <- new state (child policy's)
* ===++====+=====+=====+======+======+
* I || I | C | R | [I] | [I] |
* ---++----+-----+-----+------+------+
* C || I | C | R | [C] | [C] |
* ---++----+-----+-----+------+------+
* R || I | C | R | [R] | [R] |
* ---++----+-----+-----+------+------+
* TF || I | C | R | [TF] | [TF] |
* ---++----+-----+-----+------+------+
* SD || NA | NA | NA | NA | NA | (*)
* ---++----+-----+-----+------+------+
*
* A [STATE] indicates that the old child policy is kept. In those cases,
* STATE is the current state of xds, which is left untouched.
*
* In summary, if the new state is TRANSIENT_FAILURE or SHUTDOWN, stick to
* the previous child policy instance.
*
* Note that the status is never updated to SHUTDOWN as a result of calling
* this function. Only glb_shutdown() has the power to set that state.
*
* (*) This function mustn't be called during shutting down. */
GPR_ASSERT(curr_glb_state != GRPC_CHANNEL_SHUTDOWN);
switch (child_connectivity_state_) {
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN:
GPR_ASSERT(child_state_error != GRPC_ERROR_NONE);
break;
case GRPC_CHANNEL_IDLE:
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_READY:
GPR_ASSERT(child_state_error == GRPC_ERROR_NONE);
}
if (grpc_lb_xds_trace.enabled()) {
gpr_log(GPR_INFO,
"[xdslb %p] Setting xds's state to %s from child policy %p state.",
this, grpc_connectivity_state_name(child_connectivity_state_),
child_policy_.get()); child_policy_.get());
} }
grpc_connectivity_state_set(&state_tracker_, child_connectivity_state_, child_policy_->UpdateLocked(*args, child_policy_config);
child_state_error, grpc_channel_args_destroy(args);
"update_lb_connectivity_status_locked"); grpc_json_destroy(child_policy_json);
}
void XdsLb::OnChildPolicyConnectivityChangedLocked(void* arg,
grpc_error* error) {
XdsLb* xdslb_policy = static_cast<XdsLb*>(arg);
if (xdslb_policy->shutting_down_) {
xdslb_policy->Unref(DEBUG_LOCATION, "on_child_connectivity_changed");
return;
}
xdslb_policy->UpdateConnectivityStateFromChildPolicyLocked(
GRPC_ERROR_REF(error));
// Resubscribe. Reuse the "on_child_connectivity_changed" ref.
xdslb_policy->child_policy_->NotifyOnStateChangeLocked(
&xdslb_policy->child_connectivity_state_,
&xdslb_policy->on_child_connectivity_changed_);
} }
// //

@ -1,946 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/request_routing.h"
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/global_subchannel_pool.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/local_subchannel_pool.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_metadata.h"
namespace grpc_core {
//
// RequestRouter::Request::ResolverResultWaiter
//
// Handles waiting for a resolver result.
// Used only for the first call on an idle channel.
//
// Lifetime: heap-allocated with New<>() and self-deleting.  Exactly one of
// DoneLocked() or CancelLocked() runs last and calls Delete(this); the
// finished_ flag (only touched under the combiner) coordinates which one.
class RequestRouter::Request::ResolverResultWaiter {
 public:
  explicit ResolverResultWaiter(Request* request)
      : request_router_(request->request_router_),
        request_(request),
        tracer_enabled_(request_router_->tracer_->enabled()) {
    if (tracer_enabled_) {
      gpr_log(GPR_INFO,
              "request_router=%p request=%p: deferring pick pending resolver "
              "result",
              request_router_, request);
    }
    // Add closure to be run when a resolver result is available.
    GRPC_CLOSURE_INIT(&done_closure_, &DoneLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    AddToWaitingList();
    // Set cancellation closure, so that we abort if the call is cancelled.
    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
                                            &cancel_closure_);
  }

 private:
  // Adds done_closure_ to
  // request_router_->waiting_for_resolver_result_closures_.
  void AddToWaitingList() {
    grpc_closure_list_append(
        &request_router_->waiting_for_resolver_result_closures_, &done_closure_,
        GRPC_ERROR_NONE);
  }

  // Invoked when a resolver result is available.
  static void DoneLocked(void* arg, grpc_error* error) {
    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If CancelLocked() has already run, delete ourselves without doing
    // anything. Note that the call stack may have already been destroyed,
    // so it's not safe to access anything in state_.
    if (GPR_UNLIKELY(self->finished_)) {
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p: call cancelled before resolver result",
                request_router);
      }
      Delete(self);
      return;
    }
    // Otherwise, process the resolver result.
    Request* request = self->request_;
    if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
      // Resolver reported an error: fail the route attempt with that error.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: resolver failed to return data",
                request_router, request);
      }
      GRPC_CLOSURE_RUN(request->on_route_done_, GRPC_ERROR_REF(error));
    } else if (GPR_UNLIKELY(request_router->resolver_ == nullptr)) {
      // Shutting down.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO, "request_router=%p request=%p: resolver disconnected",
                request_router, request);
      }
      GRPC_CLOSURE_RUN(request->on_route_done_,
                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
    } else if (GPR_UNLIKELY(request_router->lb_policy_ == nullptr)) {
      // Transient resolver failure.
      // If call has wait_for_ready=true, try again; otherwise, fail.
      if (*request->pick_.initial_metadata_flags &
          GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: resolver returned but no LB "
                  "policy; wait_for_ready=true; trying again",
                  request_router, request);
        }
        // Re-add ourselves to the waiting list.
        self->AddToWaitingList();
        // Return early so that we don't set finished_ to true below.
        return;
      } else {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: resolver returned but no LB "
                  "policy; wait_for_ready=false; failing",
                  request_router, request);
        }
        GRPC_CLOSURE_RUN(
            request->on_route_done_,
            grpc_error_set_int(
                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
                GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
      }
    } else {
      // Happy path: resolver and LB policy are both present, so proceed
      // with service-config processing and the LB pick.
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: resolver returned, doing LB "
                "pick",
                request_router, request);
      }
      request->ProcessServiceConfigAndStartLbPickLocked();
    }
    // Mark done so that CancelLocked(), which always eventually runs,
    // knows it is responsible for deleting this object.
    self->finished_ = true;
  }

  // Invoked when the call is cancelled.
  // Note: This runs under the client_channel combiner, but will NOT be
  // holding the call combiner.
  static void CancelLocked(void* arg, grpc_error* error) {
    ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If DoneLocked() has already run, delete ourselves without doing anything.
    if (self->finished_) {
      Delete(self);
      return;
    }
    Request* request = self->request_;
    // If we are being cancelled, immediately invoke on_route_done_
    // to propagate the error back to the caller.
    if (error != GRPC_ERROR_NONE) {
      if (self->tracer_enabled_) {
        gpr_log(GPR_INFO,
                "request_router=%p request=%p: cancelling call waiting for "
                "name resolution",
                request_router, request);
      }
      // Note: Although we are not in the call combiner here, we are
      // basically stealing the call combiner from the pending pick, so
      // it's safe to run on_route_done_ here -- we are essentially
      // calling it here instead of calling it in DoneLocked().
      GRPC_CLOSURE_RUN(request->on_route_done_,
                       GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "Pick cancelled", &error, 1));
    }
    // DoneLocked() has not run yet; it will see finished_ and delete us.
    self->finished_ = true;
  }

  RequestRouter* request_router_;
  Request* request_;
  const bool tracer_enabled_;  // Cached; safe to read after request_ is gone.
  grpc_closure done_closure_;
  grpc_closure cancel_closure_;
  bool finished_ = false;
};
//
// RequestRouter::Request::AsyncPickCanceller
//
// Handles the call combiner cancellation callback for an async LB pick.
//
// Lifetime: heap-allocated with New<>() and freed by CancelLocked(), which
// the call combiner guarantees will run exactly once (with or without a
// cancellation error).  Holds a ref on the owning call stack so the call
// cannot be destroyed while the cancellation callback is still pending.
class RequestRouter::Request::AsyncPickCanceller {
 public:
  explicit AsyncPickCanceller(Request* request)
      : request_router_(request->request_router_),
        request_(request),
        tracer_enabled_(request_router_->tracer_->enabled()) {
    GRPC_CALL_STACK_REF(request->owning_call_, "pick_callback_cancel");
    // Set cancellation closure, so that we abort if the call is cancelled.
    GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
                      grpc_combiner_scheduler(request_router_->combiner_));
    grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
                                            &cancel_closure_);
  }

  // Called when the pick completes, so that a later CancelLocked() becomes
  // a no-op.  Drops the call stack ref taken in the constructor.
  void MarkFinishedLocked() {
    finished_ = true;
    GRPC_CALL_STACK_UNREF(request_->owning_call_, "pick_callback_cancel");
  }

 private:
  // Invoked when the call is cancelled.
  // Note: This runs under the client_channel combiner, but will NOT be
  // holding the call combiner.
  static void CancelLocked(void* arg, grpc_error* error) {
    AsyncPickCanceller* self = static_cast<AsyncPickCanceller*>(arg);
    Request* request = self->request_;
    RequestRouter* request_router = self->request_router_;
    if (!self->finished_) {
      // Note: request_router->lb_policy_ may have changed since we started our
      // pick, in which case we will be cancelling the pick on a policy other
      // than the one we started it on. However, this will just be a no-op.
      if (error != GRPC_ERROR_NONE && request_router->lb_policy_ != nullptr) {
        if (self->tracer_enabled_) {
          gpr_log(GPR_INFO,
                  "request_router=%p request=%p: cancelling pick from LB "
                  "policy %p",
                  request_router, request, request_router->lb_policy_.get());
        }
        request_router->lb_policy_->CancelPickLocked(&request->pick_,
                                                     GRPC_ERROR_REF(error));
      }
      request->pick_canceller_ = nullptr;
      GRPC_CALL_STACK_UNREF(request->owning_call_, "pick_callback_cancel");
    }
    Delete(self);
  }

  RequestRouter* request_router_;
  Request* request_;
  const bool tracer_enabled_;
  grpc_closure cancel_closure_;
  bool finished_ = false;
};
//
// RequestRouter::Request
//
// Constructs a routing request for a single call.  Stores the caller's
// closures and pointers (no ownership is taken) and seeds the pick_ state
// with the call's initial metadata; request_router_ is filled in later by
// RequestRouter when the request is routed.
RequestRouter::Request::Request(grpc_call_stack* owning_call,
                                grpc_call_combiner* call_combiner,
                                grpc_polling_entity* pollent,
                                grpc_metadata_batch* send_initial_metadata,
                                uint32_t* send_initial_metadata_flags,
                                ApplyServiceConfigCallback apply_service_config,
                                void* apply_service_config_user_data,
                                grpc_closure* on_route_done)
    : owning_call_(owning_call),
      call_combiner_(call_combiner),
      pollent_(pollent),
      apply_service_config_(apply_service_config),
      apply_service_config_user_data_(apply_service_config_user_data),
      on_route_done_(on_route_done) {
  pick_.initial_metadata = send_initial_metadata;
  pick_.initial_metadata_flags = send_initial_metadata_flags;
}
// Releases resources held by the pick: drops the connected subchannel ref
// (if any) and runs the destroy callbacks for any context elements that
// were attached to the subchannel call context.
RequestRouter::Request::~Request() {
  // reset() is a no-op on a null RefCountedPtr, so no null check is needed.
  pick_.connected_subchannel.reset();
  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
    if (pick_.subchannel_call_context[i].destroy != nullptr) {
      pick_.subchannel_call_context[i].destroy(
          pick_.subchannel_call_context[i].value);
    }
  }
}
// Invoked once resolver results are available.
// Applies any service config data to the call, then kicks off the LB pick.
void RequestRouter::Request::ProcessServiceConfigAndStartLbPickLocked() {
  // Get service config data if needed; only proceed to the pick if the
  // service config was applied successfully.
  if (apply_service_config_(apply_service_config_user_data_)) {
    // Start LB pick.
    StartLbPickLocked();
  }
}
// Adds the call's polling entity to the router's interested_parties,
// unless it has already been added.  Idempotent.
void RequestRouter::Request::MaybeAddCallToInterestedPartiesLocked() {
  if (pollent_added_to_interested_parties_) return;  // Already registered.
  pollent_added_to_interested_parties_ = true;
  grpc_polling_entity_add_to_pollset_set(pollent_,
                                         request_router_->interested_parties_);
}
// Removes the call's polling entity from the router's interested_parties,
// if it was previously added.  Idempotent.
void RequestRouter::Request::MaybeRemoveCallFromInterestedPartiesLocked() {
  if (!pollent_added_to_interested_parties_) return;  // Nothing to undo.
  pollent_added_to_interested_parties_ = false;
  grpc_polling_entity_del_from_pollset_set(pollent_,
                                           request_router_->interested_parties_);
}
// Starts a pick on the LB policy.
// Takes a "pick_callback" ref on the owning call stack for the duration of
// the pick; it is released here on synchronous completion or in
// LbPickDoneLocked() on asynchronous completion.
void RequestRouter::Request::StartLbPickLocked() {
  if (request_router_->tracer_->enabled()) {
    gpr_log(GPR_INFO,
            "request_router=%p request=%p: starting pick on lb_policy=%p",
            request_router_, this, request_router_->lb_policy_.get());
  }
  GRPC_CLOSURE_INIT(&on_pick_done_, &LbPickDoneLocked, this,
                    grpc_combiner_scheduler(request_router_->combiner_));
  pick_.on_complete = &on_pick_done_;
  GRPC_CALL_STACK_REF(owning_call_, "pick_callback");
  grpc_error* error = GRPC_ERROR_NONE;
  const bool pick_done =
      request_router_->lb_policy_->PickLocked(&pick_, &error);
  if (pick_done) {
    // Pick completed synchronously.
    if (request_router_->tracer_->enabled()) {
      gpr_log(GPR_INFO,
              "request_router=%p request=%p: pick completed synchronously",
              request_router_, this);
    }
    // on_route_done_ takes ownership of error; drop the pick ref.
    GRPC_CLOSURE_RUN(on_route_done_, error);
    GRPC_CALL_STACK_UNREF(owning_call_, "pick_callback");
  } else {
    // Pick will be returned asynchronously.
    // Add the request's polling entity to the request_router's
    // interested_parties, so that the I/O of the LB policy can be done
    // under it. It will be removed in LbPickDoneLocked().
    MaybeAddCallToInterestedPartiesLocked();
    // Request notification on call cancellation.
    // We allocate a separate object to track cancellation, since the
    // cancellation closure might still be pending when we need to reuse
    // the memory in which this Request object is stored for a subsequent
    // retry attempt.
    pick_canceller_ = New<AsyncPickCanceller>(this);
  }
}
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes on_route_done_.
void RequestRouter::Request::LbPickDoneLocked(void* arg, grpc_error* error) {
Request* self = static_cast<Request*>(arg);
RequestRouter* request_router = self->request_router_;
if (request_router->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: pick completed asynchronously",
request_router, self);
}
self->MaybeRemoveCallFromInterestedPartiesLocked();
if (self->pick_canceller_ != nullptr) {
self->pick_canceller_->MarkFinishedLocked();
}
GRPC_CLOSURE_RUN(self->on_route_done_, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(self->owning_call_, "pick_callback");
}
//
// RequestRouter::LbConnectivityWatcher
//
// Watches the connectivity state of the current LB policy and mirrors it
// into the RequestRouter's state tracker.
//
// Lifetime: heap-allocated with New<>() and self-deleting.  The watch
// callback deletes the object when the watched policy is no longer the
// router's current policy, or when the policy reaches SHUTDOWN; otherwise
// it renews the watch.  Holds a ref on the owning channel stack so the
// stack outlives the pending watch callback.
class RequestRouter::LbConnectivityWatcher {
 public:
  LbConnectivityWatcher(RequestRouter* request_router,
                        grpc_connectivity_state state,
                        LoadBalancingPolicy* lb_policy,
                        grpc_channel_stack* owning_stack,
                        grpc_combiner* combiner)
      : request_router_(request_router),
        state_(state),
        lb_policy_(lb_policy),
        owning_stack_(owning_stack) {
    GRPC_CHANNEL_STACK_REF(owning_stack_, "LbConnectivityWatcher");
    GRPC_CLOSURE_INIT(&on_changed_, &OnLbPolicyStateChangedLocked, this,
                      grpc_combiner_scheduler(combiner));
    lb_policy_->NotifyOnStateChangeLocked(&state_, &on_changed_);
  }

  ~LbConnectivityWatcher() {
    GRPC_CHANNEL_STACK_UNREF(owning_stack_, "LbConnectivityWatcher");
  }

 private:
  static void OnLbPolicyStateChangedLocked(void* arg, grpc_error* error) {
    LbConnectivityWatcher* self = static_cast<LbConnectivityWatcher*>(arg);
    // If the notification is not for the current policy, we're stale,
    // so delete ourselves.
    if (self->lb_policy_ != self->request_router_->lb_policy_.get()) {
      Delete(self);
      return;
    }
    // Otherwise, process notification.
    if (self->request_router_->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: lb_policy=%p state changed to %s",
              self->request_router_, self->lb_policy_,
              grpc_connectivity_state_name(self->state_));
    }
    self->request_router_->SetConnectivityStateLocked(
        self->state_, GRPC_ERROR_REF(error), "lb_changed");
    // If shutting down, terminate watch.
    if (self->state_ == GRPC_CHANNEL_SHUTDOWN) {
      Delete(self);
      return;
    }
    // Renew watch.
    self->lb_policy_->NotifyOnStateChangeLocked(&self->state_,
                                                &self->on_changed_);
  }

  RequestRouter* request_router_;
  grpc_connectivity_state state_;
  // LB policy address. No ref held, so not safe to dereference unless
  // it happens to match request_router->lb_policy_.
  LoadBalancingPolicy* lb_policy_;
  grpc_channel_stack* owning_stack_;
  grpc_closure on_changed_;
};
//
// RequestRouter::ReresolutionRequestHandler
//
// Forwards re-resolution requests from the current LB policy to the
// resolver.
//
// Lifetime: heap-allocated with New<>() and self-deleting.  The callback
// deletes the object (and drops its channel stack ref) when the policy is
// stale, an error is reported, or the resolver is gone; otherwise it hands
// the closure back to the policy for the next re-resolution request.
class RequestRouter::ReresolutionRequestHandler {
 public:
  ReresolutionRequestHandler(RequestRouter* request_router,
                             LoadBalancingPolicy* lb_policy,
                             grpc_channel_stack* owning_stack,
                             grpc_combiner* combiner)
      : request_router_(request_router),
        lb_policy_(lb_policy),
        owning_stack_(owning_stack) {
    GRPC_CHANNEL_STACK_REF(owning_stack_, "ReresolutionRequestHandler");
    GRPC_CLOSURE_INIT(&closure_, &OnRequestReresolutionLocked, this,
                      grpc_combiner_scheduler(combiner));
    lb_policy_->SetReresolutionClosureLocked(&closure_);
  }

 private:
  static void OnRequestReresolutionLocked(void* arg, grpc_error* error) {
    ReresolutionRequestHandler* self =
        static_cast<ReresolutionRequestHandler*>(arg);
    RequestRouter* request_router = self->request_router_;
    // If this invocation is for a stale LB policy, treat it as an LB shutdown
    // signal.
    if (self->lb_policy_ != request_router->lb_policy_.get() ||
        error != GRPC_ERROR_NONE || request_router->resolver_ == nullptr) {
      GRPC_CHANNEL_STACK_UNREF(request_router->owning_stack_,
                               "ReresolutionRequestHandler");
      Delete(self);
      return;
    }
    if (request_router->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: started name re-resolving",
              request_router);
    }
    request_router->resolver_->RequestReresolutionLocked();
    // Give back the closure to the LB policy.
    self->lb_policy_->SetReresolutionClosureLocked(&self->closure_);
  }

  RequestRouter* request_router_;
  // LB policy address. No ref held, so not safe to dereference unless
  // it happens to match request_router->lb_policy_.
  LoadBalancingPolicy* lb_policy_;
  grpc_channel_stack* owning_stack_;
  grpc_closure closure_;
};
//
// RequestRouter
//
// Constructs a RequestRouter: selects the subchannel pool, initializes the
// connectivity state tracker (starting in IDLE), and creates the resolver.
// On resolver-creation failure, *error is set and the object is left in a
// state where resolver_ == nullptr.
RequestRouter::RequestRouter(
    grpc_channel_stack* owning_stack, grpc_combiner* combiner,
    grpc_client_channel_factory* client_channel_factory,
    grpc_pollset_set* interested_parties, TraceFlag* tracer,
    ProcessResolverResultCallback process_resolver_result,
    void* process_resolver_result_user_data, const char* target_uri,
    const grpc_channel_args* args, grpc_error** error)
    : owning_stack_(owning_stack),
      combiner_(combiner),
      client_channel_factory_(client_channel_factory),
      interested_parties_(interested_parties),
      tracer_(tracer),
      process_resolver_result_(process_resolver_result),
      process_resolver_result_user_data_(process_resolver_result_user_data) {
  // Get subchannel pool.
  // A local pool is used if the channel arg requests it; otherwise the
  // process-wide global pool is shared.
  const grpc_arg* arg =
      grpc_channel_args_find(args, GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL);
  if (grpc_channel_arg_get_bool(arg, false)) {
    subchannel_pool_ = MakeRefCounted<LocalSubchannelPool>();
  } else {
    subchannel_pool_ = GlobalSubchannelPool::instance();
  }
  GRPC_CLOSURE_INIT(&on_resolver_result_changed_,
                    &RequestRouter::OnResolverResultChangedLocked, this,
                    grpc_combiner_scheduler(combiner));
  grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
                               "request_router");
  // If the caller does not process resolver results (no service config
  // handling), tell the resolver not to fetch service configs.
  grpc_channel_args* new_args = nullptr;
  if (process_resolver_result == nullptr) {
    grpc_arg arg = grpc_channel_arg_integer_create(
        const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
    new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
  }
  resolver_ = ResolverRegistry::CreateResolver(
      target_uri, (new_args == nullptr ? args : new_args), interested_parties_,
      combiner_);
  grpc_channel_args_destroy(new_args);
  if (resolver_ == nullptr) {
    *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
}
// Tears down the router: shuts down the resolver (if it was never started),
// detaches and destroys the LB policy, releases the client channel factory,
// and destroys the connectivity state tracker.
RequestRouter::~RequestRouter() {
  if (resolver_ != nullptr) {
    // The only way we can get here is if we never started resolving,
    // because we take a ref to the channel stack when we start
    // resolving and do not release it until the resolver callback is
    // invoked after the resolver shuts down.
    resolver_.reset();
  }
  if (lb_policy_ != nullptr) {
    // Detach the policy's pollsets before destroying it.
    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    lb_policy_.reset();
  }
  if (client_channel_factory_ != nullptr) {
    grpc_client_channel_factory_unref(client_channel_factory_);
  }
  grpc_connectivity_state_destroy(&state_tracker_);
}
namespace {

// Returns the channelz trace-event message corresponding to a channel
// connectivity state transition.
const char* GetChannelConnectivityStateChangeString(
    grpc_connectivity_state state) {
  if (state == GRPC_CHANNEL_IDLE) {
    return "Channel state change to IDLE";
  }
  if (state == GRPC_CHANNEL_CONNECTING) {
    return "Channel state change to CONNECTING";
  }
  if (state == GRPC_CHANNEL_READY) {
    return "Channel state change to READY";
  }
  if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
    return "Channel state change to TRANSIENT_FAILURE";
  }
  if (state == GRPC_CHANNEL_SHUTDOWN) {
    return "Channel state change to SHUTDOWN";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

}  // namespace
// Sets the channel's connectivity state, taking ownership of \a error.
// As a side effect, cancels LB picks that can no longer succeed in the new
// state, and records a channelz trace event if channelz is enabled.
void RequestRouter::SetConnectivityStateLocked(grpc_connectivity_state state,
                                               grpc_error* error,
                                               const char* reason) {
  if (lb_policy_ != nullptr) {
    if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      // Cancel picks with wait_for_ready=false.
      lb_policy_->CancelMatchingPicksLocked(
          /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
          /* check= */ 0, GRPC_ERROR_REF(error));
    } else if (state == GRPC_CHANNEL_SHUTDOWN) {
      // Cancel all picks.
      lb_policy_->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
                                            GRPC_ERROR_REF(error));
    }
  }
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: setting connectivity state to %s",
            this, grpc_connectivity_state_name(state));
  }
  if (channelz_node_ != nullptr) {
    channelz_node_->AddTraceEvent(
        channelz::ChannelTrace::Severity::Info,
        grpc_slice_from_static_string(
            GetChannelConnectivityStateChangeString(state)));
  }
  grpc_connectivity_state_set(&state_tracker_, state, error, reason);
}
// Kicks off name resolution.  Must be called at most once; takes a
// "resolver" ref on the owning channel stack that is released when the
// resolver callback fires after shutdown (see OnResolverShutdownLocked()).
void RequestRouter::StartResolvingLocked() {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: starting name resolution", this);
  }
  GPR_ASSERT(!started_resolving_);
  started_resolving_ = true;
  GRPC_CHANNEL_STACK_REF(owning_stack_, "resolver");
  resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
// Takes ownership of \a error.  Shuts down the LB policy, fails any calls
// still waiting for a resolver result, drops the "resolver" channel stack
// ref taken in StartResolvingLocked(), and frees the last resolver result.
void RequestRouter::OnResolverShutdownLocked(grpc_error* error) {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "request_router=%p: shutting down", this);
  }
  if (lb_policy_ != nullptr) {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
              lb_policy_.get());
    }
    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    lb_policy_.reset();
  }
  if (resolver_ != nullptr) {
    // This should never happen; it can only be triggered by a resolver
    // implementation spontaneously deciding to report shutdown without
    // being orphaned. This code is included just to be defensive.
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO,
              "request_router=%p: spontaneous shutdown from resolver %p", this,
              resolver_.get());
    }
    resolver_.reset();
    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN,
                               GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Resolver spontaneous shutdown", &error, 1),
                               "resolver_spontaneous_shutdown");
  }
  // Fail all calls still waiting for a resolver result.
  grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
                             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                 "Channel disconnected", &error, 1));
  GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
  GRPC_CHANNEL_STACK_UNREF(owning_stack_, "resolver");
  grpc_channel_args_destroy(resolver_result_);
  resolver_result_ = nullptr;
  GRPC_ERROR_UNREF(error);
}
// Creates a new LB policy, replacing any previous one.
// If the new policy is created successfully, sets *connectivity_state and
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
// On failure a message is appended to *trace_strings (when channelz is
// enabled) and the existing policy, if any, remains in place.
void RequestRouter::CreateNewLbPolicyLocked(
    const char* lb_policy_name, grpc_json* lb_config,
    grpc_connectivity_state* connectivity_state,
    grpc_error** connectivity_error, TraceStringVector* trace_strings) {
  // Assemble the new policy's args from our own state.
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner_;
  lb_policy_args.client_channel_factory = client_channel_factory_;
  lb_policy_args.subchannel_pool = subchannel_pool_;
  lb_policy_args.args = resolver_result_;
  lb_policy_args.lb_config = lb_config;
  OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
      LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(lb_policy_name,
                                                             lb_policy_args);
  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
    if (channelz_node_ != nullptr) {
      char* str;
      gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
      trace_strings->push_back(str);
    }
  } else {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: created new LB policy \"%s\" (%p)",
              this, lb_policy_name, new_lb_policy.get());
    }
    if (channelz_node_ != nullptr) {
      char* str;
      gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
      trace_strings->push_back(str);
    }
    // Swap out the LB policy and update the fds in interested_parties_.
    if (lb_policy_ != nullptr) {
      if (tracer_->enabled()) {
        gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
                lb_policy_.get());
      }
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties_);
      // Let the old policy transfer in-flight picks to the new one
      // before it is destroyed.
      lb_policy_->HandOffPendingPicksLocked(new_lb_policy.get());
    }
    lb_policy_ = std::move(new_lb_policy);
    grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties_);
    // Create re-resolution request handler for the new LB policy. It
    // will delete itself when no longer needed.
    New<ReresolutionRequestHandler>(this, lb_policy_.get(), owning_stack_,
                                    combiner_);
    // Get the new LB policy's initial connectivity state and start a
    // connectivity watch.
    GRPC_ERROR_UNREF(*connectivity_error);
    *connectivity_state =
        lb_policy_->CheckConnectivityLocked(connectivity_error);
    if (exit_idle_when_lb_policy_arrives_) {
      lb_policy_->ExitIdleLocked();
      exit_idle_when_lb_policy_arrives_ = false;
    }
    // Create new watcher. It will delete itself when done.
    New<LbConnectivityWatcher>(this, *connectivity_state, lb_policy_.get(),
                               owning_stack_, combiner_);
  }
}
// Appends a trace string when the resolved address list transitions
// between empty and non-empty; remembers the new state for next time.
void RequestRouter::MaybeAddTraceMessagesForAddressChangesLocked(
    TraceStringVector* trace_strings) {
  const ServerAddressList* addresses =
      FindServerAddressListChannelArg(resolver_result_);
  const bool have_addresses = addresses != nullptr && addresses->size() > 0;
  // Only the two transition cases produce a trace message.
  if (have_addresses != previous_resolution_contained_addresses_) {
    trace_strings->push_back(
        gpr_strdup(have_addresses ? "Address list became non-empty"
                                  : "Address list became empty"));
  }
  previous_resolution_contained_addresses_ = have_addresses;
}
// Joins the strings in *trace_strings (comma-separated, prefixed with
// "Resolution event: ") into one channelz trace event.  Ownership of
// each string is transferred to the strvec, which frees them on destroy.
// Callers only invoke this when channelz_node_ != nullptr (see
// OnResolverResultChangedLocked()).
void RequestRouter::ConcatenateAndAddChannelTraceLocked(
    TraceStringVector* trace_strings) const {
  if (trace_strings->empty()) return;
  gpr_strvec v;
  gpr_strvec_init(&v);
  gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
  // Fix: initialize bool with a bool literal, not the int literal 1.
  bool is_first = true;
  for (size_t i = 0; i < trace_strings->size(); ++i) {
    if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
    is_first = false;
    gpr_strvec_add(&v, (*trace_strings)[i]);
  }
  size_t flat_len = 0;
  // Declare-and-initialize in one step instead of assign-after-declare.
  char* flat = gpr_strvec_flatten(&v, &flat_len);
  // grpc_slice_new() takes ownership of |flat| (freed with gpr_free).
  channelz_node_->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
                                grpc_slice_new(flat, flat_len, gpr_free));
  gpr_strvec_destroy(&v);
}
// Callback invoked when a resolver result is available.
// Drives the whole resolution pipeline: handles shutdown, transient
// failure, LB policy creation/update, channelz tracing, and renews the
// resolver watch.  |error| is borrowed (reffed before being passed on).
void RequestRouter::OnResolverResultChangedLocked(void* arg,
                                                  grpc_error* error) {
  RequestRouter* self = static_cast<RequestRouter*>(arg);
  if (self->tracer_->enabled()) {
    const char* disposition =
        self->resolver_result_ != nullptr
            ? ""
            : (error == GRPC_ERROR_NONE ? " (transient error)"
                                        : " (resolver shutdown)");
    gpr_log(GPR_INFO,
            "request_router=%p: got resolver result: resolver_result=%p "
            "error=%s%s",
            self, self->resolver_result_, grpc_error_string(error),
            disposition);
  }
  // Handle shutdown.
  if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
    self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
    return;
  }
  // Data used to set the channel's connectivity state.
  bool set_connectivity_state = true;
  // We only want to trace the address resolution in the following cases:
  // (a) Address resolution resulted in service config change.
  // (b) Address resolution that causes number of backends to go from
  //     zero to non-zero.
  // (c) Address resolution that causes number of backends to go from
  //     non-zero to zero.
  // (d) Address resolution that causes a new LB policy to be created.
  //
  // we track a list of strings to eventually be concatenated and traced.
  TraceStringVector trace_strings;
  // Defaults used only when CreateNewLbPolicyLocked() leaves them
  // unchanged (i.e. policy creation failed).
  grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  grpc_error* connectivity_error =
      GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
  // resolver_result_ will be null in the case of a transient
  // resolution error.  In that case, we don't have any new result to
  // process, which means that we keep using the previous result (if any).
  if (self->resolver_result_ == nullptr) {
    if (self->tracer_->enabled()) {
      gpr_log(GPR_INFO, "request_router=%p: resolver transient failure", self);
    }
    // Don't override connectivity state if we already have an LB policy.
    if (self->lb_policy_ != nullptr) set_connectivity_state = false;
  } else {
    // Parse the resolver result.
    const char* lb_policy_name = nullptr;
    grpc_json* lb_policy_config = nullptr;
    const bool service_config_changed = self->process_resolver_result_(
        self->process_resolver_result_user_data_, *self->resolver_result_,
        &lb_policy_name, &lb_policy_config);
    GPR_ASSERT(lb_policy_name != nullptr);
    // Check to see if we're already using the right LB policy.
    const bool lb_policy_name_changed =
        self->lb_policy_ == nullptr ||
        strcmp(self->lb_policy_->name(), lb_policy_name) != 0;
    if (self->lb_policy_ != nullptr && !lb_policy_name_changed) {
      // Continue using the same LB policy.  Update with new addresses.
      if (self->tracer_->enabled()) {
        gpr_log(GPR_INFO,
                "request_router=%p: updating existing LB policy \"%s\" (%p)",
                self, lb_policy_name, self->lb_policy_.get());
      }
      self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
      // No need to set the channel's connectivity state; the existing
      // watch on the LB policy will take care of that.
      set_connectivity_state = false;
    } else {
      // Instantiate new LB policy.
      self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config,
                                    &connectivity_state, &connectivity_error,
                                    &trace_strings);
    }
    // Add channel trace event.
    if (self->channelz_node_ != nullptr) {
      if (service_config_changed) {
        // TODO(ncteisen): might be worth somehow including a snippet of the
        // config in the trace, at the risk of bloating the trace logs.
        trace_strings.push_back(gpr_strdup("Service config changed"));
      }
      self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
      self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
    }
    // Clean up.
    grpc_channel_args_destroy(self->resolver_result_);
    self->resolver_result_ = nullptr;
  }
  // Set the channel's connectivity state if needed.
  if (set_connectivity_state) {
    // Ownership of connectivity_error passes to the state tracker here.
    self->SetConnectivityStateLocked(connectivity_state, connectivity_error,
                                     "resolver_result");
  } else {
    GRPC_ERROR_UNREF(connectivity_error);
  }
  // Invoke closures that were waiting for results and renew the watch.
  GRPC_CLOSURE_LIST_SCHED(&self->waiting_for_resolver_result_closures_);
  self->resolver_->NextLocked(&self->resolver_result_,
                              &self->on_resolver_result_changed_);
}
// Entry point for routing one call: either starts an LB pick right away,
// fails the call if the channel is disconnected, or queues the call until
// a resolver result arrives.
void RequestRouter::RouteCallLocked(Request* request) {
  GPR_ASSERT(request->pick_.connected_subchannel == nullptr);
  request->request_router_ = this;
  if (lb_policy_ != nullptr) {
    // We already have resolver results, so process the service config
    // and start an LB pick.
    request->ProcessServiceConfigAndStartLbPickLocked();
    return;
  }
  if (resolver_ == nullptr) {
    // No resolver: the channel has been disconnected; fail the call.
    GRPC_CLOSURE_RUN(request->on_route_done_,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
    return;
  }
  // We do not yet have an LB policy, so wait for a resolver result.
  if (!started_resolving_) StartResolvingLocked();
  // Create a new waiter, which will delete itself when done.
  New<Request::ResolverResultWaiter>(request);
  // Add the request's polling entity to the request_router's
  // interested_parties, so that the I/O of the resolver can be done
  // under it. It will be removed in LbPickDoneLocked().
  request->MaybeAddCallToInterestedPartiesLocked();
}
// Shuts down resolution and the LB policy.  Takes ownership of |error|.
// NOTE(review): the lb_policy_ teardown is nested inside the resolver_
// check -- presumably resolver_ == nullptr implies a prior shutdown
// already tore the policy down; confirm before flattening.
void RequestRouter::ShutdownLocked(grpc_error* error) {
  if (resolver_ != nullptr) {
    SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
                               "disconnect");
    resolver_.reset();
    if (!started_resolving_) {
      // Resolution never started, so no resolver callback will fire;
      // fail any queued waiters here instead.
      grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
                                 GRPC_ERROR_REF(error));
      GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
    }
    if (lb_policy_ != nullptr) {
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties_);
      lb_policy_.reset();
    }
  }
  GRPC_ERROR_UNREF(error);
}
// Returns the channel's current connectivity state as recorded by the
// state tracker.
grpc_connectivity_state RequestRouter::GetConnectivityState() {
  return grpc_connectivity_state_check(&state_tracker_);
}
// Registers |closure| to run when the connectivity state leaves *state;
// delegates entirely to the state tracker.
void RequestRouter::NotifyOnConnectivityStateChange(
    grpc_connectivity_state* state, grpc_closure* closure) {
  grpc_connectivity_state_notify_on_state_change(&state_tracker_, state,
                                                 closure);
}
// Forces the channel out of IDLE.  If there is no LB policy yet, records
// the request so the policy exits idle as soon as it is created, and
// starts resolution if it has not begun.
void RequestRouter::ExitIdleLocked() {
  if (lb_policy_ != nullptr) {
    lb_policy_->ExitIdleLocked();
    return;
  }
  exit_idle_when_lb_policy_arrives_ = true;
  if (!started_resolving_ && resolver_ != nullptr) {
    StartResolvingLocked();
  }
}
// Resets backoff state in both the resolver (also forcing an immediate
// re-resolution) and the LB policy, when each exists.
void RequestRouter::ResetConnectionBackoffLocked() {
  if (resolver_ != nullptr) {
    resolver_->ResetBackoffLocked();
    resolver_->RequestReresolutionLocked();
  }
  if (lb_policy_ != nullptr) lb_policy_->ResetBackoffLocked();
}
} // namespace grpc_core

@ -1,181 +0,0 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
// Handles name resolution and LB policy lifecycle for a client channel.
// All *Locked() methods must be invoked from within the channel combiner.
class RequestRouter {
 public:
  // One in-flight routing request: applies the service config and
  // performs an LB pick for a single call, waiting for a resolver
  // result first if necessary.
  class Request {
   public:
    // Synchronous callback that applies the service config to a call.
    // Returns false if the call should be failed.
    typedef bool (*ApplyServiceConfigCallback)(void* user_data);

    Request(grpc_call_stack* owning_call, grpc_call_combiner* call_combiner,
            grpc_polling_entity* pollent,
            grpc_metadata_batch* send_initial_metadata,
            uint32_t* send_initial_metadata_flags,
            ApplyServiceConfigCallback apply_service_config,
            void* apply_service_config_user_data, grpc_closure* on_route_done);

    ~Request();

    // TODO(roth): It seems a bit ugly to expose this member in a
    // non-const way. Find a better API to avoid this.
    LoadBalancingPolicy::PickState* pick() { return &pick_; }

   private:
    friend class RequestRouter;

    class ResolverResultWaiter;
    class AsyncPickCanceller;

    void ProcessServiceConfigAndStartLbPickLocked();
    void StartLbPickLocked();
    static void LbPickDoneLocked(void* arg, grpc_error* error);

    void MaybeAddCallToInterestedPartiesLocked();
    void MaybeRemoveCallFromInterestedPartiesLocked();

    // Populated by caller.
    grpc_call_stack* owning_call_;
    grpc_call_combiner* call_combiner_;
    grpc_polling_entity* pollent_;
    ApplyServiceConfigCallback apply_service_config_;
    void* apply_service_config_user_data_;
    grpc_closure* on_route_done_;
    LoadBalancingPolicy::PickState pick_;

    // Internal state.
    RequestRouter* request_router_ = nullptr;
    bool pollent_added_to_interested_parties_ = false;
    grpc_closure on_pick_done_;
    AsyncPickCanceller* pick_canceller_ = nullptr;
  };

  // Synchronous callback that takes the service config JSON string and
  // LB policy name.
  // Returns true if the service config has changed since the last result.
  typedef bool (*ProcessResolverResultCallback)(void* user_data,
                                                const grpc_channel_args& args,
                                                const char** lb_policy_name,
                                                grpc_json** lb_policy_config);

  // On failure, *error is set and the router must not be used.
  RequestRouter(grpc_channel_stack* owning_stack, grpc_combiner* combiner,
                grpc_client_channel_factory* client_channel_factory,
                grpc_pollset_set* interested_parties, TraceFlag* tracer,
                ProcessResolverResultCallback process_resolver_result,
                void* process_resolver_result_user_data, const char* target_uri,
                const grpc_channel_args* args, grpc_error** error);

  ~RequestRouter();

  void set_channelz_node(channelz::ClientChannelNode* channelz_node) {
    channelz_node_ = channelz_node;
  }

  // Starts routing a call; see Request for completion semantics.
  void RouteCallLocked(Request* request);

  // TODO(roth): Add methods to cancel picks.

  // Takes ownership of |error|.
  void ShutdownLocked(grpc_error* error);

  void ExitIdleLocked();
  void ResetConnectionBackoffLocked();

  grpc_connectivity_state GetConnectivityState();
  void NotifyOnConnectivityStateChange(grpc_connectivity_state* state,
                                       grpc_closure* closure);

  // May return nullptr if no LB policy has been created yet.
  LoadBalancingPolicy* lb_policy() const { return lb_policy_.get(); }

 private:
  using TraceStringVector = InlinedVector<char*, 3>;

  class ReresolutionRequestHandler;
  class LbConnectivityWatcher;

  void StartResolvingLocked();
  void OnResolverShutdownLocked(grpc_error* error);
  void CreateNewLbPolicyLocked(const char* lb_policy_name, grpc_json* lb_config,
                               grpc_connectivity_state* connectivity_state,
                               grpc_error** connectivity_error,
                               TraceStringVector* trace_strings);
  void MaybeAddTraceMessagesForAddressChangesLocked(
      TraceStringVector* trace_strings);
  void ConcatenateAndAddChannelTraceLocked(
      TraceStringVector* trace_strings) const;
  static void OnResolverResultChangedLocked(void* arg, grpc_error* error);

  void SetConnectivityStateLocked(grpc_connectivity_state state,
                                  grpc_error* error, const char* reason);

  // Passed in from caller at construction time.
  grpc_channel_stack* owning_stack_;
  grpc_combiner* combiner_;
  grpc_client_channel_factory* client_channel_factory_;
  grpc_pollset_set* interested_parties_;
  TraceFlag* tracer_;

  channelz::ClientChannelNode* channelz_node_ = nullptr;

  // Resolver and associated state.
  OrphanablePtr<Resolver> resolver_;
  ProcessResolverResultCallback process_resolver_result_;
  void* process_resolver_result_user_data_;
  bool started_resolving_ = false;
  grpc_channel_args* resolver_result_ = nullptr;
  bool previous_resolution_contained_addresses_ = false;
  grpc_closure_list waiting_for_resolver_result_closures_;
  grpc_closure on_resolver_result_changed_;

  // LB policy and associated state.
  OrphanablePtr<LoadBalancingPolicy> lb_policy_;
  bool exit_idle_when_lb_policy_arrives_ = false;

  // Subchannel pool to pass to LB policy.
  RefCountedPtr<SubchannelPoolInterface> subchannel_pool_;

  grpc_connectivity_state_tracker state_tracker_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H */

@ -0,0 +1,452 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/resolving_lb_policy.h"
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_metadata.h"
namespace grpc_core {
//
// ResolvingLoadBalancingPolicy::ResolvingControlHelper
//
// ChannelControlHelper handed to the child LB policy.  Wraps the parent
// policy's own helper, filtering out operations that arrive after
// shutdown (detected via parent_->resolver_ == nullptr).
class ResolvingLoadBalancingPolicy::ResolvingControlHelper
    : public LoadBalancingPolicy::ChannelControlHelper {
 public:
  explicit ResolvingControlHelper(
      RefCountedPtr<ResolvingLoadBalancingPolicy> parent)
      : parent_(std::move(parent)) {}

  Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
    if (parent_->resolver_ == nullptr) return nullptr;  // Shutting down.
    return parent_->channel_control_helper()->CreateSubchannel(args);
  }

  grpc_channel* CreateChannel(const char* target, grpc_client_channel_type type,
                              const grpc_channel_args& args) override {
    if (parent_->resolver_ == nullptr) return nullptr;  // Shutting down.
    return parent_->channel_control_helper()->CreateChannel(target, type, args);
  }

  // Forwards state/picker updates from the child policy to the parent's
  // helper; drops (and unrefs) updates received after shutdown.
  void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
                   UniquePtr<SubchannelPicker> picker) override {
    if (parent_->resolver_ == nullptr) {
      // shutting down.
      GRPC_ERROR_UNREF(state_error);
      return;
    }
    parent_->channel_control_helper()->UpdateState(state, state_error,
                                                   std::move(picker));
  }

  void RequestReresolution() override {
    if (parent_->tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: started name re-resolving",
              parent_.get());
    }
    if (parent_->resolver_ != nullptr) {
      parent_->resolver_->RequestReresolutionLocked();
    }
  }

 private:
  RefCountedPtr<ResolvingLoadBalancingPolicy> parent_;
};
//
// ResolvingLoadBalancingPolicy
//
// Ctor for use as a nested LB policy with a fixed child policy name and
// config (no service-config processing).  On failure, *error is set.
ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
    Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
    UniquePtr<char> child_policy_name, grpc_json* child_lb_config,
    grpc_error** error)
    : LoadBalancingPolicy(std::move(args)),
      tracer_(tracer),
      target_uri_(std::move(target_uri)),
      child_policy_name_(std::move(child_policy_name)),
      // Deep-copy the config via dump + re-parse so this object owns its
      // own JSON tree independent of the caller's.
      child_lb_config_str_(grpc_json_dump_to_string(child_lb_config, 0)),
      child_lb_config_(grpc_json_parse_string(child_lb_config_str_.get())) {
  GPR_ASSERT(child_policy_name_ != nullptr);
  // Don't fetch service config, since this ctor is for use in nested LB
  // policies, not at the top level, and we only fetch the service
  // config at the top level.
  grpc_arg arg = grpc_channel_arg_integer_create(
      const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
  // NOTE(review): reads args.args after std::move(args) above; relies on
  // the move leaving the raw channel-args pointer intact -- confirm.
  grpc_channel_args* new_args =
      grpc_channel_args_copy_and_add(args.args, &arg, 1);
  *error = Init(*new_args);
  grpc_channel_args_destroy(new_args);
}
// Ctor for top-level use: the LB policy name/config come from the
// caller-supplied resolver-result callback.  On failure, *error is set.
ResolvingLoadBalancingPolicy::ResolvingLoadBalancingPolicy(
    Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
    ProcessResolverResultCallback process_resolver_result,
    void* process_resolver_result_user_data, grpc_error** error)
    : LoadBalancingPolicy(std::move(args)),
      tracer_(tracer),
      target_uri_(std::move(target_uri)),
      process_resolver_result_(process_resolver_result),
      process_resolver_result_user_data_(process_resolver_result_user_data) {
  GPR_ASSERT(process_resolver_result != nullptr);
  // NOTE(review): reads args.args after std::move(args) above; relies on
  // the move leaving the raw channel-args pointer intact -- confirm.
  *error = Init(*args.args);
}
// Shared initialization for both ctors: creates the resolver and
// reports an initial IDLE state with a queueing picker.  Returns a
// non-OK error if resolver creation fails.
grpc_error* ResolvingLoadBalancingPolicy::Init(const grpc_channel_args& args) {
  GRPC_CLOSURE_INIT(
      &on_resolver_result_changed_,
      &ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked, this,
      grpc_combiner_scheduler(combiner()));
  resolver_ = ResolverRegistry::CreateResolver(
      target_uri_.get(), &args, interested_parties(), combiner());
  if (resolver_ == nullptr) {
    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
  }
  // Return our picker to the channel.
  channel_control_helper()->UpdateState(
      GRPC_CHANNEL_IDLE, GRPC_ERROR_NONE,
      UniquePtr<SubchannelPicker>(New<QueuePicker>(Ref())));
  return GRPC_ERROR_NONE;
}
// All teardown must already have happened via ShutdownLocked(); only the
// owned JSON config copy is freed here.
ResolvingLoadBalancingPolicy::~ResolvingLoadBalancingPolicy() {
  GPR_ASSERT(lb_policy_ == nullptr);
  GPR_ASSERT(resolver_ == nullptr);
  grpc_json_destroy(child_lb_config_);
}
// Orphans the resolver and child policy.  resolver_ == nullptr doubles
// as the "already shut down" flag, which is why the lb_policy_ teardown
// is nested inside the resolver_ check.
void ResolvingLoadBalancingPolicy::ShutdownLocked() {
  if (resolver_ != nullptr) {
    resolver_.reset();
    if (lb_policy_ != nullptr) {
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties());
      lb_policy_.reset();
    }
  }
}
// Forces the policy out of IDLE: delegates to the child policy when one
// exists, otherwise starts name resolution if it has not begun.
void ResolvingLoadBalancingPolicy::ExitIdleLocked() {
  if (lb_policy_ != nullptr) {
    lb_policy_->ExitIdleLocked();
    return;
  }
  if (!started_resolving_ && resolver_ != nullptr) {
    StartResolvingLocked();
  }
}
// Resets backoff state in the resolver (also forcing an immediate
// re-resolution) and in the child policy, when each exists.
void ResolvingLoadBalancingPolicy::ResetBackoffLocked() {
  if (resolver_ != nullptr) {
    resolver_->ResetBackoffLocked();
    resolver_->RequestReresolutionLocked();
  }
  if (lb_policy_ != nullptr) lb_policy_->ResetBackoffLocked();
}
// Reports channelz child refs by delegating to the child policy; there
// is nothing to report before one exists.
void ResolvingLoadBalancingPolicy::FillChildRefsForChannelz(
    channelz::ChildRefsList* child_subchannels,
    channelz::ChildRefsList* child_channels) {
  if (lb_policy_ == nullptr) return;
  lb_policy_->FillChildRefsForChannelz(child_subchannels, child_channels);
}
// Kicks off the first resolution request.  May only be called once.
void ResolvingLoadBalancingPolicy::StartResolvingLocked() {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "resolving_lb=%p: starting name resolution", this);
  }
  GPR_ASSERT(!started_resolving_);
  started_resolving_ = true;
  // Take a ref for the duration of the resolver callback chain; it is
  // released by the matching Unref() in OnResolverShutdownLocked().
  Ref().release();
  resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
// Tears down the child LB policy, reports SHUTDOWN if the resolver quit
// spontaneously, and releases the ref taken in StartResolvingLocked().
// Takes ownership of |error|.
void ResolvingLoadBalancingPolicy::OnResolverShutdownLocked(grpc_error* error) {
  if (tracer_->enabled()) {
    gpr_log(GPR_INFO, "resolving_lb=%p: shutting down", this);
  }
  if (lb_policy_ != nullptr) {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
              lb_policy_.get());
    }
    grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties());
    lb_policy_.reset();
  }
  if (resolver_ != nullptr) {
    // This should never happen; it can only be triggered by a resolver
    // implementation spontaneously deciding to report shutdown without
    // being orphaned. This code is included just to be defensive.
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO,
              "resolving_lb=%p: spontaneous shutdown from resolver %p", this,
              resolver_.get());
    }
    resolver_.reset();
    // FIX: the original declared a local named |error| here, shadowing the
    // parameter, and its own initializer passed |&error| -- a reference to
    // the *uninitialized* local itself.  Use a distinct name so the new
    // error correctly wraps the incoming |error|.
    grpc_error* shutdown_error =
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "Resolver spontaneous shutdown", &error, 1);
    // The picker takes ownership of one ref to |shutdown_error|;
    // UpdateState() receives its own ref.
    channel_control_helper()->UpdateState(
        GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(shutdown_error),
        UniquePtr<SubchannelPicker>(
            New<TransientFailurePicker>(shutdown_error)));
  }
  grpc_channel_args_destroy(resolver_result_);
  resolver_result_ = nullptr;
  GRPC_ERROR_UNREF(error);
  // Releases the ref taken in StartResolvingLocked().
  Unref();
}
// Creates a new LB policy, replacing any previous one.
// Updates trace_strings to indicate what was done.
// On failure, the existing policy (if any) remains in place.
void ResolvingLoadBalancingPolicy::CreateNewLbPolicyLocked(
    const char* lb_policy_name, TraceStringVector* trace_strings) {
  LoadBalancingPolicy::Args lb_policy_args;
  lb_policy_args.combiner = combiner();
  // The helper filters out operations the child attempts after shutdown.
  lb_policy_args.channel_control_helper =
      UniquePtr<ChannelControlHelper>(New<ResolvingControlHelper>(Ref()));
  lb_policy_args.args = resolver_result_;
  OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
      LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
          lb_policy_name, std::move(lb_policy_args));
  if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
    gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
    if (channelz_node() != nullptr) {
      char* str;
      gpr_asprintf(&str, "Could not create LB policy \"%s\"", lb_policy_name);
      trace_strings->push_back(str);
    }
  } else {
    if (tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: created new LB policy \"%s\" (%p)",
              this, lb_policy_name, new_lb_policy.get());
    }
    if (channelz_node() != nullptr) {
      char* str;
      gpr_asprintf(&str, "Created new LB policy \"%s\"", lb_policy_name);
      trace_strings->push_back(str);
    }
    // Propagate channelz node.
    auto* channelz = channelz_node();
    if (channelz != nullptr) {
      new_lb_policy->set_channelz_node(channelz->Ref());
    }
    // Swap out the LB policy and update the fds in interested_parties_.
    if (lb_policy_ != nullptr) {
      if (tracer_->enabled()) {
        gpr_log(GPR_INFO, "resolving_lb=%p: shutting down lb_policy=%p", this,
                lb_policy_.get());
      }
      grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
                                       interested_parties());
    }
    lb_policy_ = std::move(new_lb_policy);
    grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
                                     interested_parties());
  }
}
// Appends a trace string when the resolved address list transitions
// between empty and non-empty; remembers the new state for next time.
void ResolvingLoadBalancingPolicy::MaybeAddTraceMessagesForAddressChangesLocked(
    TraceStringVector* trace_strings) {
  const ServerAddressList* addresses =
      FindServerAddressListChannelArg(resolver_result_);
  const bool have_addresses = addresses != nullptr && addresses->size() > 0;
  // Only the two transition cases produce a trace message.
  if (have_addresses != previous_resolution_contained_addresses_) {
    trace_strings->push_back(
        gpr_strdup(have_addresses ? "Address list became non-empty"
                                  : "Address list became empty"));
  }
  previous_resolution_contained_addresses_ = have_addresses;
}
// Joins the strings in *trace_strings (comma-separated, prefixed with
// "Resolution event: ") into one channelz trace event.  Ownership of
// each string is transferred to the strvec, which frees them on destroy.
// Callers only invoke this when channelz_node() != nullptr.
void ResolvingLoadBalancingPolicy::ConcatenateAndAddChannelTraceLocked(
    TraceStringVector* trace_strings) const {
  if (trace_strings->empty()) return;
  gpr_strvec v;
  gpr_strvec_init(&v);
  gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
  // Fix: initialize bool with a bool literal, not the int literal 1.
  bool is_first = true;
  for (size_t i = 0; i < trace_strings->size(); ++i) {
    if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
    is_first = false;
    gpr_strvec_add(&v, (*trace_strings)[i]);
  }
  size_t flat_len = 0;
  // Declare-and-initialize in one step instead of assign-after-declare.
  char* flat = gpr_strvec_flatten(&v, &flat_len);
  // grpc_slice_new() takes ownership of |flat| (freed with gpr_free).
  channelz_node()->AddTraceEvent(channelz::ChannelTrace::Severity::Info,
                                 grpc_slice_new(flat, flat_len, gpr_free));
  gpr_strvec_destroy(&v);
}
// Callback invoked when a resolver result is available.
// Handles shutdown, transient resolution failure, child policy
// creation/update, channelz tracing, and renews the resolver watch.
// |error| is borrowed (reffed before being passed on).
void ResolvingLoadBalancingPolicy::OnResolverResultChangedLocked(
    void* arg, grpc_error* error) {
  auto* self = static_cast<ResolvingLoadBalancingPolicy*>(arg);
  if (self->tracer_->enabled()) {
    const char* disposition =
        self->resolver_result_ != nullptr
            ? ""
            : (error == GRPC_ERROR_NONE ? " (transient error)"
                                        : " (resolver shutdown)");
    gpr_log(GPR_INFO,
            "resolving_lb=%p: got resolver result: resolver_result=%p "
            "error=%s%s",
            self, self->resolver_result_, grpc_error_string(error),
            disposition);
  }
  // Handle shutdown.
  if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
    self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
    return;
  }
  // We only want to trace the address resolution in the following cases:
  // (a) Address resolution resulted in service config change.
  // (b) Address resolution that causes number of backends to go from
  //     zero to non-zero.
  // (c) Address resolution that causes number of backends to go from
  //     non-zero to zero.
  // (d) Address resolution that causes a new LB policy to be created.
  //
  // we track a list of strings to eventually be concatenated and traced.
  TraceStringVector trace_strings;
  // resolver_result_ will be null in the case of a transient
  // resolution error.  In that case, we don't have any new result to
  // process, which means that we keep using the previous result (if any).
  if (self->resolver_result_ == nullptr) {
    if (self->tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: resolver transient failure", self);
    }
    // If we already have an LB policy from a previous resolution
    // result, then we continue to let it set the connectivity state.
    // Otherwise, we go into TRANSIENT_FAILURE.
    if (self->lb_policy_ == nullptr) {
      // TODO(roth): When we change the resolver API to be able to
      // return transient errors in a cleaner way, we should make it the
      // resolver's responsibility to attach a status to the error,
      // rather than doing it centrally here.
      grpc_error* state_error = grpc_error_set_int(
          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
              "Resolver transient failure", &error, 1),
          GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
      // The picker takes ownership of one ref to state_error; the
      // UpdateState() call gets its own.
      self->channel_control_helper()->UpdateState(
          GRPC_CHANNEL_TRANSIENT_FAILURE, GRPC_ERROR_REF(state_error),
          UniquePtr<SubchannelPicker>(
              New<TransientFailurePicker>(state_error)));
    }
  } else {
    // Parse the resolver result.
    const char* lb_policy_name = nullptr;
    grpc_json* lb_policy_config = nullptr;
    bool service_config_changed = false;
    if (self->process_resolver_result_ != nullptr) {
      // Top-level use: the callback extracts name/config from the
      // service config.
      service_config_changed = self->process_resolver_result_(
          self->process_resolver_result_user_data_, *self->resolver_result_,
          &lb_policy_name, &lb_policy_config);
    } else {
      // Nested use: fixed child policy supplied at construction.
      lb_policy_name = self->child_policy_name_.get();
      lb_policy_config = self->child_lb_config_;
    }
    GPR_ASSERT(lb_policy_name != nullptr);
    // If we're not already using the right LB policy name, instantiate
    // a new one.
    if (self->lb_policy_ == nullptr ||
        strcmp(self->lb_policy_->name(), lb_policy_name) != 0) {
      if (self->tracer_->enabled()) {
        gpr_log(GPR_INFO, "resolving_lb=%p: creating new LB policy \"%s\"",
                self, lb_policy_name);
      }
      self->CreateNewLbPolicyLocked(lb_policy_name, &trace_strings);
    }
    // Update the LB policy with the new addresses and config.
    if (self->tracer_->enabled()) {
      gpr_log(GPR_INFO, "resolving_lb=%p: updating LB policy \"%s\" (%p)", self,
              lb_policy_name, self->lb_policy_.get());
    }
    self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
    // Add channel trace event.
    if (self->channelz_node() != nullptr) {
      if (service_config_changed) {
        // TODO(ncteisen): might be worth somehow including a snippet of the
        // config in the trace, at the risk of bloating the trace logs.
        trace_strings.push_back(gpr_strdup("Service config changed"));
      }
      self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
      self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
    }
    // Clean up.
    grpc_channel_args_destroy(self->resolver_result_);
    self->resolver_result_ = nullptr;
  }
  // Renew resolver callback.
  self->resolver_->NextLocked(&self->resolver_result_,
                              &self->on_resolver_result_changed_);
}
} // namespace grpc_core

@ -0,0 +1,135 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
// An LB policy that wraps a resolver and a child LB policy to make use
// of the addresses returned by the resolver.
//
// When used in the client_channel code, the resolver will attempt to
// fetch the service config, and the child LB policy name and config
// will be determined based on the service config.
//
// When used in an LB policy implementation that needs to do another
// round of resolution before creating a child policy, the resolver does
// not fetch the service config, and the caller must pre-determine the
// child LB policy and config to use.
class ResolvingLoadBalancingPolicy : public LoadBalancingPolicy {
public:
// If error is set when this returns, then construction failed, and
// the caller may not use the new object.
ResolvingLoadBalancingPolicy(Args args, TraceFlag* tracer,
UniquePtr<char> target_uri,
UniquePtr<char> child_policy_name,
grpc_json* child_lb_config, grpc_error** error);
// Private ctor, to be used by client_channel only!
//
// Synchronous callback that takes the resolver result and sets
// lb_policy_name and lb_policy_config to point to the right data.
// Returns true if the service config has changed since the last result.
typedef bool (*ProcessResolverResultCallback)(void* user_data,
const grpc_channel_args& args,
const char** lb_policy_name,
grpc_json** lb_policy_config);
// If error is set when this returns, then construction failed, and
// the caller may not use the new object.
ResolvingLoadBalancingPolicy(
Args args, TraceFlag* tracer, UniquePtr<char> target_uri,
ProcessResolverResultCallback process_resolver_result,
void* process_resolver_result_user_data, grpc_error** error);
virtual const char* name() const override { return "resolving_lb"; }
// No-op -- should never get updates from the channel.
// TODO(roth): Need to support updating child LB policy's config for xds
// use case.
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override {}
void ExitIdleLocked() override;
void ResetBackoffLocked() override;
void FillChildRefsForChannelz(
channelz::ChildRefsList* child_subchannels,
channelz::ChildRefsList* child_channels) override;
private:
using TraceStringVector = InlinedVector<char*, 3>;
class ResolvingControlHelper;
~ResolvingLoadBalancingPolicy();
grpc_error* Init(const grpc_channel_args& args);
void ShutdownLocked() override;
void StartResolvingLocked();
void OnResolverShutdownLocked(grpc_error* error);
void CreateNewLbPolicyLocked(const char* lb_policy_name,
TraceStringVector* trace_strings);
void MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings);
void ConcatenateAndAddChannelTraceLocked(
TraceStringVector* trace_strings) const;
static void OnResolverResultChangedLocked(void* arg, grpc_error* error);
// Passed in from caller at construction time.
TraceFlag* tracer_;
UniquePtr<char> target_uri_;
ProcessResolverResultCallback process_resolver_result_ = nullptr;
void* process_resolver_result_user_data_ = nullptr;
UniquePtr<char> child_policy_name_;
UniquePtr<char> child_lb_config_str_;
grpc_json* child_lb_config_ = nullptr;
// Resolver and associated state.
OrphanablePtr<Resolver> resolver_;
bool started_resolving_ = false;
grpc_channel_args* resolver_result_ = nullptr;
bool previous_resolution_contained_addresses_ = false;
grpc_closure on_resolver_result_changed_;
// Child LB policy and associated state.
OrphanablePtr<LoadBalancingPolicy> lb_policy_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVING_LB_POLICY_H */

@ -956,22 +956,17 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
} else if (c->disconnected_) { } else if (c->disconnected_) {
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} else { } else {
c->SetConnectivityStateLocked(
GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Connect Failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
grpc_connectivity_state_set(
&c->state_and_health_tracker_, GRPC_CHANNEL_TRANSIENT_FAILURE,
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Connect Failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
"connect_failed");
const char* errmsg = grpc_error_string(error); const char* errmsg = grpc_error_string(error);
gpr_log(GPR_INFO, "Connect failed: %s", errmsg); gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
error =
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Connect Failed", &error, 1),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
c->SetConnectivityStateLocked(GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "connect_failed");
grpc_connectivity_state_set(&c->state_and_health_tracker_,
GRPC_CHANNEL_TRANSIENT_FAILURE, error,
"connect_failed");
c->MaybeStartConnectingLocked(); c->MaybeStartConnectingLocked();
GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting"); GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
} }

@ -94,8 +94,9 @@ class InternallyRefCounted : public Orphanable {
// Note: RefCount tracing is only enabled on debug builds, even when a // Note: RefCount tracing is only enabled on debug builds, even when a
// TraceFlag is used. // TraceFlag is used.
template <typename TraceFlagT = TraceFlag> template <typename TraceFlagT = TraceFlag>
explicit InternallyRefCounted(TraceFlagT* trace_flag = nullptr) explicit InternallyRefCounted(TraceFlagT* trace_flag = nullptr,
: refs_(1, trace_flag) {} intptr_t initial_refcount = 1)
: refs_(initial_refcount, trace_flag) {}
virtual ~InternallyRefCounted() = default; virtual ~InternallyRefCounted() = default;
RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT { RefCountedPtr<Child> Ref() GRPC_MUST_USE_RESULT {

@ -218,8 +218,9 @@ class RefCounted : public Impl {
// Note: RefCount tracing is only enabled on debug builds, even when a // Note: RefCount tracing is only enabled on debug builds, even when a
// TraceFlag is used. // TraceFlag is used.
template <typename TraceFlagT = TraceFlag> template <typename TraceFlagT = TraceFlag>
explicit RefCounted(TraceFlagT* trace_flag = nullptr) explicit RefCounted(TraceFlagT* trace_flag = nullptr,
: refs_(1, trace_flag) {} intptr_t initial_refcount = 1)
: refs_(initial_refcount, trace_flag) {}
// Note: Depending on the Impl used, this dtor can be implicitly virtual. // Note: Depending on the Impl used, this dtor can be implicitly virtual.
~RefCounted() = default; ~RefCounted() = default;

@ -329,10 +329,10 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/parse_address.cc', 'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc', 'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc', 'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc', 'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc', 'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc', 'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/resolving_lb_policy.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc', 'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/server_address.cc', 'src/core/ext/filters/client_channel/server_address.cc',
'src/core/ext/filters/client_channel/subchannel.cc', 'src/core/ext/filters/client_channel/subchannel.cc',

@ -45,16 +45,6 @@ static void call_destroy_func(grpc_call_element* elem,
const grpc_call_final_info* final_info, const grpc_call_final_info* final_info,
grpc_closure* ignored) {} grpc_closure* ignored) {}
static void call_func(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {}
static void channel_func(grpc_channel_element* elem, grpc_transport_op* op) {
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
}
bool g_replacement_fn_called = false; bool g_replacement_fn_called = false;
bool g_original_fn_called = false; bool g_original_fn_called = false;
void set_arg_once_fn(grpc_channel_stack* channel_stack, void set_arg_once_fn(grpc_channel_stack* channel_stack,
@ -77,8 +67,8 @@ static void test_channel_stack_builder_filter_replace(void) {
} }
const grpc_channel_filter replacement_filter = { const grpc_channel_filter replacement_filter = {
call_func, grpc_call_next_op,
channel_func, grpc_channel_next_op,
0, 0,
call_init_func, call_init_func,
grpc_call_stack_ignore_set_pollset_or_pollset_set, grpc_call_stack_ignore_set_pollset_or_pollset_set,
@ -90,8 +80,8 @@ const grpc_channel_filter replacement_filter = {
"filter_name"}; "filter_name"};
const grpc_channel_filter original_filter = { const grpc_channel_filter original_filter = {
call_func, grpc_call_next_op,
channel_func, grpc_channel_next_op,
0, 0,
call_init_func, call_init_func,
grpc_call_stack_ignore_set_pollset_or_pollset_set, grpc_call_stack_ignore_set_pollset_or_pollset_set,

@ -48,25 +48,18 @@ namespace {
// A minimal forwarding class to avoid implementing a standalone test LB. // A minimal forwarding class to avoid implementing a standalone test LB.
class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy { class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
public: public:
ForwardingLoadBalancingPolicy(Args args, ForwardingLoadBalancingPolicy(
const std::string& delegate_policy_name) UniquePtr<ChannelControlHelper> delegating_helper, Args args,
: LoadBalancingPolicy(std::move(args)) { const std::string& delegate_policy_name, intptr_t initial_refcount = 1)
: LoadBalancingPolicy(std::move(args), initial_refcount) {
Args delegate_args; Args delegate_args;
delegate_args.combiner = combiner(); delegate_args.combiner = combiner();
delegate_args.client_channel_factory = client_channel_factory(); delegate_args.channel_control_helper = std::move(delegating_helper);
delegate_args.subchannel_pool = subchannel_pool()->Ref();
delegate_args.args = args.args; delegate_args.args = args.args;
delegate_args.lb_config = args.lb_config;
delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy( delegate_ = LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(
delegate_policy_name.c_str(), std::move(delegate_args)); delegate_policy_name.c_str(), std::move(delegate_args));
grpc_pollset_set_add_pollset_set(delegate_->interested_parties(), grpc_pollset_set_add_pollset_set(delegate_->interested_parties(),
interested_parties()); interested_parties());
// Give re-resolution closure to delegate.
GRPC_CLOSURE_INIT(&on_delegate_request_reresolution_,
OnDelegateRequestReresolutionLocked, this,
grpc_combiner_scheduler(combiner()));
Ref().release(); // held by callback.
delegate_->SetReresolutionClosureLocked(&on_delegate_request_reresolution_);
} }
~ForwardingLoadBalancingPolicy() override = default; ~ForwardingLoadBalancingPolicy() override = default;
@ -76,35 +69,6 @@ class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
delegate_->UpdateLocked(args, lb_config); delegate_->UpdateLocked(args, lb_config);
} }
bool PickLocked(PickState* pick, grpc_error** error) override {
return delegate_->PickLocked(pick, error);
}
void CancelPickLocked(PickState* pick, grpc_error* error) override {
delegate_->CancelPickLocked(pick, error);
}
void CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error* error) override {
delegate_->CancelMatchingPicksLocked(initial_metadata_flags_mask,
initial_metadata_flags_eq, error);
}
void NotifyOnStateChangeLocked(grpc_connectivity_state* state,
grpc_closure* closure) override {
delegate_->NotifyOnStateChangeLocked(state, closure);
}
grpc_connectivity_state CheckConnectivityLocked(
grpc_error** connectivity_error) override {
return delegate_->CheckConnectivityLocked(connectivity_error);
}
void HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) override {
delegate_->HandOffPendingPicksLocked(new_policy);
}
void ExitIdleLocked() override { delegate_->ExitIdleLocked(); } void ExitIdleLocked() override { delegate_->ExitIdleLocked(); }
void ResetBackoffLocked() override { delegate_->ResetBackoffLocked(); } void ResetBackoffLocked() override { delegate_->ResetBackoffLocked(); }
@ -116,26 +80,9 @@ class ForwardingLoadBalancingPolicy : public LoadBalancingPolicy {
} }
private: private:
void ShutdownLocked() override { void ShutdownLocked() override { delegate_.reset(); }
delegate_.reset();
TryReresolutionLocked(&grpc_trace_forwarding_lb, GRPC_ERROR_CANCELLED);
}
static void OnDelegateRequestReresolutionLocked(void* arg,
grpc_error* error) {
ForwardingLoadBalancingPolicy* self =
static_cast<ForwardingLoadBalancingPolicy*>(arg);
if (error != GRPC_ERROR_NONE || self->delegate_ == nullptr) {
self->Unref();
return;
}
self->TryReresolutionLocked(&grpc_trace_forwarding_lb, GRPC_ERROR_NONE);
self->delegate_->SetReresolutionClosureLocked(
&self->on_delegate_request_reresolution_);
}
OrphanablePtr<LoadBalancingPolicy> delegate_; OrphanablePtr<LoadBalancingPolicy> delegate_;
grpc_closure on_delegate_request_reresolution_;
}; };
// //
@ -150,10 +97,13 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
public: public:
InterceptRecvTrailingMetadataLoadBalancingPolicy( InterceptRecvTrailingMetadataLoadBalancingPolicy(
Args args, InterceptRecvTrailingMetadataCallback cb, void* user_data) Args args, InterceptRecvTrailingMetadataCallback cb, void* user_data)
: ForwardingLoadBalancingPolicy(std::move(args), : ForwardingLoadBalancingPolicy(
/*delegate_lb_policy_name=*/"pick_first"), UniquePtr<ChannelControlHelper>(New<Helper>(
cb_(cb), RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy>(
user_data_(user_data) {} this),
cb, user_data)),
std::move(args), /*delegate_lb_policy_name=*/"pick_first",
/*initial_refcount=*/2) {}
~InterceptRecvTrailingMetadataLoadBalancingPolicy() override = default; ~InterceptRecvTrailingMetadataLoadBalancingPolicy() override = default;
@ -161,17 +111,65 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
return kInterceptRecvTrailingMetadataLbPolicyName; return kInterceptRecvTrailingMetadataLbPolicyName;
} }
bool PickLocked(PickState* pick, grpc_error** error) override {
bool ret = ForwardingLoadBalancingPolicy::PickLocked(pick, error);
// Note: This assumes that the delegate policy does not
// intercepting recv_trailing_metadata. If we ever need to use
// this with a delegate policy that does, then we'll need to
// handle async pick returns separately.
New<TrailingMetadataHandler>(pick, cb_, user_data_); // deletes itself
return ret;
}
private: private:
class Picker : public SubchannelPicker {
public:
explicit Picker(UniquePtr<SubchannelPicker> delegate_picker,
InterceptRecvTrailingMetadataCallback cb, void* user_data)
: delegate_picker_(std::move(delegate_picker)),
cb_(cb),
user_data_(user_data) {}
PickResult Pick(PickState* pick, grpc_error** error) override {
PickResult result = delegate_picker_->Pick(pick, error);
if (result == PICK_COMPLETE && pick->connected_subchannel != nullptr) {
New<TrailingMetadataHandler>(pick, cb_, user_data_); // deletes itself
}
return result;
}
private:
UniquePtr<SubchannelPicker> delegate_picker_;
InterceptRecvTrailingMetadataCallback cb_;
void* user_data_;
};
class Helper : public ChannelControlHelper {
public:
Helper(
RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent,
InterceptRecvTrailingMetadataCallback cb, void* user_data)
: parent_(std::move(parent)), cb_(cb), user_data_(user_data) {}
Subchannel* CreateSubchannel(const grpc_channel_args& args) override {
return parent_->channel_control_helper()->CreateSubchannel(args);
}
grpc_channel* CreateChannel(const char* target,
grpc_client_channel_type type,
const grpc_channel_args& args) override {
return parent_->channel_control_helper()->CreateChannel(target, type,
args);
}
void UpdateState(grpc_connectivity_state state, grpc_error* state_error,
UniquePtr<SubchannelPicker> picker) override {
parent_->channel_control_helper()->UpdateState(
state, state_error,
UniquePtr<SubchannelPicker>(
New<Picker>(std::move(picker), cb_, user_data_)));
}
void RequestReresolution() override {
parent_->channel_control_helper()->RequestReresolution();
}
private:
RefCountedPtr<InterceptRecvTrailingMetadataLoadBalancingPolicy> parent_;
InterceptRecvTrailingMetadataCallback cb_;
void* user_data_;
};
class TrailingMetadataHandler { class TrailingMetadataHandler {
public: public:
TrailingMetadataHandler(PickState* pick, TrailingMetadataHandler(PickState* pick,
@ -204,9 +202,6 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
grpc_closure* original_recv_trailing_metadata_ready_ = nullptr; grpc_closure* original_recv_trailing_metadata_ready_ = nullptr;
grpc_metadata_batch* recv_trailing_metadata_ = nullptr; grpc_metadata_batch* recv_trailing_metadata_ = nullptr;
}; };
InterceptRecvTrailingMetadataCallback cb_;
void* user_data_;
}; };
class InterceptTrailingFactory : public LoadBalancingPolicyFactory { class InterceptTrailingFactory : public LoadBalancingPolicyFactory {

@ -570,6 +570,7 @@ static void BM_IsolatedFilter(benchmark::State& state) {
} }
gpr_arena_destroy(call_args.arena); gpr_arena_destroy(call_args.arena);
grpc_channel_stack_destroy(channel_stack); grpc_channel_stack_destroy(channel_stack);
grpc_core::ExecCtx::Get()->Flush();
gpr_free(channel_stack); gpr_free(channel_stack);
gpr_free(call_stack); gpr_free(call_stack);

@ -936,8 +936,6 @@ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper.h \ src/core/ext/filters/client_channel/proxy_mapper.h \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \ src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.h \ src/core/ext/filters/client_channel/proxy_mapper_registry.h \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/request_routing.h \
src/core/ext/filters/client_channel/resolver.cc \ src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver.h \ src/core/ext/filters/client_channel/resolver.h \
src/core/ext/filters/client_channel/resolver/README.md \ src/core/ext/filters/client_channel/resolver/README.md \
@ -962,6 +960,8 @@ src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_registry.h \ src/core/ext/filters/client_channel/resolver_registry.h \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \ src/core/ext/filters/client_channel/resolver_result_parsing.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.h \ src/core/ext/filters/client_channel/resolver_result_parsing.h \
src/core/ext/filters/client_channel/resolving_lb_policy.cc \
src/core/ext/filters/client_channel/resolving_lb_policy.h \
src/core/ext/filters/client_channel/retry_throttle.cc \ src/core/ext/filters/client_channel/retry_throttle.cc \
src/core/ext/filters/client_channel/retry_throttle.h \ src/core/ext/filters/client_channel/retry_throttle.h \
src/core/ext/filters/client_channel/server_address.cc \ src/core/ext/filters/client_channel/server_address.cc \

@ -9964,11 +9964,11 @@
"src/core/ext/filters/client_channel/parse_address.h", "src/core/ext/filters/client_channel/parse_address.h",
"src/core/ext/filters/client_channel/proxy_mapper.h", "src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h", "src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.h", "src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h", "src/core/ext/filters/client_channel/resolver_factory.h",
"src/core/ext/filters/client_channel/resolver_registry.h", "src/core/ext/filters/client_channel/resolver_registry.h",
"src/core/ext/filters/client_channel/resolver_result_parsing.h", "src/core/ext/filters/client_channel/resolver_result_parsing.h",
"src/core/ext/filters/client_channel/resolving_lb_policy.h",
"src/core/ext/filters/client_channel/retry_throttle.h", "src/core/ext/filters/client_channel/retry_throttle.h",
"src/core/ext/filters/client_channel/server_address.h", "src/core/ext/filters/client_channel/server_address.h",
"src/core/ext/filters/client_channel/subchannel.h", "src/core/ext/filters/client_channel/subchannel.h",
@ -10011,8 +10011,6 @@
"src/core/ext/filters/client_channel/proxy_mapper.h", "src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.cc", "src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h", "src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.cc",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.cc", "src/core/ext/filters/client_channel/resolver.cc",
"src/core/ext/filters/client_channel/resolver.h", "src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h", "src/core/ext/filters/client_channel/resolver_factory.h",
@ -10020,6 +10018,8 @@
"src/core/ext/filters/client_channel/resolver_registry.h", "src/core/ext/filters/client_channel/resolver_registry.h",
"src/core/ext/filters/client_channel/resolver_result_parsing.cc", "src/core/ext/filters/client_channel/resolver_result_parsing.cc",
"src/core/ext/filters/client_channel/resolver_result_parsing.h", "src/core/ext/filters/client_channel/resolver_result_parsing.h",
"src/core/ext/filters/client_channel/resolving_lb_policy.cc",
"src/core/ext/filters/client_channel/resolving_lb_policy.h",
"src/core/ext/filters/client_channel/retry_throttle.cc", "src/core/ext/filters/client_channel/retry_throttle.cc",
"src/core/ext/filters/client_channel/retry_throttle.h", "src/core/ext/filters/client_channel/retry_throttle.h",
"src/core/ext/filters/client_channel/server_address.cc", "src/core/ext/filters/client_channel/server_address.cc",

Loading…
Cancel
Save