Merge pull request #16008 from markdroth/client_channel_refactor

Refactor request routing code out of client_channel
Mark D. Roth, committed via GitHub
commit db9be19f5d
24 changed files (changed line counts in parentheses):

  1. BUILD (2)
  2. CMakeLists.txt (6)
  3. Makefile (6)
  4. build.yaml (2)
  5. config.m4 (1)
  6. config.w32 (1)
  7. gRPC-C++.podspec (1)
  8. gRPC-Core.podspec (3)
  9. grpc.gemspec (2)
  10. grpc.gyp (4)
  11. package.xml (2)
  12. src/core/ext/filters/client_channel/client_channel.cc (1035)
  13. src/core/ext/filters/client_channel/lb_policy.h (7)
  14. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (8)
  15. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (8)
  16. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (8)
  17. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (8)
  18. src/core/ext/filters/client_channel/request_routing.cc (936)
  19. src/core/ext/filters/client_channel/request_routing.h (177)
  20. src/core/ext/filters/client_channel/resolver_result_parsing.cc (14)
  21. src/core/ext/filters/client_channel/resolver_result_parsing.h (30)
  22. src/python/grpcio/grpc_core_dependencies.py (1)
  23. tools/doxygen/Doxyfile.core.internal (2)
  24. tools/run_tests/generated/sources_and_headers.json (3)

@@ -1056,6 +1056,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.cc",
"src/core/ext/filters/client_channel/proxy_mapper.cc",
"src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
"src/core/ext/filters/client_channel/request_routing.cc",
"src/core/ext/filters/client_channel/resolver.cc",
"src/core/ext/filters/client_channel/resolver_registry.cc",
"src/core/ext/filters/client_channel/resolver_result_parsing.cc",
@@ -1079,6 +1080,7 @@ grpc_cc_library(
"src/core/ext/filters/client_channel/parse_address.h",
"src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h",
"src/core/ext/filters/client_channel/resolver_registry.h",

@@ -1210,6 +1210,7 @@ add_library(grpc
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -1562,6 +1563,7 @@ add_library(grpc_cronet
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -1935,6 +1937,7 @@ add_library(grpc_test_util
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -2256,6 +2259,7 @@ add_library(grpc_test_util_unsecure
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -2589,6 +2593,7 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc
@@ -3443,6 +3448,7 @@ add_library(grpc++_cronet
src/core/ext/filters/client_channel/parse_address.cc
src/core/ext/filters/client_channel/proxy_mapper.cc
src/core/ext/filters/client_channel/proxy_mapper_registry.cc
src/core/ext/filters/client_channel/request_routing.cc
src/core/ext/filters/client_channel/resolver.cc
src/core/ext/filters/client_channel/resolver_registry.cc
src/core/ext/filters/client_channel/resolver_result_parsing.cc

@@ -3723,6 +3723,7 @@ LIBGRPC_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \
@@ -4069,6 +4070,7 @@ LIBGRPC_CRONET_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \
@@ -4435,6 +4437,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \
@@ -4743,6 +4746,7 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \
@@ -5050,6 +5054,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \
@@ -5881,6 +5886,7 @@ LIBGRPC++_CRONET_SRC = \
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \

@@ -584,6 +584,7 @@ filegroups:
- src/core/ext/filters/client_channel/parse_address.h
- src/core/ext/filters/client_channel/proxy_mapper.h
- src/core/ext/filters/client_channel/proxy_mapper_registry.h
- src/core/ext/filters/client_channel/request_routing.h
- src/core/ext/filters/client_channel/resolver.h
- src/core/ext/filters/client_channel/resolver_factory.h
- src/core/ext/filters/client_channel/resolver_registry.h
@@ -608,6 +609,7 @@ filegroups:
- src/core/ext/filters/client_channel/parse_address.cc
- src/core/ext/filters/client_channel/proxy_mapper.cc
- src/core/ext/filters/client_channel/proxy_mapper_registry.cc
- src/core/ext/filters/client_channel/request_routing.cc
- src/core/ext/filters/client_channel/resolver.cc
- src/core/ext/filters/client_channel/resolver_registry.cc
- src/core/ext/filters/client_channel/resolver_result_parsing.cc

@@ -352,6 +352,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/filters/client_channel/parse_address.cc \
src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver_registry.cc \
src/core/ext/filters/client_channel/resolver_result_parsing.cc \

@@ -327,6 +327,7 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\filters\\client_channel\\parse_address.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper.cc " +
"src\\core\\ext\\filters\\client_channel\\proxy_mapper_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\request_routing.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_registry.cc " +
"src\\core\\ext\\filters\\client_channel\\resolver_result_parsing.cc " +

@@ -355,6 +355,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',

@@ -349,6 +349,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',
@@ -791,6 +792,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@@ -970,6 +972,7 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/parse_address.h',
'src/core/ext/filters/client_channel/proxy_mapper.h',
'src/core/ext/filters/client_channel/proxy_mapper_registry.h',
'src/core/ext/filters/client_channel/request_routing.h',
'src/core/ext/filters/client_channel/resolver.h',
'src/core/ext/filters/client_channel/resolver_factory.h',
'src/core/ext/filters/client_channel/resolver_registry.h',

@@ -285,6 +285,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.h )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.h )
s.files += %w( src/core/ext/filters/client_channel/request_routing.h )
s.files += %w( src/core/ext/filters/client_channel/resolver.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_factory.h )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.h )
@@ -730,6 +731,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/parse_address.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper.cc )
s.files += %w( src/core/ext/filters/client_channel/proxy_mapper_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/request_routing.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_registry.cc )
s.files += %w( src/core/ext/filters/client_channel/resolver_result_parsing.cc )

@@ -534,6 +534,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@@ -794,6 +795,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@@ -1035,6 +1037,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
@@ -1288,6 +1291,7 @@
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',

@@ -290,6 +290,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_factory.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.h" role="src" />
@@ -735,6 +736,7 @@
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/parse_address.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/proxy_mapper_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/request_routing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_registry.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/filters/client_channel/resolver_result_parsing.cc" role="src" />

File diff suppressed because it is too large (src/core/ext/filters/client_channel/client_channel.cc, 1035 changed lines).
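
Judging from the new request_routing API introduced below, the suppressed change makes the channel own a router and delegate call routing to it, instead of managing resolver and LB-policy state inline. A deliberately hypothetical, heavily simplified sketch of that shape; none of these names are the actual client_channel.cc symbols:

    #include <iostream>

    struct Request {};  // stand-in for RequestRouter::Request

    // Stand-in for the real RequestRouter declared in request_routing.h.
    class RequestRouterStub {
     public:
      void RouteCallLocked(Request* request) {
        std::cout << "routing call " << request << "\n";
      }
    };

    // Hypothetical channel data: the router member replaces the
    // resolver_/lb_policy_ plumbing the channel used to carry itself.
    struct ChannelData {
      RequestRouterStub request_router;
    };

    // Per-call start path, sketched: build a Request, hand it to the router.
    void StartPick(ChannelData* chand, Request* calld_request) {
      chand->request_router.RouteCallLocked(calld_request);
    }

    int main() {
      ChannelData chand;
      Request req;
      StartPick(&chand, &req);
    }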

@@ -65,10 +65,10 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
struct PickState {
/// Initial metadata associated with the picking call.
grpc_metadata_batch* initial_metadata = nullptr;
/// Bitmask used for selective cancelling. See
/// Pointer to bitmask used for selective cancelling. See
/// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
/// grpc_types.h.
uint32_t initial_metadata_flags = 0;
uint32_t* initial_metadata_flags = nullptr;
/// Storage for LB token in \a initial_metadata, or nullptr if not used.
grpc_linked_mdelem lb_token_mdelem_storage;
/// Closure to run when pick is complete, if not completed synchronously.
@@ -88,6 +88,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
/// Returns the name of the LB policy.
virtual const char* name() const GRPC_ABSTRACT;
/// Updates the policy with a new set of \a args and a new \a lb_config from
/// the resolver. Note that the LB policy gets the set of addresses from the
/// GRPC_ARG_SERVER_ADDRESS_LIST channel arg.
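
Making initial_metadata_flags a pointer means a queued pick reads the call's current flags rather than a snapshot taken when the pick was created, which is what CancelMatchingPicksLocked() and the new wait_for_ready check in RequestRouter rely on. A minimal standalone sketch of the (*flags & mask) == check matching, with simplified stand-in types rather than the real gRPC ones:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Stand-in for GRPC_INITIAL_METADATA_WAIT_FOR_READY; the value here is
    // illustrative, not the real flag bit.
    constexpr uint32_t kWaitForReady = 0x20;

    struct PickState {
      // Now a pointer: reads see the call's live flags, not a copy.
      uint32_t* initial_metadata_flags;
    };

    // Cancels every pending pick whose flags satisfy (*flags & mask) == check,
    // mirroring the CancelMatchingPicksLocked() loops in the policies below.
    void CancelMatchingPicks(std::vector<PickState*>* pending, uint32_t mask,
                             uint32_t check) {
      for (auto it = pending->begin(); it != pending->end();) {
        if ((*(*it)->initial_metadata_flags & mask) == check) {
          it = pending->erase(it);  // the real code also schedules on_complete
        } else {
          ++it;
        }
      }
    }

    int main() {
      uint32_t flags_a = kWaitForReady;  // wait_for_ready = true
      uint32_t flags_b = 0;              // wait_for_ready = false
      PickState a{&flags_a}, b{&flags_b};
      std::vector<PickState*> pending = {&a, &b};
      // On TRANSIENT_FAILURE the router cancels picks with wait_for_ready=false:
      CancelMatchingPicks(&pending, /*mask=*/kWaitForReady, /*check=*/0);
      std::cout << pending.size() << " pick left\n";  // 1: the wait_for_ready one
    }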

@@ -122,10 +122,14 @@ TraceFlag grpc_lb_glb_trace(false, "glb");
namespace {
constexpr char kGrpclb[] = "grpclb";
class GrpcLb : public LoadBalancingPolicy {
public:
explicit GrpcLb(const Args& args);
const char* name() const override { return kGrpclb; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@@ -1136,7 +1140,7 @@ void GrpcLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
if ((*pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
@@ -1819,7 +1823,7 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<GrpcLb>(args));
}
const char* name() const override { return "grpclb"; }
const char* name() const override { return kGrpclb; }
};
} // namespace
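
The same two-part pattern recurs in pick_first, round_robin, and xds below: hoist the policy name into a file-local constexpr constant, add the new name() override on the policy, and switch the factory from a string literal to that constant. The point is that the string RequestRouter later compares with strcmp() is defined exactly once. A stripped-down sketch with placeholder classes, not the real gRPC interfaces:

    #include <cstring>
    #include <iostream>

    // Toy stand-in; the real interface lives in lb_policy.h.
    class LoadBalancingPolicy {
     public:
      virtual ~LoadBalancingPolicy() = default;
      virtual const char* name() const = 0;
    };

    namespace {

    constexpr char kExampleLb[] = "example_lb";  // single source of truth

    class ExampleLb : public LoadBalancingPolicy {
     public:
      const char* name() const override { return kExampleLb; }
    };

    class ExampleLbFactory {
     public:
      // The factory returns the same constant, so the router's strcmp()-based
      // "did the policy name change?" check can never drift out of sync.
      const char* name() const { return kExampleLb; }
    };

    }  // namespace

    int main() {
      ExampleLb policy;
      ExampleLbFactory factory;
      std::cout << (std::strcmp(policy.name(), factory.name()) == 0) << "\n";  // 1
    }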

@@ -43,10 +43,14 @@ namespace {
// pick_first LB policy
//
constexpr char kPickFirst[] = "pick_first";
class PickFirst : public LoadBalancingPolicy {
public:
explicit PickFirst(const Args& args);
const char* name() const override { return kPickFirst; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@@ -234,7 +238,7 @@ void PickFirst::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
GRPC_CLOSURE_SCHED(pick->on_complete,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -622,7 +626,7 @@ class PickFirstFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<PickFirst>(args));
}
const char* name() const override { return "pick_first"; }
const char* name() const override { return kPickFirst; }
};
} // namespace

@@ -53,10 +53,14 @@ namespace {
// round_robin LB policy
//
constexpr char kRoundRobin[] = "round_robin";
class RoundRobin : public LoadBalancingPolicy {
public:
explicit RoundRobin(const Args& args);
const char* name() const override { return kRoundRobin; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@@ -291,7 +295,7 @@ void RoundRobin::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pick != nullptr) {
PickState* next = pick->next;
if ((pick->initial_metadata_flags & initial_metadata_flags_mask) ==
if ((*pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
pick->connected_subchannel.reset();
GRPC_CLOSURE_SCHED(pick->on_complete,
@@ -700,7 +704,7 @@ class RoundRobinFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<RoundRobin>(args));
}
const char* name() const override { return "round_robin"; }
const char* name() const override { return kRoundRobin; }
};
} // namespace

@@ -115,10 +115,14 @@ TraceFlag grpc_lb_xds_trace(false, "xds");
namespace {
constexpr char kXds[] = "xds_experimental";
class XdsLb : public LoadBalancingPolicy {
public:
explicit XdsLb(const Args& args);
const char* name() const override { return kXds; }
void UpdateLocked(const grpc_channel_args& args,
grpc_json* lb_config) override;
bool PickLocked(PickState* pick, grpc_error** error) override;
@@ -1053,7 +1057,7 @@ void XdsLb::CancelMatchingPicksLocked(uint32_t initial_metadata_flags_mask,
pending_picks_ = nullptr;
while (pp != nullptr) {
PendingPick* next = pp->next;
if ((pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
if ((*pp->pick->initial_metadata_flags & initial_metadata_flags_mask) ==
initial_metadata_flags_eq) {
// Note: pp is deleted in this callback.
GRPC_CLOSURE_SCHED(&pp->on_complete,
@@ -1651,7 +1655,7 @@ class XdsFactory : public LoadBalancingPolicyFactory {
return OrphanablePtr<LoadBalancingPolicy>(New<XdsLb>(args));
}
const char* name() const override { return "xds_experimental"; }
const char* name() const override { return kXds; }
};
} // namespace

@@ -0,0 +1,936 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/request_routing.h"
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/resolver_registry.h"
#include "src/core/ext/filters/client_channel/retry_throttle.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/service_config.h"
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/status_metadata.h"
namespace grpc_core {
//
// RequestRouter::Request::ResolverResultWaiter
//
// Handles waiting for a resolver result.
// Used only for the first call on an idle channel.
class RequestRouter::Request::ResolverResultWaiter {
public:
explicit ResolverResultWaiter(Request* request)
: request_router_(request->request_router_),
request_(request),
tracer_enabled_(request_router_->tracer_->enabled()) {
if (tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: deferring pick pending resolver "
"result",
request_router_, request);
}
// Add closure to be run when a resolver result is available.
GRPC_CLOSURE_INIT(&done_closure_, &DoneLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
AddToWaitingList();
// Set cancellation closure, so that we abort if the call is cancelled.
GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
&cancel_closure_);
}
private:
// Adds done_closure_ to
// request_router_->waiting_for_resolver_result_closures_.
void AddToWaitingList() {
grpc_closure_list_append(
&request_router_->waiting_for_resolver_result_closures_, &done_closure_,
GRPC_ERROR_NONE);
}
// Invoked when a resolver result is available.
static void DoneLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
RequestRouter* request_router = self->request_router_;
// If CancelLocked() has already run, delete ourselves without doing
// anything. Note that the call stack may have already been destroyed,
// so it's not safe to access anything in *request_.
if (GPR_UNLIKELY(self->finished_)) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p: call cancelled before resolver result",
request_router);
}
Delete(self);
return;
}
// Otherwise, process the resolver result.
Request* request = self->request_;
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver failed to return data",
request_router, request);
}
GRPC_CLOSURE_RUN(request->on_route_done_, GRPC_ERROR_REF(error));
} else if (GPR_UNLIKELY(request_router->resolver_ == nullptr)) {
// Shutting down.
if (self->tracer_enabled_) {
gpr_log(GPR_INFO, "request_router=%p request=%p: resolver disconnected",
request_router, request);
}
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else if (GPR_UNLIKELY(request_router->lb_policy_ == nullptr)) {
// Transient resolver failure.
// If call has wait_for_ready=true, try again; otherwise, fail.
if (*request->pick_.initial_metadata_flags &
GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned but no LB "
"policy; wait_for_ready=true; trying again",
request_router, request);
}
// Re-add ourselves to the waiting list.
self->AddToWaitingList();
// Return early so that we don't set finished_ to true below.
return;
} else {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned but no LB "
"policy; wait_for_ready=false; failing",
request_router, request);
}
GRPC_CLOSURE_RUN(
request->on_route_done_,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Name resolution failure"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
} else {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: resolver returned, doing LB "
"pick",
request_router, request);
}
request->ProcessServiceConfigAndStartLbPickLocked();
}
self->finished_ = true;
}
// Invoked when the call is cancelled.
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
ResolverResultWaiter* self = static_cast<ResolverResultWaiter*>(arg);
RequestRouter* request_router = self->request_router_;
// If DoneLocked() has already run, delete ourselves without doing anything.
if (self->finished_) {
Delete(self);
return;
}
Request* request = self->request_;
// If we are being cancelled, immediately invoke on_route_done_
// to propagate the error back to the caller.
if (error != GRPC_ERROR_NONE) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: cancelling call waiting for "
"name resolution",
request_router, request);
}
// Note: Although we are not in the call combiner here, we are
// basically stealing the call combiner from the pending pick, so
// it's safe to run on_route_done_ here -- we are essentially
// calling it here instead of calling it in DoneLocked().
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Pick cancelled", &error, 1));
}
self->finished_ = true;
}
RequestRouter* request_router_;
Request* request_;
const bool tracer_enabled_;
grpc_closure done_closure_;
grpc_closure cancel_closure_;
bool finished_ = false;
};
//
// RequestRouter::Request::AsyncPickCanceller
//
// Handles the call combiner cancellation callback for an async LB pick.
class RequestRouter::Request::AsyncPickCanceller {
public:
explicit AsyncPickCanceller(Request* request)
: request_router_(request->request_router_),
request_(request),
tracer_enabled_(request_router_->tracer_->enabled()) {
GRPC_CALL_STACK_REF(request->owning_call_, "pick_callback_cancel");
// Set cancellation closure, so that we abort if the call is cancelled.
GRPC_CLOSURE_INIT(&cancel_closure_, &CancelLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
grpc_call_combiner_set_notify_on_cancel(request->call_combiner_,
&cancel_closure_);
}
void MarkFinishedLocked() {
finished_ = true;
GRPC_CALL_STACK_UNREF(request_->owning_call_, "pick_callback_cancel");
}
private:
// Invoked when the call is cancelled.
// Note: This runs under the client_channel combiner, but will NOT be
// holding the call combiner.
static void CancelLocked(void* arg, grpc_error* error) {
AsyncPickCanceller* self = static_cast<AsyncPickCanceller*>(arg);
Request* request = self->request_;
RequestRouter* request_router = self->request_router_;
if (!self->finished_) {
// Note: request_router->lb_policy_ may have changed since we started our
// pick, in which case we will be cancelling the pick on a policy other
// than the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && request_router->lb_policy_ != nullptr) {
if (self->tracer_enabled_) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: cancelling pick from LB "
"policy %p",
request_router, request, request_router->lb_policy_.get());
}
request_router->lb_policy_->CancelPickLocked(&request->pick_,
GRPC_ERROR_REF(error));
}
request->pick_canceller_ = nullptr;
GRPC_CALL_STACK_UNREF(request->owning_call_, "pick_callback_cancel");
}
Delete(self);
}
RequestRouter* request_router_;
Request* request_;
const bool tracer_enabled_;
grpc_closure cancel_closure_;
bool finished_ = false;
};
//
// RequestRouter::Request
//
RequestRouter::Request::Request(grpc_call_stack* owning_call,
grpc_call_combiner* call_combiner,
grpc_polling_entity* pollent,
grpc_metadata_batch* send_initial_metadata,
uint32_t* send_initial_metadata_flags,
ApplyServiceConfigCallback apply_service_config,
void* apply_service_config_user_data,
grpc_closure* on_route_done)
: owning_call_(owning_call),
call_combiner_(call_combiner),
pollent_(pollent),
apply_service_config_(apply_service_config),
apply_service_config_user_data_(apply_service_config_user_data),
on_route_done_(on_route_done) {
pick_.initial_metadata = send_initial_metadata;
pick_.initial_metadata_flags = send_initial_metadata_flags;
}
RequestRouter::Request::~Request() {
if (pick_.connected_subchannel != nullptr) {
pick_.connected_subchannel.reset();
}
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
if (pick_.subchannel_call_context[i].destroy != nullptr) {
pick_.subchannel_call_context[i].destroy(
pick_.subchannel_call_context[i].value);
}
}
}
// Invoked once resolver results are available.
void RequestRouter::Request::ProcessServiceConfigAndStartLbPickLocked() {
// Get service config data if needed.
if (!apply_service_config_(apply_service_config_user_data_)) return;
// Start LB pick.
StartLbPickLocked();
}
void RequestRouter::Request::MaybeAddCallToInterestedPartiesLocked() {
if (!pollent_added_to_interested_parties_) {
pollent_added_to_interested_parties_ = true;
grpc_polling_entity_add_to_pollset_set(
pollent_, request_router_->interested_parties_);
}
}
void RequestRouter::Request::MaybeRemoveCallFromInterestedPartiesLocked() {
if (pollent_added_to_interested_parties_) {
pollent_added_to_interested_parties_ = false;
grpc_polling_entity_del_from_pollset_set(
pollent_, request_router_->interested_parties_);
}
}
// Starts a pick on the LB policy.
void RequestRouter::Request::StartLbPickLocked() {
if (request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: starting pick on lb_policy=%p",
request_router_, this, request_router_->lb_policy_.get());
}
GRPC_CLOSURE_INIT(&on_pick_done_, &LbPickDoneLocked, this,
grpc_combiner_scheduler(request_router_->combiner_));
pick_.on_complete = &on_pick_done_;
GRPC_CALL_STACK_REF(owning_call_, "pick_callback");
grpc_error* error = GRPC_ERROR_NONE;
const bool pick_done =
request_router_->lb_policy_->PickLocked(&pick_, &error);
if (pick_done) {
// Pick completed synchronously.
if (request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: pick completed synchronously",
request_router_, this);
}
GRPC_CLOSURE_RUN(on_route_done_, error);
GRPC_CALL_STACK_UNREF(owning_call_, "pick_callback");
} else {
// Pick will be returned asynchronously.
// Add the request's polling entity to the request_router's
// interested_parties, so that the I/O of the LB policy can be done
// under it. It will be removed in LbPickDoneLocked().
MaybeAddCallToInterestedPartiesLocked();
// Request notification on call cancellation.
// We allocate a separate object to track cancellation, since the
// cancellation closure might still be pending when we need to reuse
// the memory in which this Request object is stored for a subsequent
// retry attempt.
pick_canceller_ = New<AsyncPickCanceller>(this);
}
}
// Callback invoked by LoadBalancingPolicy::PickLocked() for async picks.
// Unrefs the LB policy and invokes on_route_done_.
void RequestRouter::Request::LbPickDoneLocked(void* arg, grpc_error* error) {
Request* self = static_cast<Request*>(arg);
RequestRouter* request_router = self->request_router_;
if (request_router->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p request=%p: pick completed asynchronously",
request_router, self);
}
self->MaybeRemoveCallFromInterestedPartiesLocked();
if (self->pick_canceller_ != nullptr) {
self->pick_canceller_->MarkFinishedLocked();
}
GRPC_CLOSURE_RUN(self->on_route_done_, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(self->owning_call_, "pick_callback");
}
//
// RequestRouter::LbConnectivityWatcher
//
class RequestRouter::LbConnectivityWatcher {
public:
LbConnectivityWatcher(RequestRouter* request_router,
grpc_connectivity_state state,
LoadBalancingPolicy* lb_policy,
grpc_channel_stack* owning_stack,
grpc_combiner* combiner)
: request_router_(request_router),
state_(state),
lb_policy_(lb_policy),
owning_stack_(owning_stack) {
GRPC_CHANNEL_STACK_REF(owning_stack_, "LbConnectivityWatcher");
GRPC_CLOSURE_INIT(&on_changed_, &OnLbPolicyStateChangedLocked, this,
grpc_combiner_scheduler(combiner));
lb_policy_->NotifyOnStateChangeLocked(&state_, &on_changed_);
}
~LbConnectivityWatcher() {
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "LbConnectivityWatcher");
}
private:
static void OnLbPolicyStateChangedLocked(void* arg, grpc_error* error) {
LbConnectivityWatcher* self = static_cast<LbConnectivityWatcher*>(arg);
// If the notification is not for the current policy, we're stale,
// so delete ourselves.
if (self->lb_policy_ != self->request_router_->lb_policy_.get()) {
Delete(self);
return;
}
// Otherwise, process notification.
if (self->request_router_->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: lb_policy=%p state changed to %s",
self->request_router_, self->lb_policy_,
grpc_connectivity_state_name(self->state_));
}
self->request_router_->SetConnectivityStateLocked(
self->state_, GRPC_ERROR_REF(error), "lb_changed");
// If shutting down, terminate watch.
if (self->state_ == GRPC_CHANNEL_SHUTDOWN) {
Delete(self);
return;
}
// Renew watch.
self->lb_policy_->NotifyOnStateChangeLocked(&self->state_,
&self->on_changed_);
}
RequestRouter* request_router_;
grpc_connectivity_state state_;
// LB policy address. No ref held, so not safe to dereference unless
// it happens to match request_router->lb_policy_.
LoadBalancingPolicy* lb_policy_;
grpc_channel_stack* owning_stack_;
grpc_closure on_changed_;
};
//
// RequestRouter::ReresolutionRequestHandler
//
class RequestRouter::ReresolutionRequestHandler {
public:
ReresolutionRequestHandler(RequestRouter* request_router,
LoadBalancingPolicy* lb_policy,
grpc_channel_stack* owning_stack,
grpc_combiner* combiner)
: request_router_(request_router),
lb_policy_(lb_policy),
owning_stack_(owning_stack) {
GRPC_CHANNEL_STACK_REF(owning_stack_, "ReresolutionRequestHandler");
GRPC_CLOSURE_INIT(&closure_, &OnRequestReresolutionLocked, this,
grpc_combiner_scheduler(combiner));
lb_policy_->SetReresolutionClosureLocked(&closure_);
}
private:
static void OnRequestReresolutionLocked(void* arg, grpc_error* error) {
ReresolutionRequestHandler* self =
static_cast<ReresolutionRequestHandler*>(arg);
RequestRouter* request_router = self->request_router_;
// If this invocation is for a stale LB policy, treat it as an LB shutdown
// signal.
if (self->lb_policy_ != request_router->lb_policy_.get() ||
error != GRPC_ERROR_NONE || request_router->resolver_ == nullptr) {
GRPC_CHANNEL_STACK_UNREF(request_router->owning_stack_,
"ReresolutionRequestHandler");
Delete(self);
return;
}
if (request_router->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: started name re-resolving",
request_router);
}
request_router->resolver_->RequestReresolutionLocked();
// Give back the closure to the LB policy.
self->lb_policy_->SetReresolutionClosureLocked(&self->closure_);
}
RequestRouter* request_router_;
// LB policy address. No ref held, so not safe to dereference unless
// it happens to match request_router->lb_policy_.
LoadBalancingPolicy* lb_policy_;
grpc_channel_stack* owning_stack_;
grpc_closure closure_;
};
//
// RequestRouter
//
RequestRouter::RequestRouter(
grpc_channel_stack* owning_stack, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
grpc_pollset_set* interested_parties, TraceFlag* tracer,
ProcessResolverResultCallback process_resolver_result,
void* process_resolver_result_user_data, const char* target_uri,
const grpc_channel_args* args, grpc_error** error)
: owning_stack_(owning_stack),
combiner_(combiner),
client_channel_factory_(client_channel_factory),
interested_parties_(interested_parties),
tracer_(tracer),
process_resolver_result_(process_resolver_result),
process_resolver_result_user_data_(process_resolver_result_user_data) {
GRPC_CLOSURE_INIT(&on_resolver_result_changed_,
&RequestRouter::OnResolverResultChangedLocked, this,
grpc_combiner_scheduler(combiner));
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"request_router");
grpc_channel_args* new_args = nullptr;
if (process_resolver_result == nullptr) {
grpc_arg arg = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_SERVICE_CONFIG_DISABLE_RESOLUTION), 0);
new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
}
resolver_ = ResolverRegistry::CreateResolver(
target_uri, (new_args == nullptr ? args : new_args), interested_parties_,
combiner_);
grpc_channel_args_destroy(new_args);
if (resolver_ == nullptr) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
}
}
RequestRouter::~RequestRouter() {
if (resolver_ != nullptr) {
// The only way we can get here is if we never started resolving,
// because we take a ref to the channel stack when we start
// resolving and do not release it until the resolver callback is
// invoked after the resolver shuts down.
resolver_.reset();
}
if (lb_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
if (client_channel_factory_ != nullptr) {
grpc_client_channel_factory_unref(client_channel_factory_);
}
grpc_connectivity_state_destroy(&state_tracker_);
}
namespace {
const char* GetChannelConnectivityStateChangeString(
grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_IDLE:
return "Channel state change to IDLE";
case GRPC_CHANNEL_CONNECTING:
return "Channel state change to CONNECTING";
case GRPC_CHANNEL_READY:
return "Channel state change to READY";
case GRPC_CHANNEL_TRANSIENT_FAILURE:
return "Channel state change to TRANSIENT_FAILURE";
case GRPC_CHANNEL_SHUTDOWN:
return "Channel state change to SHUTDOWN";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
} // namespace
void RequestRouter::SetConnectivityStateLocked(grpc_connectivity_state state,
grpc_error* error,
const char* reason) {
if (lb_policy_ != nullptr) {
if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
// Cancel picks with wait_for_ready=false.
lb_policy_->CancelMatchingPicksLocked(
/* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
/* check= */ 0, GRPC_ERROR_REF(error));
} else if (state == GRPC_CHANNEL_SHUTDOWN) {
// Cancel all picks.
lb_policy_->CancelMatchingPicksLocked(/* mask= */ 0, /* check= */ 0,
GRPC_ERROR_REF(error));
}
}
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: setting connectivity state to %s",
this, grpc_connectivity_state_name(state));
}
if (channelz_node_ != nullptr) {
channelz_node_->AddTraceEvent(
channelz::ChannelTrace::Severity::Info,
grpc_slice_from_static_string(
GetChannelConnectivityStateChangeString(state)));
}
grpc_connectivity_state_set(&state_tracker_, state, error, reason);
}
void RequestRouter::StartResolvingLocked() {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: starting name resolution", this);
}
GPR_ASSERT(!started_resolving_);
started_resolving_ = true;
GRPC_CHANNEL_STACK_REF(owning_stack_, "resolver");
resolver_->NextLocked(&resolver_result_, &on_resolver_result_changed_);
}
// Invoked from the resolver NextLocked() callback when the resolver
// is shutting down.
void RequestRouter::OnResolverShutdownLocked(grpc_error* error) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down", this);
}
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
if (resolver_ != nullptr) {
// This should never happen; it can only be triggered by a resolver
// implementation spontaneously deciding to report shutdown without
// being orphaned. This code is included just to be defensive.
if (tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p: spontaneous shutdown from resolver %p", this,
resolver_.get());
}
resolver_.reset();
SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Resolver spontaneous shutdown", &error, 1),
"resolver_spontaneous_shutdown");
}
grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Channel disconnected", &error, 1));
GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
GRPC_CHANNEL_STACK_UNREF(owning_stack_, "resolver");
grpc_channel_args_destroy(resolver_result_);
resolver_result_ = nullptr;
GRPC_ERROR_UNREF(error);
}
// Creates a new LB policy, replacing any previous one.
// If the new policy is created successfully, sets *connectivity_state and
// *connectivity_error to its initial connectivity state; otherwise,
// leaves them unchanged.
void RequestRouter::CreateNewLbPolicyLocked(
const char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error, TraceStringVector* trace_strings) {
LoadBalancingPolicy::Args lb_policy_args;
lb_policy_args.combiner = combiner_;
lb_policy_args.client_channel_factory = client_channel_factory_;
lb_policy_args.args = resolver_result_;
lb_policy_args.lb_config = lb_config;
OrphanablePtr<LoadBalancingPolicy> new_lb_policy =
LoadBalancingPolicyRegistry::CreateLoadBalancingPolicy(lb_policy_name,
lb_policy_args);
if (GPR_UNLIKELY(new_lb_policy == nullptr)) {
gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
if (channelz_node_ != nullptr) {
char* str;
gpr_asprintf(&str, "Could not create LB policy \'%s\'", lb_policy_name);
trace_strings->push_back(str);
}
} else {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: created new LB policy \"%s\" (%p)",
this, lb_policy_name, new_lb_policy.get());
}
if (channelz_node_ != nullptr) {
char* str;
gpr_asprintf(&str, "Created new LB policy \'%s\'", lb_policy_name);
trace_strings->push_back(str);
}
// Swap out the LB policy and update the fds in interested_parties_.
if (lb_policy_ != nullptr) {
if (tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_->HandOffPendingPicksLocked(new_lb_policy.get());
}
lb_policy_ = std::move(new_lb_policy);
grpc_pollset_set_add_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
// Create re-resolution request handler for the new LB policy. It
// will delete itself when no longer needed.
New<ReresolutionRequestHandler>(this, lb_policy_.get(), owning_stack_,
combiner_);
// Get the new LB policy's initial connectivity state and start a
// connectivity watch.
GRPC_ERROR_UNREF(*connectivity_error);
*connectivity_state =
lb_policy_->CheckConnectivityLocked(connectivity_error);
if (exit_idle_when_lb_policy_arrives_) {
lb_policy_->ExitIdleLocked();
exit_idle_when_lb_policy_arrives_ = false;
}
// Create new watcher. It will delete itself when done.
New<LbConnectivityWatcher>(this, *connectivity_state, lb_policy_.get(),
owning_stack_, combiner_);
}
}
void RequestRouter::MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings) {
const ServerAddressList* addresses =
FindServerAddressListChannelArg(resolver_result_);
const bool resolution_contains_addresses =
addresses != nullptr && addresses->size() > 0;
if (!resolution_contains_addresses &&
previous_resolution_contained_addresses_) {
trace_strings->push_back(gpr_strdup("Address list became empty"));
} else if (resolution_contains_addresses &&
!previous_resolution_contained_addresses_) {
trace_strings->push_back(gpr_strdup("Address list became non-empty"));
}
previous_resolution_contained_addresses_ = resolution_contains_addresses;
}
void RequestRouter::ConcatenateAndAddChannelTraceLocked(
TraceStringVector* trace_strings) const {
if (!trace_strings->empty()) {
gpr_strvec v;
gpr_strvec_init(&v);
gpr_strvec_add(&v, gpr_strdup("Resolution event: "));
bool is_first = true;
for (size_t i = 0; i < trace_strings->size(); ++i) {
if (!is_first) gpr_strvec_add(&v, gpr_strdup(", "));
is_first = false;
gpr_strvec_add(&v, (*trace_strings)[i]);
}
char* flat;
size_t flat_len = 0;
flat = gpr_strvec_flatten(&v, &flat_len);
channelz_node_->AddTraceEvent(
grpc_core::channelz::ChannelTrace::Severity::Info,
grpc_slice_new(flat, flat_len, gpr_free));
gpr_strvec_destroy(&v);
}
}
// Callback invoked when a resolver result is available.
void RequestRouter::OnResolverResultChangedLocked(void* arg,
grpc_error* error) {
RequestRouter* self = static_cast<RequestRouter*>(arg);
if (self->tracer_->enabled()) {
const char* disposition =
self->resolver_result_ != nullptr
? ""
: (error == GRPC_ERROR_NONE ? " (transient error)"
: " (resolver shutdown)");
gpr_log(GPR_INFO,
"request_router=%p: got resolver result: resolver_result=%p "
"error=%s%s",
self, self->resolver_result_, grpc_error_string(error),
disposition);
}
// Handle shutdown.
if (error != GRPC_ERROR_NONE || self->resolver_ == nullptr) {
self->OnResolverShutdownLocked(GRPC_ERROR_REF(error));
return;
}
// Data used to set the channel's connectivity state.
bool set_connectivity_state = true;
// We only want to trace the address resolution in the following cases:
// (a) Address resolution resulted in service config change.
// (b) Address resolution that causes number of backends to go from
// zero to non-zero.
// (c) Address resolution that causes number of backends to go from
// non-zero to zero.
// (d) Address resolution that causes a new LB policy to be created.
//
// We track a list of strings to eventually be concatenated and traced.
TraceStringVector trace_strings;
grpc_connectivity_state connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_error* connectivity_error =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
// resolver_result_ will be null in the case of a transient
// resolution error. In that case, we don't have any new result to
// process, which means that we keep using the previous result (if any).
if (self->resolver_result_ == nullptr) {
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO, "request_router=%p: resolver transient failure", self);
}
// Don't override connectivity state if we already have an LB policy.
if (self->lb_policy_ != nullptr) set_connectivity_state = false;
} else {
// Parse the resolver result.
const char* lb_policy_name = nullptr;
grpc_json* lb_policy_config = nullptr;
const bool service_config_changed = self->process_resolver_result_(
self->process_resolver_result_user_data_, *self->resolver_result_,
&lb_policy_name, &lb_policy_config);
GPR_ASSERT(lb_policy_name != nullptr);
// Check to see if we're already using the right LB policy.
const bool lb_policy_name_changed =
self->lb_policy_ == nullptr ||
strcmp(self->lb_policy_->name(), lb_policy_name) != 0;
if (self->lb_policy_ != nullptr && !lb_policy_name_changed) {
// Continue using the same LB policy. Update with new addresses.
if (self->tracer_->enabled()) {
gpr_log(GPR_INFO,
"request_router=%p: updating existing LB policy \"%s\" (%p)",
self, lb_policy_name, self->lb_policy_.get());
}
self->lb_policy_->UpdateLocked(*self->resolver_result_, lb_policy_config);
// No need to set the channel's connectivity state; the existing
// watch on the LB policy will take care of that.
set_connectivity_state = false;
} else {
// Instantiate new LB policy.
self->CreateNewLbPolicyLocked(lb_policy_name, lb_policy_config,
&connectivity_state, &connectivity_error,
&trace_strings);
}
// Add channel trace event.
if (self->channelz_node_ != nullptr) {
if (service_config_changed) {
// TODO(ncteisen): might be worth somehow including a snippet of the
// config in the trace, at the risk of bloating the trace logs.
trace_strings.push_back(gpr_strdup("Service config changed"));
}
self->MaybeAddTraceMessagesForAddressChangesLocked(&trace_strings);
self->ConcatenateAndAddChannelTraceLocked(&trace_strings);
}
// Clean up.
grpc_channel_args_destroy(self->resolver_result_);
self->resolver_result_ = nullptr;
}
// Set the channel's connectivity state if needed.
if (set_connectivity_state) {
self->SetConnectivityStateLocked(connectivity_state, connectivity_error,
"resolver_result");
} else {
GRPC_ERROR_UNREF(connectivity_error);
}
// Invoke closures that were waiting for results and renew the watch.
GRPC_CLOSURE_LIST_SCHED(&self->waiting_for_resolver_result_closures_);
self->resolver_->NextLocked(&self->resolver_result_,
&self->on_resolver_result_changed_);
}
void RequestRouter::RouteCallLocked(Request* request) {
GPR_ASSERT(request->pick_.connected_subchannel == nullptr);
request->request_router_ = this;
if (lb_policy_ != nullptr) {
// We already have resolver results, so process the service config
// and start an LB pick.
request->ProcessServiceConfigAndStartLbPickLocked();
} else if (resolver_ == nullptr) {
GRPC_CLOSURE_RUN(request->on_route_done_,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
} else {
// We do not yet have an LB policy, so wait for a resolver result.
if (!started_resolving_) {
StartResolvingLocked();
}
// Create a new waiter, which will delete itself when done.
New<Request::ResolverResultWaiter>(request);
// Add the request's polling entity to the request_router's
// interested_parties, so that the I/O of the resolver can be done
// under it. It will be removed in LbPickDoneLocked().
request->MaybeAddCallToInterestedPartiesLocked();
}
}
void RequestRouter::ShutdownLocked(grpc_error* error) {
if (resolver_ != nullptr) {
SetConnectivityStateLocked(GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
"disconnect");
resolver_.reset();
if (!started_resolving_) {
grpc_closure_list_fail_all(&waiting_for_resolver_result_closures_,
GRPC_ERROR_REF(error));
GRPC_CLOSURE_LIST_SCHED(&waiting_for_resolver_result_closures_);
}
if (lb_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
}
}
GRPC_ERROR_UNREF(error);
}
grpc_connectivity_state RequestRouter::GetConnectivityState() {
return grpc_connectivity_state_check(&state_tracker_);
}
void RequestRouter::NotifyOnConnectivityStateChange(
grpc_connectivity_state* state, grpc_closure* closure) {
grpc_connectivity_state_notify_on_state_change(&state_tracker_, state,
closure);
}
void RequestRouter::ExitIdleLocked() {
if (lb_policy_ != nullptr) {
lb_policy_->ExitIdleLocked();
} else {
exit_idle_when_lb_policy_arrives_ = true;
if (!started_resolving_ && resolver_ != nullptr) {
StartResolvingLocked();
}
}
}
void RequestRouter::ResetConnectionBackoffLocked() {
if (resolver_ != nullptr) {
resolver_->ResetBackoffLocked();
resolver_->RequestReresolutionLocked();
}
if (lb_policy_ != nullptr) {
lb_policy_->ResetBackoffLocked();
}
}
} // namespace grpc_core
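
One pattern worth calling out in this file: ResolverResultWaiter and AsyncPickCanceller are self-owning objects that arm two closures (completion and cancellation), both of which eventually run under the combiner, and whichever runs second observes finished_ == true and deletes the object. A minimal sketch of just that lifetime protocol, with plain static functions in place of grpc_closure and sequential calls standing in for combiner serialization:

    #include <iostream>

    // Boiled-down two-callback, self-deleting lifetime protocol. In gRPC both
    // callbacks run serialized under the same combiner; here we just call them
    // one after the other.
    class Waiter {
     public:
      static void Done(void* arg) {
        Waiter* self = static_cast<Waiter*>(arg);
        if (self->finished_) {  // cancel callback already ran; just reclaim
          delete self;
          return;
        }
        std::cout << "resolver result processed\n";  // real code routes the call
        self->finished_ = true;  // let the other callback free us
      }

      static void Cancel(void* arg) {
        Waiter* self = static_cast<Waiter*>(arg);
        if (self->finished_) {  // done callback already ran; just reclaim
          delete self;
          return;
        }
        std::cout << "pick cancelled\n";  // real code fails on_route_done_
        self->finished_ = true;
      }

     private:
      bool finished_ = false;
    };

    int main() {
      Waiter* w = new Waiter();  // self-owning, like New<ResolverResultWaiter>()
      Waiter::Done(w);           // first callback: does the work, sets finished_
      Waiter::Cancel(w);         // second callback: sees finished_, deletes w
    }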

@@ -0,0 +1,177 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H
#include <grpc/support/port_platform.h>
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/client_channel_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/resolver.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/inlined_vector.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
class RequestRouter {
public:
class Request {
public:
// Synchronous callback that applies the service config to a call.
// Returns false if the call should be failed.
typedef bool (*ApplyServiceConfigCallback)(void* user_data);
Request(grpc_call_stack* owning_call, grpc_call_combiner* call_combiner,
grpc_polling_entity* pollent,
grpc_metadata_batch* send_initial_metadata,
uint32_t* send_initial_metadata_flags,
ApplyServiceConfigCallback apply_service_config,
void* apply_service_config_user_data, grpc_closure* on_route_done);
~Request();
// TODO(roth): It seems a bit ugly to expose this member in a
// non-const way. Find a better API to avoid this.
LoadBalancingPolicy::PickState* pick() { return &pick_; }
private:
friend class RequestRouter;
class ResolverResultWaiter;
class AsyncPickCanceller;
void ProcessServiceConfigAndStartLbPickLocked();
void StartLbPickLocked();
static void LbPickDoneLocked(void* arg, grpc_error* error);
void MaybeAddCallToInterestedPartiesLocked();
void MaybeRemoveCallFromInterestedPartiesLocked();
// Populated by caller.
grpc_call_stack* owning_call_;
grpc_call_combiner* call_combiner_;
grpc_polling_entity* pollent_;
ApplyServiceConfigCallback apply_service_config_;
void* apply_service_config_user_data_;
grpc_closure* on_route_done_;
LoadBalancingPolicy::PickState pick_;
// Internal state.
RequestRouter* request_router_ = nullptr;
bool pollent_added_to_interested_parties_ = false;
grpc_closure on_pick_done_;
AsyncPickCanceller* pick_canceller_ = nullptr;
};
// Synchronous callback that processes the resolver result, extracting the
// LB policy name and config (and any service config).
// Returns true if the service config has changed since the last result.
typedef bool (*ProcessResolverResultCallback)(void* user_data,
const grpc_channel_args& args,
const char** lb_policy_name,
grpc_json** lb_policy_config);
RequestRouter(grpc_channel_stack* owning_stack, grpc_combiner* combiner,
grpc_client_channel_factory* client_channel_factory,
grpc_pollset_set* interested_parties, TraceFlag* tracer,
ProcessResolverResultCallback process_resolver_result,
void* process_resolver_result_user_data, const char* target_uri,
const grpc_channel_args* args, grpc_error** error);
~RequestRouter();
void set_channelz_node(channelz::ClientChannelNode* channelz_node) {
channelz_node_ = channelz_node;
}
void RouteCallLocked(Request* request);
// TODO(roth): Add methods to cancel picks.
void ShutdownLocked(grpc_error* error);
void ExitIdleLocked();
void ResetConnectionBackoffLocked();
grpc_connectivity_state GetConnectivityState();
void NotifyOnConnectivityStateChange(grpc_connectivity_state* state,
grpc_closure* closure);
LoadBalancingPolicy* lb_policy() const { return lb_policy_.get(); }
private:
using TraceStringVector = grpc_core::InlinedVector<char*, 3>;
class ReresolutionRequestHandler;
class LbConnectivityWatcher;
void StartResolvingLocked();
void OnResolverShutdownLocked(grpc_error* error);
void CreateNewLbPolicyLocked(const char* lb_policy_name, grpc_json* lb_config,
grpc_connectivity_state* connectivity_state,
grpc_error** connectivity_error,
TraceStringVector* trace_strings);
void MaybeAddTraceMessagesForAddressChangesLocked(
TraceStringVector* trace_strings);
void ConcatenateAndAddChannelTraceLocked(
TraceStringVector* trace_strings) const;
static void OnResolverResultChangedLocked(void* arg, grpc_error* error);
void SetConnectivityStateLocked(grpc_connectivity_state state,
grpc_error* error, const char* reason);
// Passed in from caller at construction time.
grpc_channel_stack* owning_stack_;
grpc_combiner* combiner_;
grpc_client_channel_factory* client_channel_factory_;
grpc_pollset_set* interested_parties_;
TraceFlag* tracer_;
channelz::ClientChannelNode* channelz_node_ = nullptr;
// Resolver and associated state.
OrphanablePtr<Resolver> resolver_;
ProcessResolverResultCallback process_resolver_result_;
void* process_resolver_result_user_data_;
bool started_resolving_ = false;
grpc_channel_args* resolver_result_ = nullptr;
bool previous_resolution_contained_addresses_ = false;
grpc_closure_list waiting_for_resolver_result_closures_;
grpc_closure on_resolver_result_changed_;
// LB policy and associated state.
OrphanablePtr<LoadBalancingPolicy> lb_policy_;
bool exit_idle_when_lb_policy_arrives_ = false;
grpc_connectivity_state_tracker state_tracker_;
};
} // namespace grpc_core
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_REQUEST_ROUTING_H */
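The ProcessResolverResultCallback typedef above is the seam that lets client_channel keep ownership of service-config handling while RequestRouter owns the resolver and LB-policy plumbing. A hedged sketch of an implementation, using the ProcessedResolverResult helper touched by this same PR; MyChannelState and MyProcessResolverResult are assumed names, not code from this change:

// Assumed per-channel state registered as the callback's user_data.
// (Assumes the resolver_result_parsing.h declarations shown below.)
struct MyChannelState {
  grpc_core::UniquePtr<char> lb_policy_name;
};

static bool MyProcessResolverResult(void* user_data,
                                    const grpc_channel_args& args,
                                    const char** lb_policy_name,
                                    grpc_json** lb_policy_config) {
  MyChannelState* chand = static_cast<MyChannelState*>(user_data);
  grpc_core::internal::ProcessedResolverResult result(args,
                                                      /*parse_retry=*/true);
  // Keep the name owned by channel state so the out-param stays valid
  // after this function returns.
  chand->lb_policy_name = result.lb_policy_name();
  *lb_policy_name = chand->lb_policy_name.get();
  // A real implementation must keep any JSON it hands out alive past this
  // call; for simplicity this sketch reports no LB config.
  *lb_policy_config = nullptr;
  return true;  // true => treat the service config as changed
}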

@ -43,16 +43,16 @@ namespace grpc_core {
 namespace internal {
 ProcessedResolverResult::ProcessedResolverResult(
-    const grpc_channel_args* resolver_result, bool parse_retry) {
+    const grpc_channel_args& resolver_result, bool parse_retry) {
   ProcessServiceConfig(resolver_result, parse_retry);
   // If no LB config was found above, just find the LB policy name then.
   if (lb_policy_name_ == nullptr) ProcessLbPolicyName(resolver_result);
 }
 void ProcessedResolverResult::ProcessServiceConfig(
-    const grpc_channel_args* resolver_result, bool parse_retry) {
+    const grpc_channel_args& resolver_result, bool parse_retry) {
   const grpc_arg* channel_arg =
-      grpc_channel_args_find(resolver_result, GRPC_ARG_SERVICE_CONFIG);
+      grpc_channel_args_find(&resolver_result, GRPC_ARG_SERVICE_CONFIG);
   const char* service_config_json = grpc_channel_arg_get_string(channel_arg);
   if (service_config_json != nullptr) {
     service_config_json_.reset(gpr_strdup(service_config_json));
@ -60,7 +60,7 @@ void ProcessedResolverResult::ProcessServiceConfig(
     if (service_config_ != nullptr) {
       if (parse_retry) {
         channel_arg =
-            grpc_channel_args_find(resolver_result, GRPC_ARG_SERVER_URI);
+            grpc_channel_args_find(&resolver_result, GRPC_ARG_SERVER_URI);
         const char* server_uri = grpc_channel_arg_get_string(channel_arg);
         GPR_ASSERT(server_uri != nullptr);
         grpc_uri* uri = grpc_uri_parse(server_uri, true);
@ -78,7 +78,7 @@ void ProcessedResolverResult::ProcessServiceConfig(
 }
 void ProcessedResolverResult::ProcessLbPolicyName(
-    const grpc_channel_args* resolver_result) {
+    const grpc_channel_args& resolver_result) {
   // Prefer the LB policy name found in the service config. Note that this is
   // checking the deprecated loadBalancingPolicy field, rather than the new
   // loadBalancingConfig field.
@ -96,13 +96,13 @@ void ProcessedResolverResult::ProcessLbPolicyName(
   // Otherwise, find the LB policy name set by the client API.
   if (lb_policy_name_ == nullptr) {
     const grpc_arg* channel_arg =
-        grpc_channel_args_find(resolver_result, GRPC_ARG_LB_POLICY_NAME);
+        grpc_channel_args_find(&resolver_result, GRPC_ARG_LB_POLICY_NAME);
     lb_policy_name_.reset(gpr_strdup(grpc_channel_arg_get_string(channel_arg)));
   }
   // Special case: If at least one balancer address is present, we use
   // the grpclb policy, regardless of what the resolver has returned.
   const ServerAddressList* addresses =
-      FindServerAddressListChannelArg(resolver_result);
+      FindServerAddressListChannelArg(&resolver_result);
   if (addresses != nullptr) {
     bool found_balancer_address = false;
     for (size_t i = 0; i < addresses->size(); ++i) {
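The pointer-to-reference migration above pushes null handling to the boundary: the reference parameter documents in the signature itself that the resolver result can never be null. A before/after sketch of a hypothetical call site (resolver_result_ as a member name is an assumption, not code from this PR):

// Before this PR: pointer API, callee had to tolerate null.
//   ProcessedResolverResult result(resolver_result_, /*parse_retry=*/true);
// After: the caller asserts non-null once, then dereferences.
GPR_ASSERT(resolver_result_ != nullptr);
ProcessedResolverResult result(*resolver_result_, /*parse_retry=*/true);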

@ -36,8 +36,7 @@ namespace internal {
 class ClientChannelMethodParams;
 // A table mapping from a method name to its method parameters.
-typedef grpc_core::SliceHashTable<
-    grpc_core::RefCountedPtr<ClientChannelMethodParams>>
+typedef SliceHashTable<RefCountedPtr<ClientChannelMethodParams>>
     ClientChannelMethodParamsTable;
 // A container of processed fields from the resolver result. Simplifies the
@ -47,33 +46,30 @@ class ProcessedResolverResult {
   // Processes the resolver result and populates the relative members
   // for later consumption. Tries to parse retry parameters only if parse_retry
   // is true.
-  ProcessedResolverResult(const grpc_channel_args* resolver_result,
+  ProcessedResolverResult(const grpc_channel_args& resolver_result,
                           bool parse_retry);
   // Getters. Any managed object's ownership is transferred.
-  grpc_core::UniquePtr<char> service_config_json() {
+  UniquePtr<char> service_config_json() {
     return std::move(service_config_json_);
   }
-  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
+  RefCountedPtr<ServerRetryThrottleData> retry_throttle_data() {
    return std::move(retry_throttle_data_);
   }
-  grpc_core::RefCountedPtr<ClientChannelMethodParamsTable>
-  method_params_table() {
+  RefCountedPtr<ClientChannelMethodParamsTable> method_params_table() {
     return std::move(method_params_table_);
   }
-  grpc_core::UniquePtr<char> lb_policy_name() {
-    return std::move(lb_policy_name_);
-  }
+  UniquePtr<char> lb_policy_name() { return std::move(lb_policy_name_); }
   grpc_json* lb_policy_config() { return lb_policy_config_; }
  private:
   // Finds the service config; extracts LB config and (maybe) retry throttle
   // params from it.
-  void ProcessServiceConfig(const grpc_channel_args* resolver_result,
+  void ProcessServiceConfig(const grpc_channel_args& resolver_result,
                             bool parse_retry);
   // Finds the LB policy name (when no LB config was found).
-  void ProcessLbPolicyName(const grpc_channel_args* resolver_result);
+  void ProcessLbPolicyName(const grpc_channel_args& resolver_result);
   // Parses the service config. Intended to be used by
   // ServiceConfig::ParseGlobalParams.
@ -85,16 +81,16 @@ class ProcessedResolverResult {
   void ParseRetryThrottleParamsFromServiceConfig(const grpc_json* field);
   // Service config.
-  grpc_core::UniquePtr<char> service_config_json_;
-  grpc_core::UniquePtr<grpc_core::ServiceConfig> service_config_;
+  UniquePtr<char> service_config_json_;
+  UniquePtr<grpc_core::ServiceConfig> service_config_;
   // LB policy.
   grpc_json* lb_policy_config_ = nullptr;
-  grpc_core::UniquePtr<char> lb_policy_name_;
+  UniquePtr<char> lb_policy_name_;
   // Retry throttle data.
   char* server_name_ = nullptr;
-  grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
+  RefCountedPtr<ServerRetryThrottleData> retry_throttle_data_;
   // Method params table.
-  grpc_core::RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
+  RefCountedPtr<ClientChannelMethodParamsTable> method_params_table_;
 };
 // The parameters of a method.
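One subtlety worth noting in the getters above: as the "ownership is transferred" comment says, each getter moves its member out, so it is meaningful only on its first call. A short usage sketch (variable names assumed, not code from this PR):

// Each getter moves its member out; a second call yields an empty value.
grpc_core::internal::ProcessedResolverResult result(*args,
                                                    /*parse_retry=*/true);
grpc_core::UniquePtr<char> lb_name = result.lb_policy_name();  // owns string
grpc_core::UniquePtr<char> empty = result.lb_policy_name();    // now nullptr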

@ -326,6 +326,7 @@ CORE_SOURCE_FILES = [
'src/core/ext/filters/client_channel/parse_address.cc',
'src/core/ext/filters/client_channel/proxy_mapper.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/request_routing.cc',
'src/core/ext/filters/client_channel/resolver.cc',
'src/core/ext/filters/client_channel/resolver_registry.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',

@ -932,6 +932,8 @@ src/core/ext/filters/client_channel/proxy_mapper.cc \
src/core/ext/filters/client_channel/proxy_mapper.h \
src/core/ext/filters/client_channel/proxy_mapper_registry.cc \
src/core/ext/filters/client_channel/proxy_mapper_registry.h \
src/core/ext/filters/client_channel/request_routing.cc \
src/core/ext/filters/client_channel/request_routing.h \
src/core/ext/filters/client_channel/resolver.cc \
src/core/ext/filters/client_channel/resolver.h \
src/core/ext/filters/client_channel/resolver/README.md \

@ -9840,6 +9840,7 @@
"src/core/ext/filters/client_channel/parse_address.h",
"src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h",
"src/core/ext/filters/client_channel/resolver_registry.h",
@ -9882,6 +9883,8 @@
"src/core/ext/filters/client_channel/proxy_mapper.h",
"src/core/ext/filters/client_channel/proxy_mapper_registry.cc",
"src/core/ext/filters/client_channel/proxy_mapper_registry.h",
"src/core/ext/filters/client_channel/request_routing.cc",
"src/core/ext/filters/client_channel/request_routing.h",
"src/core/ext/filters/client_channel/resolver.cc",
"src/core/ext/filters/client_channel/resolver.h",
"src/core/ext/filters/client_channel/resolver_factory.h",
