diff --git a/BUILD b/BUILD
index 004f42f9582..86857601a35 100644
--- a/BUILD
+++ b/BUILD
@@ -1247,27 +1247,66 @@ grpc_cc_library(
],
)
+grpc_cc_library(
+ name = "grpc_xds_client",
+ srcs = [
+ "src/core/ext/filters/client_channel/xds/xds_api.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client.cc",
+ "src/core/ext/filters/client_channel/xds/xds_channel.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.cc",
+ ],
+ hdrs = [
+ "src/core/ext/filters/client_channel/xds/xds_api.h",
+ "src/core/ext/filters/client_channel/xds/xds_client.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel_args.h",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.h",
+ ],
+ language = "c++",
+ deps = [
+ "envoy_ads_upb",
+ "grpc_base",
+ "grpc_client_channel",
+ ],
+)
+
+grpc_cc_library(
+ name = "grpc_xds_client_secure",
+ srcs = [
+ "src/core/ext/filters/client_channel/xds/xds_api.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client.cc",
+ "src/core/ext/filters/client_channel/xds/xds_channel_secure.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.cc",
+ ],
+ hdrs = [
+ "src/core/ext/filters/client_channel/xds/xds_api.h",
+ "src/core/ext/filters/client_channel/xds/xds_client.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel_args.h",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.h",
+ ],
+ language = "c++",
+ deps = [
+ "envoy_ads_upb",
+ "grpc_base",
+ "grpc_client_channel",
+ "grpc_secure",
+ ],
+)
+
grpc_cc_library(
name = "grpc_lb_policy_xds",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h",
],
language = "c++",
deps = [
- "envoy_ads_upb",
"grpc_base",
"grpc_client_channel",
- "grpc_resolver_fake",
- "grpc_transport_chttp2_client_insecure",
+ "grpc_xds_client",
],
)
@@ -1275,24 +1314,15 @@ grpc_cc_library(
name = "grpc_lb_policy_xds_secure",
srcs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc",
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h",
],
language = "c++",
deps = [
- "envoy_ads_upb",
"grpc_base",
"grpc_client_channel",
- "grpc_resolver_fake",
- "grpc_secure",
- "grpc_transport_chttp2_client_secure",
+ "grpc_xds_client_secure",
],
)
@@ -1588,7 +1618,7 @@ grpc_cc_library(
],
hdrs = [
"src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel_args.h",
"src/core/lib/security/context/security_context.h",
"src/core/lib/security/credentials/alts/alts_credentials.h",
"src/core/lib/security/credentials/composite/composite_credentials.h",
diff --git a/BUILD.gn b/BUILD.gn
index 650d54c9eee..0b876285b47 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -246,12 +246,6 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/lb_policy/subchannel_list.h",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.cc",
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc",
- "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h",
"src/core/ext/filters/client_channel/lb_policy_factory.h",
"src/core/ext/filters/client_channel/lb_policy_registry.cc",
"src/core/ext/filters/client_channel/lb_policy_registry.h",
@@ -302,6 +296,15 @@ config("grpc_config") {
"src/core/ext/filters/client_channel/subchannel_interface.h",
"src/core/ext/filters/client_channel/subchannel_pool_interface.cc",
"src/core/ext/filters/client_channel/subchannel_pool_interface.h",
+ "src/core/ext/filters/client_channel/xds/xds_api.cc",
+ "src/core/ext/filters/client_channel/xds/xds_api.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel_args.h",
+ "src/core/ext/filters/client_channel/xds/xds_channel_secure.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client.h",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.cc",
+ "src/core/ext/filters/client_channel/xds/xds_client_stats.h",
"src/core/ext/filters/client_idle/client_idle_filter.cc",
"src/core/ext/filters/deadline/deadline_filter.cc",
"src/core/ext/filters/deadline/deadline_filter.h",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 59c575f894c..f9c103aa096 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1308,9 +1308,10 @@ add_library(grpc
src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
+ src/core/ext/filters/client_channel/xds/xds_api.cc
+ src/core/ext/filters/client_channel/xds/xds_channel_secure.cc
+ src/core/ext/filters/client_channel/xds/xds_client.cc
+ src/core/ext/filters/client_channel/xds/xds_client_stats.cc
src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c
src/core/ext/upb-generated/envoy/api/v2/cds.upb.c
src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c
@@ -2824,9 +2825,10 @@ add_library(grpc_unsecure
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
- src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
+ src/core/ext/filters/client_channel/xds/xds_api.cc
+ src/core/ext/filters/client_channel/xds/xds_channel.cc
+ src/core/ext/filters/client_channel/xds/xds_client.cc
+ src/core/ext/filters/client_channel/xds/xds_client_stats.cc
src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c
src/core/ext/upb-generated/envoy/api/v2/cds.upb.c
src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c
diff --git a/Makefile b/Makefile
index 6685799a1e4..58f22d23969 100644
--- a/Makefile
+++ b/Makefile
@@ -3836,9 +3836,10 @@ LIBGRPC_SRC = \
src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_channel_secure.cc \
+ src/core/ext/filters/client_channel/xds/xds_client.cc \
+ src/core/ext/filters/client_channel/xds/xds_client_stats.cc \
src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cds.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c \
@@ -5304,9 +5305,10 @@ LIBGRPC_UNSECURE_SRC = \
src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc \
src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_channel.cc \
+ src/core/ext/filters/client_channel/xds/xds_client.cc \
+ src/core/ext/filters/client_channel/xds/xds_client_stats.cc \
src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cds.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c \
@@ -22580,7 +22582,7 @@ ifneq ($(OPENSSL_DEP),)
# installing headers to their final destination on the drive. We need this
# otherwise parallel compilation will fail if a source is compiled first.
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc: $(OPENSSL_DEP)
-src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc: $(OPENSSL_DEP)
+src/core/ext/filters/client_channel/xds/xds_channel_secure.cc: $(OPENSSL_DEP)
src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc: $(OPENSSL_DEP)
src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc: $(OPENSSL_DEP)
src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc: $(OPENSSL_DEP)
diff --git a/build.yaml b/build.yaml
index 302b61ec394..b976307028f 100644
--- a/build.yaml
+++ b/build.yaml
@@ -1124,38 +1124,23 @@ filegroups:
- name: grpc_lb_policy_xds
headers:
- src/core/ext/filters/client_channel/lb_policy/xds/xds.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h
src:
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
plugin: grpc_lb_policy_xds
uses:
- - envoy_ads_upb
- grpc_base
- grpc_client_channel
- - grpc_resolver_fake
+ - grpc_xds_client
- name: grpc_lb_policy_xds_secure
headers:
- src/core/ext/filters/client_channel/lb_policy/xds/xds.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h
src:
- src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
- - src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
plugin: grpc_lb_policy_xds
uses:
- - envoy_ads_upb
- grpc_base
- grpc_client_channel
- - grpc_resolver_fake
- - grpc_secure
+ - grpc_xds_client_secure
- name: grpc_lb_subchannel_list
headers:
- src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -1549,6 +1534,39 @@ filegroups:
uses:
- grpc_base
- grpc_server_backward_compatibility
+- name: grpc_xds_client
+ headers:
+ - src/core/ext/filters/client_channel/xds/xds_api.h
+ - src/core/ext/filters/client_channel/xds/xds_channel.h
+ - src/core/ext/filters/client_channel/xds/xds_channel_args.h
+ - src/core/ext/filters/client_channel/xds/xds_client.h
+ - src/core/ext/filters/client_channel/xds/xds_client_stats.h
+ src:
+ - src/core/ext/filters/client_channel/xds/xds_api.cc
+ - src/core/ext/filters/client_channel/xds/xds_channel.cc
+ - src/core/ext/filters/client_channel/xds/xds_client.cc
+ - src/core/ext/filters/client_channel/xds/xds_client_stats.cc
+ uses:
+ - envoy_ads_upb
+ - grpc_base
+ - grpc_client_channel
+- name: grpc_xds_client_secure
+ headers:
+ - src/core/ext/filters/client_channel/xds/xds_api.h
+ - src/core/ext/filters/client_channel/xds/xds_channel.h
+ - src/core/ext/filters/client_channel/xds/xds_channel_args.h
+ - src/core/ext/filters/client_channel/xds/xds_client.h
+ - src/core/ext/filters/client_channel/xds/xds_client_stats.h
+ src:
+ - src/core/ext/filters/client_channel/xds/xds_api.cc
+ - src/core/ext/filters/client_channel/xds/xds_channel_secure.cc
+ - src/core/ext/filters/client_channel/xds/xds_client.cc
+ - src/core/ext/filters/client_channel/xds/xds_client_stats.cc
+ uses:
+ - envoy_ads_upb
+ - grpc_base
+ - grpc_client_channel
+ - grpc_secure
- name: grpcpp_channelz_proto
src:
- src/proto/grpc/channelz/channelz.proto
diff --git a/config.m4 b/config.m4
index c4ed960fdec..32b54d4676c 100644
--- a/config.m4
+++ b/config.m4
@@ -417,9 +417,10 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c \
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc \
- src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_api.cc \
+ src/core/ext/filters/client_channel/xds/xds_channel_secure.cc \
+ src/core/ext/filters/client_channel/xds/xds_client.cc \
+ src/core/ext/filters/client_channel/xds/xds_client_stats.cc \
src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cds.upb.c \
src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c \
@@ -742,6 +743,7 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/fake)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/sockaddr)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/resolver/xds)
+ PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_channel/xds)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/client_idle)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/deadline)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/ext/filters/http)
diff --git a/config.w32 b/config.w32
index b22a1481e86..e6565539d82 100644
--- a/config.w32
+++ b/config.w32
@@ -387,9 +387,10 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\upb-generated\\src\\proto\\grpc\\lb\\v1\\load_balancer.upb.c " +
"src\\core\\ext\\filters\\client_channel\\resolver\\fake\\fake_resolver.cc " +
"src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds.cc " +
- "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds_channel_secure.cc " +
- "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds_client_stats.cc " +
- "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds_load_balancer_api.cc " +
+ "src\\core\\ext\\filters\\client_channel\\xds\\xds_api.cc " +
+ "src\\core\\ext\\filters\\client_channel\\xds\\xds_channel_secure.cc " +
+ "src\\core\\ext\\filters\\client_channel\\xds\\xds_client.cc " +
+ "src\\core\\ext\\filters\\client_channel\\xds\\xds_client_stats.cc " +
"src\\core\\ext\\upb-generated\\envoy\\api\\v2\\auth\\cert.upb.c " +
"src\\core\\ext\\upb-generated\\envoy\\api\\v2\\cds.upb.c " +
"src\\core\\ext\\upb-generated\\envoy\\api\\v2\\cluster\\circuit_breaker.upb.c " +
@@ -743,6 +744,7 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\fake");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\sockaddr");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\resolver\\xds");
+ FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_channel\\xds");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\client_idle");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\deadline");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\ext\\filters\\http");
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 02718c7a40a..454241dde87 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -555,9 +555,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h',
+ 'src/core/ext/filters/client_channel/xds/xds_api.h',
+ 'src/core/ext/filters/client_channel/xds/xds_channel.h',
+ 'src/core/ext/filters/client_channel/xds/xds_channel_args.h',
+ 'src/core/ext/filters/client_channel/xds/xds_client.h',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.h',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.h',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.h',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.h',
@@ -915,9 +917,10 @@ Pod::Spec.new do |s|
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_channel_secure.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
@@ -1290,9 +1293,11 @@ Pod::Spec.new do |s|
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.h',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h',
+ 'src/core/ext/filters/client_channel/xds/xds_api.h',
+ 'src/core/ext/filters/client_channel/xds/xds_channel.h',
+ 'src/core/ext/filters/client_channel/xds/xds_channel_args.h',
+ 'src/core/ext/filters/client_channel/xds/xds_client.h',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.h',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.h',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.h',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.h',
diff --git a/grpc.gemspec b/grpc.gemspec
index 1bfd393a7c6..49554bab1b2 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -485,9 +485,11 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h )
s.files += %w( src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.h )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_api.h )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_channel.h )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_channel_args.h )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_client.h )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_client_stats.h )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.h )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/cds.upb.h )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.h )
@@ -845,9 +847,10 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c )
s.files += %w( src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc )
s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.cc )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc )
- s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_api.cc )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_channel_secure.cc )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_client.cc )
+ s.files += %w( src/core/ext/filters/client_channel/xds/xds_client_stats.cc )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/cds.upb.c )
s.files += %w( src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c )
diff --git a/grpc.gyp b/grpc.gyp
index 73967f47532..46e9ca39cde 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -555,9 +555,10 @@
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_channel_secure.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
@@ -1421,9 +1422,10 @@
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_channel.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
diff --git a/package.xml b/package.xml
index 5a3ad6b229f..adc371c1b5f 100644
--- a/package.xml
+++ b/package.xml
@@ -490,9 +490,11 @@
-
-
-
+
+
+
+
+
@@ -850,9 +852,10 @@
-
-
-
+
+
+
+
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
index 468c8c759fd..5cd2ea86c23 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
@@ -16,43 +16,6 @@
*
*/
-/// Implementation of the gRPC LB policy.
-///
-/// This policy takes as input a list of resolved addresses, which must
-/// include at least one balancer address.
-///
-/// An internal channel (\a lb_channel_) is created for the addresses
-/// from that are balancers. This channel behaves just like a regular
-/// channel that uses pick_first to select from the list of balancer
-/// addresses.
-///
-/// When we get our initial update, we instantiate the internal *streaming*
-/// call to the LB server (whichever address pick_first chose). The call
-/// will be complete when either the balancer sends status or when we cancel
-/// the call (e.g., because we are shutting down). In needed, we retry the
-/// call. If we received at least one valid message from the server, a new
-/// call attempt will be made immediately; otherwise, we apply back-off
-/// delays between attempts.
-///
-/// We maintain an internal child policy (round_robin) instance for distributing
-/// requests across backends. Whenever we receive a new serverlist from
-/// the balancer, we update the child policy with the new list of
-/// addresses.
-///
-/// Once a child policy instance is in place (and getting updated as
-/// described), calls for a pick, or a cancellation will be serviced right away
-/// by forwarding them to the child policy instance. Any time there's no child
-/// policy available (i.e., right after the creation of the xDS policy), pick
-/// requests are added to a list of pending picks to be flushed and serviced
-/// when the child policy instance becomes available.
-///
-/// \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
-/// high level design and details.
-
-// With the addition of a libuv endpoint, sockaddr.h now includes uv.h when
-// using that endpoint. Because of various transitive includes in uv.h,
-// including windows.h on Windows, uv.h must be included before other system
-// headers. Therefore, sockaddr.h must always be included first.
#include
#include "src/core/lib/iomgr/sockaddr.h"
@@ -62,25 +25,21 @@
#include
#include
-#include
#include
#include
#include
#include
-#include "include/grpc/support/alloc.h"
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/client_channel/lb_policy.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h"
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"
#include "src/core/ext/filters/client_channel/lb_policy_registry.h"
#include "src/core/ext/filters/client_channel/parse_address.h"
-#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/service_config.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
@@ -103,12 +62,7 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/static_metadata.h"
-#define GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS 1
-#define GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER 1.6
-#define GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS 120
-#define GRPC_XDS_RECONNECT_JITTER 0.2
#define GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS 10000
-#define GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS 1000
#define GRPC_XDS_DEFAULT_LOCALITY_RETENTION_INTERVAL_MS (15 * 60 * 1000)
#define GRPC_XDS_DEFAULT_FAILOVER_TIMEOUT_MS 10000
@@ -157,220 +111,7 @@ class XdsLb : public LoadBalancingPolicy {
void ResetBackoffLocked() override;
private:
- // Contains a channel to the LB server and all the data related to the
- // channel. Holds a ref to the xds policy.
- class LbChannelState : public InternallyRefCounted {
- public:
- // An LB call wrapper that can restart a call upon failure. Holds a ref to
- // the LB channel. The template parameter is the kind of wrapped LB call.
- template
- class RetryableLbCall : public InternallyRefCounted> {
- public:
- explicit RetryableLbCall(RefCountedPtr lb_chand);
-
- void Orphan() override;
-
- void OnCallFinishedLocked();
-
- T* lb_calld() const { return lb_calld_.get(); }
- LbChannelState* lb_chand() const { return lb_chand_.get(); }
-
- private:
- void StartNewCallLocked();
- void StartRetryTimerLocked();
- static void OnRetryTimerLocked(void* arg, grpc_error* error);
-
- // The wrapped LB call that talks to the LB server. It's instantiated
- // every time we start a new call. It's null during call retry backoff.
- OrphanablePtr lb_calld_;
- // The owing LB channel.
- RefCountedPtr lb_chand_;
-
- // Retry state.
- BackOff backoff_;
- grpc_timer retry_timer_;
- grpc_closure on_retry_timer_;
- bool retry_timer_callback_pending_ = false;
-
- bool shutting_down_ = false;
- };
-
- // Contains an EDS call to the LB server.
- class EdsCallState : public InternallyRefCounted {
- public:
- // The ctor and dtor should not be used directly.
- explicit EdsCallState(
- RefCountedPtr> parent);
- ~EdsCallState() override;
-
- void Orphan() override;
-
- RetryableLbCall* parent() const { return parent_.get(); }
- LbChannelState* lb_chand() const { return parent_->lb_chand(); }
- XdsLb* xdslb_policy() const { return lb_chand()->xdslb_policy(); }
- bool seen_response() const { return seen_response_; }
-
- private:
- static void OnResponseReceivedLocked(void* arg, grpc_error* error);
- static void OnStatusReceivedLocked(void* arg, grpc_error* error);
-
- bool IsCurrentCallOnChannel() const;
-
- // The owning RetryableLbCall<>.
- RefCountedPtr> parent_;
- bool seen_response_ = false;
-
- // Always non-NULL.
- grpc_call* lb_call_;
-
- // recv_initial_metadata
- grpc_metadata_array initial_metadata_recv_;
-
- // send_message
- grpc_byte_buffer* send_message_payload_ = nullptr;
-
- // recv_message
- grpc_byte_buffer* recv_message_payload_ = nullptr;
- grpc_closure on_response_received_;
-
- // recv_trailing_metadata
- grpc_metadata_array trailing_metadata_recv_;
- grpc_status_code status_code_;
- grpc_slice status_details_;
- grpc_closure on_status_received_;
- };
-
- // Contains an LRS call to the LB server.
- class LrsCallState : public InternallyRefCounted {
- public:
- // The ctor and dtor should not be used directly.
- explicit LrsCallState(
- RefCountedPtr> parent);
- ~LrsCallState() override;
-
- void Orphan() override;
-
- void MaybeStartReportingLocked();
-
- RetryableLbCall* parent() { return parent_.get(); }
- LbChannelState* lb_chand() const { return parent_->lb_chand(); }
- XdsLb* xdslb_policy() const { return lb_chand()->xdslb_policy(); }
- bool seen_response() const { return seen_response_; }
-
- private:
- // Reports client-side load stats according to a fixed interval.
- class Reporter : public InternallyRefCounted {
- public:
- Reporter(RefCountedPtr parent,
- grpc_millis report_interval)
- : parent_(std::move(parent)), report_interval_(report_interval) {
- GRPC_CLOSURE_INIT(
- &on_next_report_timer_, OnNextReportTimerLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- GRPC_CLOSURE_INIT(
- &on_report_done_, OnReportDoneLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- ScheduleNextReportLocked();
- }
-
- void Orphan() override;
-
- private:
- void ScheduleNextReportLocked();
- static void OnNextReportTimerLocked(void* arg, grpc_error* error);
- void SendReportLocked();
- static void OnReportDoneLocked(void* arg, grpc_error* error);
-
- bool IsCurrentReporterOnCall() const {
- return this == parent_->reporter_.get();
- }
- XdsLb* xdslb_policy() const { return parent_->xdslb_policy(); }
-
- // The owning LRS call.
- RefCountedPtr parent_;
-
- // The load reporting state.
- const grpc_millis report_interval_;
- bool last_report_counters_were_zero_ = false;
- bool next_report_timer_callback_pending_ = false;
- grpc_timer next_report_timer_;
- grpc_closure on_next_report_timer_;
- grpc_closure on_report_done_;
- };
-
- static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
- static void OnResponseReceivedLocked(void* arg, grpc_error* error);
- static void OnStatusReceivedLocked(void* arg, grpc_error* error);
-
- bool IsCurrentCallOnChannel() const;
-
- // The owning RetryableLbCall<>.
- RefCountedPtr> parent_;
- bool seen_response_ = false;
-
- // Always non-NULL.
- grpc_call* lb_call_;
-
- // recv_initial_metadata
- grpc_metadata_array initial_metadata_recv_;
-
- // send_message
- grpc_byte_buffer* send_message_payload_ = nullptr;
- grpc_closure on_initial_request_sent_;
-
- // recv_message
- grpc_byte_buffer* recv_message_payload_ = nullptr;
- grpc_closure on_response_received_;
-
- // recv_trailing_metadata
- grpc_metadata_array trailing_metadata_recv_;
- grpc_status_code status_code_;
- grpc_slice status_details_;
- grpc_closure on_status_received_;
-
- // Load reporting state.
- UniquePtr cluster_name_;
- grpc_millis load_reporting_interval_ = 0;
- OrphanablePtr reporter_;
- };
-
- LbChannelState(RefCountedPtr xdslb_policy, const char* balancer_name,
- const grpc_channel_args& args);
- ~LbChannelState();
-
- void Orphan() override;
-
- grpc_channel* channel() const { return channel_; }
- XdsLb* xdslb_policy() const { return xdslb_policy_.get(); }
- EdsCallState* eds_calld() const { return eds_calld_->lb_calld(); }
- LrsCallState* lrs_calld() const { return lrs_calld_->lb_calld(); }
-
- bool IsCurrentChannel() const {
- return this == xdslb_policy_->lb_chand_.get();
- }
- bool IsPendingChannel() const {
- return this == xdslb_policy_->pending_lb_chand_.get();
- }
- bool HasActiveEdsCall() const { return eds_calld_->lb_calld() != nullptr; }
-
- void StartConnectivityWatchLocked();
- void CancelConnectivityWatchLocked();
-
- private:
- class StateWatcher;
-
- // The owning LB policy.
- RefCountedPtr xdslb_policy_;
-
- // The channel and its status.
- grpc_channel* channel_;
- bool shutting_down_ = false;
- StateWatcher* watcher_ = nullptr;
-
- // The retryable XDS calls to the LB server.
- OrphanablePtr> eds_calld_;
- OrphanablePtr> lrs_calld_;
- };
+ class EndpointWatcher;
// We need this wrapper for the following reasons:
// 1. To process per-locality load reporting.
@@ -488,7 +229,9 @@ class XdsLb : public LoadBalancingPolicy {
const grpc_channel_args& args) override;
void UpdateState(grpc_connectivity_state state,
UniquePtr picker) override;
- void RequestReresolution() override;
+ // This is a no-op, because we get the addresses from the xds
+ // client, which is a watch-based API.
+ void RequestReresolution() override {}
void AddTraceEvent(TraceSeverity severity,
StringView message) override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
@@ -627,21 +370,12 @@ class XdsLb : public LoadBalancingPolicy {
void ShutdownLocked() override;
- // Helper function used in UpdateLocked().
- void ProcessAddressesAndChannelArgsLocked(ServerAddressList addresses,
- const grpc_channel_args& args);
-
// Parses the xds config given the JSON node of the first child of XdsConfig.
// If parsing succeeds, updates \a balancer_name, and updates \a
// child_policy_config_ and \a fallback_policy_config_ if they are also
// found. Does nothing upon failure.
void ParseLbConfig(const ParsedXdsConfig* xds_config);
- LbChannelState* LatestLbChannel() const {
- return pending_lb_chand_ != nullptr ? pending_lb_chand_.get()
- : lb_chand_.get();
- }
-
// Methods for dealing with fallback state.
void MaybeCancelFallbackAtStartupChecks();
static void OnFallbackTimerLocked(void* arg, grpc_error* error);
@@ -657,25 +391,24 @@ class XdsLb : public LoadBalancingPolicy {
UniquePtr balancer_name_;
// Current channel args from the resolver.
- grpc_channel_args* args_ = nullptr;
+ const grpc_channel_args* args_ = nullptr;
// Internal state.
bool shutting_down_ = false;
- // The channel for communicating with the LB server.
- OrphanablePtr lb_chand_;
- OrphanablePtr pending_lb_chand_;
-
- // Timeout in milliseconds for the LB call. 0 means no deadline.
- const grpc_millis lb_call_timeout_ms_;
+ // The xds client.
+ OrphanablePtr xds_client_;
+ // A pointer to the endpoint watcher, to be used when cancelling the watch.
+ // Note that this is not owned, so this pointer must never be derefernced.
+ EndpointWatcher* endpoint_watcher_ = nullptr;
// Whether the checks for fallback at startup are ALL pending. There are
// several cases where this can be reset:
// 1. The fallback timer fires, we enter fallback mode.
- // 2. Before the fallback timer fires, the LB channel becomes
- // TRANSIENT_FAILURE or the LB call fails, we enter fallback mode.
+ // 2. Before the fallback timer fires, the endpoint watcher reports an
+ // error, we enter fallback mode.
// 3. Before the fallback timer fires, if any child policy in the locality map
- // becomes READY, we cancel the fallback timer.
+ // becomes READY, we cancel the fallback timer.
bool fallback_at_startup_checks_pending_ = false;
// Timeout in milliseconds for before using fallback backend addresses.
// 0 means not using fallback.
@@ -839,7 +572,6 @@ void XdsLb::FallbackHelper::RequestReresolution() {
"[xdslb %p] Re-resolution requested from the fallback policy (%p).",
parent_.get(), child_);
}
- GPR_ASSERT(parent_->lb_chand_ != nullptr);
parent_->channel_control_helper()->RequestReresolution();
}
@@ -853,931 +585,75 @@ void XdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity,
}
//
-// XdsLb::LbChannelState::StateWatcher
+// XdsLb::EndpointWatcher
//
-class XdsLb::LbChannelState::StateWatcher
- : public AsyncConnectivityStateWatcherInterface {
+class XdsLb::EndpointWatcher : public XdsClient::EndpointWatcherInterface {
public:
- explicit StateWatcher(RefCountedPtr parent)
- : AsyncConnectivityStateWatcherInterface(
- grpc_combiner_scheduler(parent->xdslb_policy_->combiner())),
- parent_(std::move(parent)) {}
-
- private:
- void OnConnectivityStateChange(grpc_connectivity_state new_state) override {
- if (!parent_->shutting_down_ &&
- parent_->xdslb_policy_->fallback_at_startup_checks_pending_ &&
- new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
- // In TRANSIENT_FAILURE. Cancel the fallback timer and go into
- // fallback mode immediately.
- gpr_log(GPR_INFO,
- "[xdslb %p] Balancer channel in state TRANSIENT_FAILURE; "
- "entering fallback mode",
- parent_->xdslb_policy_.get());
- parent_->xdslb_policy_->fallback_at_startup_checks_pending_ = false;
- grpc_timer_cancel(&parent_->xdslb_policy_->lb_fallback_timer_);
- parent_->xdslb_policy_->UpdateFallbackPolicyLocked();
- parent_->CancelConnectivityWatchLocked();
- }
- }
-
- RefCountedPtr parent_;
-};
-
-//
-// XdsLb::LbChannelState
-//
-
-XdsLb::LbChannelState::LbChannelState(RefCountedPtr xdslb_policy,
- const char* balancer_name,
- const grpc_channel_args& args)
- : InternallyRefCounted(&grpc_lb_xds_trace),
- xdslb_policy_(std::move(xdslb_policy)) {
- channel_ = CreateXdsBalancerChannel(balancer_name, args);
- GPR_ASSERT(channel_ != nullptr);
- eds_calld_.reset(New>(
- Ref(DEBUG_LOCATION, "LbChannelState+eds")));
- lrs_calld_.reset(New>(
- Ref(DEBUG_LOCATION, "LbChannelState+lrs")));
-}
-
-XdsLb::LbChannelState::~LbChannelState() {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO, "[xdslb %p] Destroying LB channel %p", xdslb_policy(),
- this);
- }
- grpc_channel_destroy(channel_);
-}
-
-void XdsLb::LbChannelState::Orphan() {
- shutting_down_ = true;
- eds_calld_.reset();
- lrs_calld_.reset();
- Unref(DEBUG_LOCATION, "LbChannelState+orphaned");
-}
-
-void XdsLb::LbChannelState::StartConnectivityWatchLocked() {
- grpc_channel_element* client_channel_elem =
- grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
- GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
- auto watcher = MakeOrphanable(Ref());
- watcher_ = watcher.get();
- grpc_client_channel_start_connectivity_watch(
- client_channel_elem, GRPC_CHANNEL_IDLE, std::move(watcher));
-}
-
-void XdsLb::LbChannelState::CancelConnectivityWatchLocked() {
- grpc_channel_element* client_channel_elem =
- grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
- GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
- grpc_client_channel_stop_connectivity_watch(client_channel_elem, watcher_);
-}
-
-//
-// XdsLb::LbChannelState::RetryableLbCall<>
-//
-
-template
-XdsLb::LbChannelState::RetryableLbCall::RetryableLbCall(
- RefCountedPtr lb_chand)
- : lb_chand_(std::move(lb_chand)),
- backoff_(
- BackOff::Options()
- .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
- 1000)
- .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
- .set_jitter(GRPC_XDS_RECONNECT_JITTER)
- .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
- GRPC_CLOSURE_INIT(
- &on_retry_timer_, OnRetryTimerLocked, this,
- grpc_combiner_scheduler(lb_chand_->xdslb_policy()->combiner()));
- StartNewCallLocked();
-}
-
-template
-void XdsLb::LbChannelState::RetryableLbCall::Orphan() {
- shutting_down_ = true;
- lb_calld_.reset();
- if (retry_timer_callback_pending_) grpc_timer_cancel(&retry_timer_);
- this->Unref(DEBUG_LOCATION, "RetryableLbCall+orphaned");
-}
-
-template
-void XdsLb::LbChannelState::RetryableLbCall::OnCallFinishedLocked() {
- const bool seen_response = lb_calld_->seen_response();
- lb_calld_.reset();
- if (seen_response) {
- // If we lost connection to the LB server, reset backoff and restart the LB
- // call immediately.
- backoff_.Reset();
- StartNewCallLocked();
- } else {
- // If we failed to connect to the LB server, retry later.
- StartRetryTimerLocked();
- }
-}
-
-template
-void XdsLb::LbChannelState::RetryableLbCall::StartNewCallLocked() {
- if (shutting_down_) return;
- GPR_ASSERT(lb_chand_->channel_ != nullptr);
- GPR_ASSERT(lb_calld_ == nullptr);
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Start new call from retryable call (lb_chand: %p, "
- "retryable call: %p)",
- lb_chand()->xdslb_policy(), lb_chand(), this);
- }
- lb_calld_ = MakeOrphanable(
- this->Ref(DEBUG_LOCATION, "RetryableLbCall+start_new_call"));
-}
-
-template
-void XdsLb::LbChannelState::RetryableLbCall::StartRetryTimerLocked() {
- if (shutting_down_) return;
- const grpc_millis next_attempt_time = backoff_.NextAttemptTime();
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- grpc_millis timeout = GPR_MAX(next_attempt_time - ExecCtx::Get()->Now(), 0);
- gpr_log(GPR_INFO,
- "[xdslb %p] Failed to connect to LB server (lb_chand: %p) "
- "retry timer will fire in %" PRId64 "ms.",
- lb_chand()->xdslb_policy(), lb_chand(), timeout);
- }
- this->Ref(DEBUG_LOCATION, "RetryableLbCall+retry_timer_start").release();
- grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
- retry_timer_callback_pending_ = true;
-}
-
-template
-void XdsLb::LbChannelState::RetryableLbCall::OnRetryTimerLocked(
- void* arg, grpc_error* error) {
- RetryableLbCall* lb_calld = static_cast(arg);
- lb_calld->retry_timer_callback_pending_ = false;
- if (!lb_calld->shutting_down_ && error == GRPC_ERROR_NONE) {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Retry timer fires (lb_chand: %p, retryable call: %p)",
- lb_calld->lb_chand()->xdslb_policy(), lb_calld->lb_chand(),
- lb_calld);
- }
- lb_calld->StartNewCallLocked();
- }
- lb_calld->Unref(DEBUG_LOCATION, "RetryableLbCall+retry_timer_done");
-}
-
-//
-// XdsLb::LbChannelState::EdsCallState
-//
-
-XdsLb::LbChannelState::EdsCallState::EdsCallState(
- RefCountedPtr> parent)
- : InternallyRefCounted(&grpc_lb_xds_trace),
- parent_(std::move(parent)) {
- // Init the LB call. Note that the LB call will progress every time there's
- // activity in xdslb_policy()->interested_parties(), which is comprised of
- // the polling entities from client_channel.
- GPR_ASSERT(xdslb_policy() != nullptr);
- GPR_ASSERT(xdslb_policy()->server_name_ != nullptr);
- GPR_ASSERT(xdslb_policy()->server_name_[0] != '\0');
- const grpc_millis deadline =
- xdslb_policy()->lb_call_timeout_ms_ == 0
- ? GRPC_MILLIS_INF_FUTURE
- : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_;
- // Create an LB call with the specified method name.
- lb_call_ = grpc_channel_create_pollset_set_call(
- lb_chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
- xdslb_policy()->interested_parties(),
- GRPC_MDSTR_SLASH_ENVOY_DOT_API_DOT_V2_DOT_ENDPOINTDISCOVERYSERVICE_SLASH_STREAMENDPOINTS,
- nullptr, deadline, nullptr);
- GPR_ASSERT(lb_call_ != nullptr);
- // Init the LB call request payload.
- grpc_slice request_payload_slice =
- XdsEdsRequestCreateAndEncode(xdslb_policy()->server_name_);
- send_message_payload_ =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
- // Init other data associated with the LB call.
- grpc_metadata_array_init(&initial_metadata_recv_);
- grpc_metadata_array_init(&trailing_metadata_recv_);
- GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- // Start the call.
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Starting EDS call (lb_chand: %p, lb_calld: %p, "
- "lb_call: %p)",
- xdslb_policy(), lb_chand(), this, lb_call_);
- }
- // Create the ops.
- grpc_call_error call_error;
- grpc_op ops[3];
- memset(ops, 0, sizeof(ops));
- // Op: send initial metadata.
- grpc_op* op = ops;
- op->op = GRPC_OP_SEND_INITIAL_METADATA;
- op->data.send_initial_metadata.count = 0;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // Op: send request message.
- GPR_ASSERT(send_message_payload_ != nullptr);
- op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message.send_message = send_message_payload_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- call_error = grpc_call_start_batch_and_execute(lb_call_, ops,
- (size_t)(op - ops), nullptr);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- // Op: recv initial metadata.
- op = ops;
- op->op = GRPC_OP_RECV_INITIAL_METADATA;
- op->data.recv_initial_metadata.recv_initial_metadata =
- &initial_metadata_recv_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // Op: recv response.
- op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message.recv_message = &recv_message_payload_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- Ref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked").release();
- call_error = grpc_call_start_batch_and_execute(
- lb_call_, ops, (size_t)(op - ops), &on_response_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- // Op: recv server status.
- op = ops;
- op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
- op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
- op->data.recv_status_on_client.status = &status_code_;
- op->data.recv_status_on_client.status_details = &status_details_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // This callback signals the end of the LB call, so it relies on the initial
- // ref instead of a new ref. When it's invoked, it's the initial ref that is
- // unreffed.
- call_error = grpc_call_start_batch_and_execute(
- lb_call_, ops, (size_t)(op - ops), &on_status_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
+ explicit EndpointWatcher(RefCountedPtr xds_policy)
+ : xds_policy_(std::move(xds_policy)) {}
-XdsLb::LbChannelState::EdsCallState::~EdsCallState() {
- grpc_metadata_array_destroy(&initial_metadata_recv_);
- grpc_metadata_array_destroy(&trailing_metadata_recv_);
- grpc_byte_buffer_destroy(send_message_payload_);
- grpc_byte_buffer_destroy(recv_message_payload_);
- grpc_slice_unref_internal(status_details_);
- GPR_ASSERT(lb_call_ != nullptr);
- grpc_call_unref(lb_call_);
-}
-
-void XdsLb::LbChannelState::EdsCallState::Orphan() {
- GPR_ASSERT(lb_call_ != nullptr);
- // If we are here because xdslb_policy wants to cancel the call,
- // on_status_received_ will complete the cancellation and clean up. Otherwise,
- // we are here because xdslb_policy has to orphan a failed call, then the
- // following cancellation will be a no-op.
- grpc_call_cancel(lb_call_, nullptr);
- // Note that the initial ref is hold by on_status_received_. So the
- // corresponding unref happens in on_status_received_ instead of here.
-}
-
-void XdsLb::LbChannelState::EdsCallState::OnResponseReceivedLocked(
- void* arg, grpc_error* error) {
- EdsCallState* eds_calld = static_cast(arg);
- LbChannelState* lb_chand = eds_calld->lb_chand();
- XdsLb* xdslb_policy = eds_calld->xdslb_policy();
- // Empty payload means the LB call was cancelled.
- if (!eds_calld->IsCurrentCallOnChannel() ||
- eds_calld->recv_message_payload_ == nullptr) {
- eds_calld->Unref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked");
- return;
- }
- // Read the response.
- grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, eds_calld->recv_message_payload_);
- grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
- grpc_byte_buffer_reader_destroy(&bbr);
- grpc_byte_buffer_destroy(eds_calld->recv_message_payload_);
- eds_calld->recv_message_payload_ = nullptr;
- // TODO(juanlishen): When we convert this to use the xds protocol, the
- // balancer will send us a fallback timeout such that we should go into
- // fallback mode if we have lost contact with the balancer after a certain
- // period of time. We will need to save the timeout value here, and then
- // when the balancer call ends, we will need to start a timer for the
- // specified period of time, and if the timer fires, we go into fallback
- // mode. We will also need to cancel the timer when we receive a serverlist
- // from the balancer.
- // This anonymous lambda is a hack to avoid the usage of goto.
- [&]() {
- // Parse the response.
- XdsUpdate update;
- grpc_error* parse_error =
- XdsEdsResponseDecodeAndParse(response_slice, &update);
- if (parse_error != GRPC_ERROR_NONE) {
- gpr_log(GPR_ERROR, "[xdslb %p] EDS response parsing failed. error=%s",
- xdslb_policy, grpc_error_string(parse_error));
- GRPC_ERROR_UNREF(parse_error);
- return;
- }
- if (update.priority_list_update.empty() && !update.drop_all) {
- char* response_slice_str =
- grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX);
- gpr_log(GPR_ERROR,
- "[xdslb %p] EDS response '%s' doesn't contain any valid locality "
- "but doesn't require to drop all calls. Ignoring.",
- xdslb_policy, response_slice_str);
- gpr_free(response_slice_str);
- return;
- }
- eds_calld->seen_response_ = true;
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] EDS response with %" PRIuPTR
- " priorities and %" PRIuPTR
- " drop categories received (drop_all=%d)",
- xdslb_policy, update.priority_list_update.size(),
- update.drop_config->drop_category_list().size(), update.drop_all);
- for (size_t priority = 0; priority < update.priority_list_update.size();
- ++priority) {
- const auto* locality_map_update =
- update.priority_list_update.Find(static_cast(priority));
- gpr_log(GPR_INFO,
- "[xdslb %p] Priority %" PRIuPTR " contains %" PRIuPTR
- " localities",
- xdslb_policy, priority, locality_map_update->size());
- size_t locality_count = 0;
- for (const auto& p : locality_map_update->localities) {
- const auto& locality = p.second;
- gpr_log(GPR_INFO,
- "[xdslb %p] Priority %" PRIuPTR ", locality %" PRIuPTR
- " %s contains %" PRIuPTR " server addresses",
- xdslb_policy, priority, locality_count,
- locality.name->AsHumanReadableString(),
- locality.serverlist.size());
- for (size_t i = 0; i < locality.serverlist.size(); ++i) {
- char* ipport;
- grpc_sockaddr_to_string(&ipport, &locality.serverlist[i].address(),
- false);
- gpr_log(GPR_INFO,
- "[xdslb %p] Priority %" PRIuPTR ", locality %" PRIuPTR
- " %s, server address %" PRIuPTR ": %s",
- xdslb_policy, priority, locality_count,
- locality.name->AsHumanReadableString(), i, ipport);
- gpr_free(ipport);
- }
- ++locality_count;
- }
- for (size_t i = 0; i < update.drop_config->drop_category_list().size();
- ++i) {
- const XdsDropConfig::DropCategory& drop_category =
- update.drop_config->drop_category_list()[i];
- gpr_log(GPR_INFO,
- "[xdslb %p] Drop category %s has drop rate %d per million",
- xdslb_policy, drop_category.name.get(),
- drop_category.parts_per_million);
- }
- }
- }
- // Pending LB channel receives a response; promote it.
- // Note that this call can't be on a discarded pending channel, because
- // such channels don't have any current call but we have checked this call
- // is a current call.
- if (!lb_chand->IsCurrentChannel()) {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Pending LB channel %p receives EDS response; "
- "promoting it to replace current LB channel %p",
- xdslb_policy, lb_chand, xdslb_policy->lb_chand_.get());
- }
- // TODO(juanlishen): Maybe promote the pending LB channel when the
- // response results a READY locality map.
- xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_);
- }
- // At this point, lb_chand must be the current LB channel, so try to start
- // load reporting.
- LrsCallState* lrs_calld = lb_chand->lrs_calld_->lb_calld();
- if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
+ void OnEndpointChanged(EdsUpdate update) override {
// If the balancer tells us to drop all the calls, we should exit fallback
// mode immediately.
- if (update.drop_all) xdslb_policy->MaybeExitFallbackMode();
+ if (update.drop_all) xds_policy_->MaybeExitFallbackMode();
// Update the drop config.
const bool drop_config_changed =
- xdslb_policy->drop_config_ == nullptr ||
- *xdslb_policy->drop_config_ != *update.drop_config;
- xdslb_policy->drop_config_ = std::move(update.drop_config);
+ xds_policy_->drop_config_ == nullptr ||
+ *xds_policy_->drop_config_ != *update.drop_config;
+ xds_policy_->drop_config_ = std::move(update.drop_config);
// Ignore identical locality update.
- if (xdslb_policy->priority_list_update_ == update.priority_list_update) {
+ if (xds_policy_->priority_list_update_ == update.priority_list_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO,
"[xdslb %p] Incoming locality update identical to current, "
"ignoring. (drop_config_changed=%d)",
- xdslb_policy, drop_config_changed);
+ xds_policy_.get(), drop_config_changed);
}
if (drop_config_changed) {
- xdslb_policy->priority_list_.UpdateXdsPickerLocked();
+ xds_policy_->priority_list_.UpdateXdsPickerLocked();
}
return;
}
// Update the priority list.
- xdslb_policy->priority_list_update_ =
- std::move(update.priority_list_update);
- xdslb_policy->priority_list_.UpdateLocked();
- }();
- grpc_slice_unref_internal(response_slice);
- if (xdslb_policy->shutting_down_) {
- eds_calld->Unref(DEBUG_LOCATION,
- "EDS+OnResponseReceivedLocked+xds_shutdown");
- return;
+ xds_policy_->priority_list_update_ = std::move(update.priority_list_update);
+ xds_policy_->priority_list_.UpdateLocked();
}
- // Keep listening for serverlist updates.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_RECV_MESSAGE;
- op.data.recv_message.recv_message = &eds_calld->recv_message_payload_;
- op.flags = 0;
- op.reserved = nullptr;
- GPR_ASSERT(eds_calld->lb_call_ != nullptr);
- // Reuse the "EDS+OnResponseReceivedLocked" ref taken in ctor.
- const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- eds_calld->lb_call_, &op, 1, &eds_calld->on_response_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-void XdsLb::LbChannelState::EdsCallState::OnStatusReceivedLocked(
- void* arg, grpc_error* error) {
- EdsCallState* eds_calld = static_cast(arg);
- LbChannelState* lb_chand = eds_calld->lb_chand();
- XdsLb* xdslb_policy = eds_calld->xdslb_policy();
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- char* status_details = grpc_slice_to_c_string(eds_calld->status_details_);
- gpr_log(GPR_INFO,
- "[xdslb %p] EDS call status received. Status = %d, details "
- "= '%s', (lb_chand: %p, eds_calld: %p, lb_call: %p), error '%s'",
- xdslb_policy, eds_calld->status_code_, status_details, lb_chand,
- eds_calld, eds_calld->lb_call_, grpc_error_string(error));
- gpr_free(status_details);
- }
- // Ignore status from a stale call.
- if (eds_calld->IsCurrentCallOnChannel()) {
- // Because this call is the current one on the channel, the channel can't
- // have been swapped out; otherwise, the call should have been reset.
- GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel());
- if (lb_chand != xdslb_policy->LatestLbChannel()) {
- // This channel must be the current one and there is a pending one. Swap
- // in the pending one and we are done.
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Promoting pending LB channel %p to replace "
- "current LB channel %p",
- xdslb_policy, lb_chand, xdslb_policy->lb_chand_.get());
- }
- xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_);
- } else {
- // This channel is the most recently created one. Try to restart the call
- // and reresolve.
- eds_calld->parent_->OnCallFinishedLocked();
- xdslb_policy->channel_control_helper()->RequestReresolution();
- // If the fallback-at-startup checks are pending, go into fallback mode
- // immediately. This short-circuits the timeout for the
- // fallback-at-startup case.
- if (xdslb_policy->fallback_at_startup_checks_pending_) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Balancer call finished; entering fallback mode",
- xdslb_policy);
- xdslb_policy->fallback_at_startup_checks_pending_ = false;
- grpc_timer_cancel(&xdslb_policy->lb_fallback_timer_);
- lb_chand->CancelConnectivityWatchLocked();
- xdslb_policy->UpdateFallbackPolicyLocked();
- }
- }
- }
- eds_calld->Unref(DEBUG_LOCATION, "EDS+OnStatusReceivedLocked");
-}
-
-bool XdsLb::LbChannelState::EdsCallState::IsCurrentCallOnChannel() const {
- // If the retryable EDS call is null (which only happens when the LB channel
- // is shutting down), all the EDS calls are stale.
- if (lb_chand()->eds_calld_ == nullptr) return false;
- return this == lb_chand()->eds_calld_->lb_calld();
-}
-
-//
-// XdsLb::LbChannelState::LrsCallState::Reporter
-//
-
-void XdsLb::LbChannelState::LrsCallState::Reporter::Orphan() {
- if (next_report_timer_callback_pending_) {
- grpc_timer_cancel(&next_report_timer_);
- }
-}
-
-void XdsLb::LbChannelState::LrsCallState::Reporter::ScheduleNextReportLocked() {
- const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
- grpc_timer_init(&next_report_timer_, next_report_time,
- &on_next_report_timer_);
- next_report_timer_callback_pending_ = true;
-}
-
-void XdsLb::LbChannelState::LrsCallState::Reporter::OnNextReportTimerLocked(
- void* arg, grpc_error* error) {
- Reporter* self = static_cast(arg);
- self->next_report_timer_callback_pending_ = false;
- if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
- self->Unref(DEBUG_LOCATION, "Reporter+timer");
- return;
- }
- self->SendReportLocked();
-}
-
-void XdsLb::LbChannelState::LrsCallState::Reporter::SendReportLocked() {
- // Create a request that contains the load report.
- grpc_slice request_payload_slice = XdsLrsRequestCreateAndEncode(
- parent_->cluster_name_.get(), &xdslb_policy()->client_stats_);
- // Skip client load report if the counters were all zero in the last
- // report and they are still zero in this one.
- const bool old_val = last_report_counters_were_zero_;
- last_report_counters_were_zero_ = static_cast(
- grpc_slice_eq(request_payload_slice, grpc_empty_slice()));
- if (old_val && last_report_counters_were_zero_) {
- ScheduleNextReportLocked();
- return;
- }
- parent_->send_message_payload_ =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
- // Send the report.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_SEND_MESSAGE;
- op.data.send_message.send_message = parent_->send_message_payload_;
- grpc_call_error call_error = grpc_call_start_batch_and_execute(
- parent_->lb_call_, &op, 1, &on_report_done_);
- if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
- gpr_log(GPR_ERROR,
- "[xdslb %p] lb_calld=%p call_error=%d sending client load report",
- xdslb_policy(), this, call_error);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- }
-}
-
-void XdsLb::LbChannelState::LrsCallState::Reporter::OnReportDoneLocked(
- void* arg, grpc_error* error) {
- Reporter* self = static_cast(arg);
- grpc_byte_buffer_destroy(self->parent_->send_message_payload_);
- self->parent_->send_message_payload_ = nullptr;
- if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
- // If this reporter is no longer the current one on the call, the reason
- // might be that it was orphaned for a new one due to config update.
- if (!self->IsCurrentReporterOnCall()) {
- self->parent_->MaybeStartReportingLocked();
- }
- self->Unref(DEBUG_LOCATION, "Reporter+report_done");
- return;
- }
- self->ScheduleNextReportLocked();
-}
-
-//
-// XdsLb::LbChannelState::LrsCallState
-//
-XdsLb::LbChannelState::LrsCallState::LrsCallState(
- RefCountedPtr> parent)
- : InternallyRefCounted(&grpc_lb_xds_trace),
- parent_(std::move(parent)) {
- // Init the LB call. Note that the LB call will progress every time there's
- // activity in xdslb_policy()->interested_parties(), which is comprised of
- // the polling entities from client_channel.
- GPR_ASSERT(xdslb_policy() != nullptr);
- GPR_ASSERT(xdslb_policy()->server_name_ != nullptr);
- GPR_ASSERT(xdslb_policy()->server_name_[0] != '\0');
- const grpc_millis deadline =
- xdslb_policy()->lb_call_timeout_ms_ == 0
- ? GRPC_MILLIS_INF_FUTURE
- : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_;
- lb_call_ = grpc_channel_create_pollset_set_call(
- lb_chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
- xdslb_policy()->interested_parties(),
- GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_LOAD_STATS_DOT_V2_DOT_LOADREPORTINGSERVICE_SLASH_STREAMLOADSTATS,
- nullptr, deadline, nullptr);
- GPR_ASSERT(lb_call_ != nullptr);
- // Init the LB call request payload.
- grpc_slice request_payload_slice =
- XdsLrsRequestCreateAndEncode(xdslb_policy()->server_name_);
- send_message_payload_ =
- grpc_raw_byte_buffer_create(&request_payload_slice, 1);
- grpc_slice_unref_internal(request_payload_slice);
- // Init other data associated with the LRS call.
- grpc_metadata_array_init(&initial_metadata_recv_);
- grpc_metadata_array_init(&trailing_metadata_recv_);
- GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSentLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
- grpc_combiner_scheduler(xdslb_policy()->combiner()));
- // Start the call.
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Starting LRS call (lb_chand: %p, lb_calld: %p, "
- "lb_call: %p)",
- xdslb_policy(), lb_chand(), this, lb_call_);
- }
- // Create the ops.
- grpc_call_error call_error;
- grpc_op ops[3];
- memset(ops, 0, sizeof(ops));
- // Op: send initial metadata.
- grpc_op* op = ops;
- op->op = GRPC_OP_SEND_INITIAL_METADATA;
- op->data.send_initial_metadata.count = 0;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // Op: send request message.
- GPR_ASSERT(send_message_payload_ != nullptr);
- op->op = GRPC_OP_SEND_MESSAGE;
- op->data.send_message.send_message = send_message_payload_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
- call_error = grpc_call_start_batch_and_execute(
- lb_call_, ops, (size_t)(op - ops), &on_initial_request_sent_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- // Op: recv initial metadata.
- op = ops;
- op->op = GRPC_OP_RECV_INITIAL_METADATA;
- op->data.recv_initial_metadata.recv_initial_metadata =
- &initial_metadata_recv_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // Op: recv response.
- op->op = GRPC_OP_RECV_MESSAGE;
- op->data.recv_message.recv_message = &recv_message_payload_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release();
- call_error = grpc_call_start_batch_and_execute(
- lb_call_, ops, (size_t)(op - ops), &on_response_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
- // Op: recv server status.
- op = ops;
- op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
- op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
- op->data.recv_status_on_client.status = &status_code_;
- op->data.recv_status_on_client.status_details = &status_details_;
- op->flags = 0;
- op->reserved = nullptr;
- op++;
- // This callback signals the end of the LB call, so it relies on the initial
- // ref instead of a new ref. When it's invoked, it's the initial ref that is
- // unreffed.
- call_error = grpc_call_start_batch_and_execute(
- lb_call_, ops, (size_t)(op - ops), &on_status_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-
-XdsLb::LbChannelState::LrsCallState::~LrsCallState() {
- grpc_metadata_array_destroy(&initial_metadata_recv_);
- grpc_metadata_array_destroy(&trailing_metadata_recv_);
- grpc_byte_buffer_destroy(send_message_payload_);
- grpc_byte_buffer_destroy(recv_message_payload_);
- grpc_slice_unref_internal(status_details_);
- GPR_ASSERT(lb_call_ != nullptr);
- grpc_call_unref(lb_call_);
-}
-
-void XdsLb::LbChannelState::LrsCallState::Orphan() {
- reporter_.reset();
- GPR_ASSERT(lb_call_ != nullptr);
- // If we are here because xdslb_policy wants to cancel the call,
- // on_status_received_ will complete the cancellation and clean up. Otherwise,
- // we are here because xdslb_policy has to orphan a failed call, then the
- // following cancellation will be a no-op.
- grpc_call_cancel(lb_call_, nullptr);
- // Note that the initial ref is hold by on_status_received_. So the
- // corresponding unref happens in on_status_received_ instead of here.
-}
-
-void XdsLb::LbChannelState::LrsCallState::MaybeStartReportingLocked() {
- // Don't start if this is not the current call on the current channel.
- if (!IsCurrentCallOnChannel() || !lb_chand()->IsCurrentChannel()) return;
- // Don't start again if already started.
- if (reporter_ != nullptr) return;
- // Don't start if the previous send_message op (of the initial request or the
- // last report of the previous reporter) hasn't completed.
- if (send_message_payload_ != nullptr) return;
- // Don't start if no LRS response has arrived.
- if (!seen_response()) return;
- // Don't start if the EDS call hasn't received any valid response. Note that
- // this must be the first channel because it is the current channel but its
- // EDS call hasn't seen any response.
- EdsCallState* eds_calld = lb_chand()->eds_calld_->lb_calld();
- if (eds_calld == nullptr || !eds_calld->seen_response()) return;
- // Start reporting.
- lb_chand()->xdslb_policy_->client_stats_.MaybeInitLastReportTime();
- reporter_ = MakeOrphanable<Reporter>(
- Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
-}
-
-void XdsLb::LbChannelState::LrsCallState::OnInitialRequestSentLocked(
- void* arg, grpc_error* error) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- // Clear the send_message_payload_.
- grpc_byte_buffer_destroy(lrs_calld->send_message_payload_);
- lrs_calld->send_message_payload_ = nullptr;
- lrs_calld->MaybeStartReportingLocked();
- lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
-}
-
-void XdsLb::LbChannelState::LrsCallState::OnResponseReceivedLocked(
- void* arg, grpc_error* error) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- XdsLb* xdslb_policy = lrs_calld->xdslb_policy();
- // Empty payload means the LB call was cancelled.
- if (!lrs_calld->IsCurrentCallOnChannel() ||
- lrs_calld->recv_message_payload_ == nullptr) {
- lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked");
- return;
- }
- // Read the response.
- grpc_byte_buffer_reader bbr;
- grpc_byte_buffer_reader_init(&bbr, lrs_calld->recv_message_payload_);
- grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
- grpc_byte_buffer_reader_destroy(&bbr);
- grpc_byte_buffer_destroy(lrs_calld->recv_message_payload_);
- lrs_calld->recv_message_payload_ = nullptr;
- // This anonymous lambda is a hack to avoid the usage of goto.
- [&]() {
- // Parse the response.
- UniquePtr<char> new_cluster_name;
- grpc_millis new_load_reporting_interval;
- grpc_error* parse_error = XdsLrsResponseDecodeAndParse(
- response_slice, &new_cluster_name, &new_load_reporting_interval);
- if (parse_error != GRPC_ERROR_NONE) {
- gpr_log(GPR_ERROR, "[xdslb %p] LRS response parsing failed. error=%s",
- xdslb_policy, grpc_error_string(parse_error));
- GRPC_ERROR_UNREF(parse_error);
- return;
- }
- lrs_calld->seen_response_ = true;
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
+ void OnError(grpc_error* error) override {
+ // If the fallback-at-startup checks are pending, go into fallback mode
+ // immediately. This short-circuits the timeout for the
+ // fallback-at-startup case.
+ if (xds_policy_->fallback_at_startup_checks_pending_) {
gpr_log(GPR_INFO,
- "[xdslb %p] LRS response received, cluster_name=%s, "
- "load_report_interval=%" PRId64 "ms",
- xdslb_policy, new_cluster_name.get(),
- new_load_reporting_interval);
- }
- if (new_load_reporting_interval <
- GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) {
- new_load_reporting_interval =
- GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS;
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(
- GPR_INFO,
- "[xdslb %p] Increased load_report_interval to minimum value %dms",
- xdslb_policy, GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
- }
- }
- // Ignore identical update.
- if (lrs_calld->load_reporting_interval_ == new_load_reporting_interval &&
- strcmp(lrs_calld->cluster_name_.get(), new_cluster_name.get()) == 0) {
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Incoming LRS response identical to current, "
- "ignoring.",
- xdslb_policy);
+ "[xdslb %p] xds watcher reported error; entering fallback "
+ "mode: %s",
+ xds_policy_.get(), grpc_error_string(error));
+ xds_policy_->fallback_at_startup_checks_pending_ = false;
+ grpc_timer_cancel(&xds_policy_->lb_fallback_timer_);
+ xds_policy_->UpdateFallbackPolicyLocked();
+ // If the xds call failed, request re-resolution.
+ // TODO(roth): We check the error string contents here to
+ // differentiate between the xds call failing and the xds channel
+ // going into TRANSIENT_FAILURE. This is a pretty ugly hack,
+ // but it's okay for now, since we're not yet sure whether we will
+ // continue to support the current fallback functionality. If we
+ // decide to keep the fallback approach, then we should either
+ // find a cleaner way to expose the difference between these two
+ // cases or decide that we're okay re-resolving in both cases.
+ // Note that even if we do keep the current fallback functionality,
+ // this re-resolution will only be necessary if we are going to be
+ // using this LB policy with resolvers other than the xds resolver.
+ if (strstr(grpc_error_string(error), "xds call failed")) {
+ xds_policy_->channel_control_helper()->RequestReresolution();
}
- return;
}
- // Stop current load reporting (if any) to adopt the new config.
- lrs_calld->reporter_.reset();
- // Record the new config.
- lrs_calld->cluster_name_ = std::move(new_cluster_name);
- lrs_calld->load_reporting_interval_ = new_load_reporting_interval;
- // Try starting sending load report.
- lrs_calld->MaybeStartReportingLocked();
- }();
- grpc_slice_unref_internal(response_slice);
- if (xdslb_policy->shutting_down_) {
- lrs_calld->Unref(DEBUG_LOCATION,
- "LRS+OnResponseReceivedLocked+xds_shutdown");
- return;
+ GRPC_ERROR_UNREF(error);
}
- // Keep listening for LRS config updates.
- grpc_op op;
- memset(&op, 0, sizeof(op));
- op.op = GRPC_OP_RECV_MESSAGE;
- op.data.recv_message.recv_message = &lrs_calld->recv_message_payload_;
- op.flags = 0;
- op.reserved = nullptr;
- GPR_ASSERT(lrs_calld->lb_call_ != nullptr);
- // Reuse the "OnResponseReceivedLocked" ref taken in ctor.
- const grpc_call_error call_error = grpc_call_start_batch_and_execute(
- lrs_calld->lb_call_, &op, 1, &lrs_calld->on_response_received_);
- GPR_ASSERT(GRPC_CALL_OK == call_error);
-}
-void XdsLb::LbChannelState::LrsCallState::OnStatusReceivedLocked(
- void* arg, grpc_error* error) {
- LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
- XdsLb* xdslb_policy = lrs_calld->xdslb_policy();
- LbChannelState* lb_chand = lrs_calld->lb_chand();
- GPR_ASSERT(lrs_calld->lb_call_ != nullptr);
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- char* status_details = grpc_slice_to_c_string(lrs_calld->status_details_);
- gpr_log(GPR_INFO,
- "[xdslb %p] LRS call status received. Status = %d, details "
- "= '%s', (lb_chand: %p, lb_calld: %p, lb_call: %p), error '%s'",
- xdslb_policy, lrs_calld->status_code_, status_details, lb_chand,
- lrs_calld, lrs_calld->lb_call_, grpc_error_string(error));
- gpr_free(status_details);
- }
- // Ignore status from a stale call.
- if (lrs_calld->IsCurrentCallOnChannel()) {
- // Because this call is the current one on the channel, the channel can't
- // have been swapped out; otherwise, the call should have been reset.
- GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel());
- GPR_ASSERT(!xdslb_policy->shutting_down_);
- if (lb_chand == xdslb_policy->LatestLbChannel()) {
- // This channel is the most recently created one. Try to restart the call
- // and reresolve.
- lrs_calld->parent_->OnCallFinishedLocked();
- xdslb_policy->channel_control_helper()->RequestReresolution();
- }
- }
- lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnStatusReceivedLocked");
-}
-
-bool XdsLb::LbChannelState::LrsCallState::IsCurrentCallOnChannel() const {
- // If the retryable LRS call is null (which only happens when the LB channel
- // is shutting down), all the LRS calls are stale.
- if (lb_chand()->lrs_calld_ == nullptr) return false;
- return this == lb_chand()->lrs_calld_->lb_calld();
-}
-
-//
-// helper code for creating balancer channel
-//
-
-// Returns the channel args for the LB channel, used to create a bidirectional
-// stream for the reception of load balancing updates.
-grpc_channel_args* BuildBalancerChannelArgs(const grpc_channel_args* args) {
- static const char* args_to_remove[] = {
- // LB policy name, since we want to use the default (pick_first) in
- // the LB channel.
- GRPC_ARG_LB_POLICY_NAME,
- // The service config that contains the LB config. We don't want to
- // recursively use xds in the LB channel.
- GRPC_ARG_SERVICE_CONFIG,
- // The channel arg for the server URI, since that will be different for
- // the LB channel than for the parent channel. The client channel
- // factory will re-add this arg with the right value.
- GRPC_ARG_SERVER_URI,
- // The LB channel should use the authority indicated by the target
- // authority table (see \a ModifyXdsBalancerChannelArgs),
- // as opposed to the authority from the parent channel.
- GRPC_ARG_DEFAULT_AUTHORITY,
- // Just as for \a GRPC_ARG_DEFAULT_AUTHORITY, the LB channel should be
- // treated as a stand-alone channel and not inherit this argument from the
- // args of the parent channel.
- GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
- // Don't want to pass down channelz node from parent; the balancer
- // channel will get its own.
- GRPC_ARG_CHANNELZ_CHANNEL_NODE,
- };
- // Channel args to add.
- InlinedVector<grpc_arg, 2> args_to_add;
- // A channel arg indicating the target is a xds load balancer.
- args_to_add.emplace_back(grpc_channel_arg_integer_create(
- const_cast<char*>(GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER), 1));
- // The parent channel's channelz uuid.
- channelz::ChannelNode* channelz_node = nullptr;
- const grpc_arg* arg =
- grpc_channel_args_find(args, GRPC_ARG_CHANNELZ_CHANNEL_NODE);
- if (arg != nullptr && arg->type == GRPC_ARG_POINTER &&
- arg->value.pointer.p != nullptr) {
- channelz_node = static_cast<channelz::ChannelNode*>(arg->value.pointer.p);
- args_to_add.emplace_back(
- channelz::MakeParentUuidArg(channelz_node->uuid()));
- }
- // Construct channel args.
- grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
- args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), args_to_add.data(),
- args_to_add.size());
- // Make any necessary modifications for security.
- return ModifyXdsBalancerChannelArgs(new_args);
-}
+ private:
+ RefCountedPtr<XdsLb> xds_policy_;
+};
//
// ctor and dtor
@@ -1785,8 +661,6 @@ grpc_channel_args* BuildBalancerChannelArgs(const grpc_channel_args* args) {
XdsLb::XdsLb(Args args)
: LoadBalancingPolicy(std::move(args)),
- lb_call_timeout_ms_(grpc_channel_args_find_integer(
- args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS, {0, 0, INT_MAX})),
lb_fallback_timeout_ms_(grpc_channel_args_find_integer(
args.args, GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS,
{GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX})),
@@ -1837,10 +711,29 @@ void XdsLb::ShutdownLocked() {
}
fallback_policy_.reset();
pending_fallback_policy_.reset();
- // We reset the LB channels here instead of in our destructor because they
- // hold refs to XdsLb.
- lb_chand_.reset();
- pending_lb_chand_.reset();
+ xds_client_.reset();
+ // TODO(roth): When we instantiate the XdsClient in the resolver
+ // instead of here, re-enable the code below. Right now, we need to NOT
+ // cancel the watches, since the watchers are holding refs to this LB
+ // policy, and it causes polling-related crashes when this LB policy's
+ // pollset_set goes away before the one in the XdsClient object. However,
+ // once the resolver becomes the owner of the XdsClient object, it will be
+ // using the pollset_set of the resolver, and the resolver will be kept
+ // alive until after the XdsClient is destroyed via the ServiceConfigWatcher.
+ // At that point, we will not need to prevent this LB policy from being
+ // destroyed before the XdsClient, but we WILL need to drop these refs so
+ // that the LB policy will be destroyed if the XdsClient object is not being
+ // destroyed at the same time (e.g., if this LB policy is going away
+ // due to an RDS update that changed the clusters we're using).
+#if 0
+ // Cancel the endpoint watch here instead of in our dtor, because the
+ // watcher holds a ref to us.
+ if (xds_client_ != nullptr) {
+ xds_client_->CancelEndpointDataWatch(StringView(server_name_),
+ endpoint_watcher_);
+ xds_client_->RemoveClientStats(StringView(server_name_), &client_stats_);
+ }
+#endif
}
//
@@ -1848,12 +741,10 @@ void XdsLb::ShutdownLocked() {
//
void XdsLb::ResetBackoffLocked() {
- if (lb_chand_ != nullptr) {
- grpc_channel_reset_connect_backoff(lb_chand_->channel());
- }
- if (pending_lb_chand_ != nullptr) {
- grpc_channel_reset_connect_backoff(pending_lb_chand_->channel());
- }
+ // TODO(roth): When we instantiate the XdsClient in the resolver
+ // instead of in this LB policy, this should be done in the resolver
+ // instead of here.
+ if (xds_client_ != nullptr) xds_client_->ResetBackoff();
priority_list_.ResetBackoffLocked();
if (fallback_policy_ != nullptr) {
fallback_policy_->ResetBackoffLocked();
@@ -1863,45 +754,6 @@ void XdsLb::ResetBackoffLocked() {
}
}
-void XdsLb::ProcessAddressesAndChannelArgsLocked(
- ServerAddressList addresses, const grpc_channel_args& args) {
- // Update fallback address list.
- fallback_backend_addresses_ = std::move(addresses);
- // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args,
- // since we use this to trigger the client_load_reporting filter.
- static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
- grpc_arg new_arg = grpc_channel_arg_string_create(
- (char*)GRPC_ARG_LB_POLICY_NAME, (char*)"xds");
- grpc_channel_args_destroy(args_);
- args_ = grpc_channel_args_copy_and_add_and_remove(
- &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
- // Construct args for balancer channel.
- grpc_channel_args* lb_channel_args = BuildBalancerChannelArgs(&args);
- // Create an LB channel if we don't have one yet or the balancer name has
- // changed from the last received one.
- bool create_lb_channel = lb_chand_ == nullptr;
- if (lb_chand_ != nullptr) {
- UniquePtr<char> last_balancer_name(
- grpc_channel_get_target(LatestLbChannel()->channel()));
- create_lb_channel =
- strcmp(last_balancer_name.get(), balancer_name_.get()) != 0;
- }
- if (create_lb_channel) {
- OrphanablePtr<LbChannelState> lb_chand = MakeOrphanable<LbChannelState>(
- Ref(DEBUG_LOCATION, "XdsLb+LbChannelState"), balancer_name_.get(),
- *lb_channel_args);
- if (lb_chand_ == nullptr || !lb_chand_->HasActiveEdsCall()) {
- GPR_ASSERT(pending_lb_chand_ == nullptr);
- // If we do not have a working LB channel yet, use the newly created one.
- lb_chand_ = std::move(lb_chand);
- } else {
- // Otherwise, wait until the new LB channel to be ready to swap it in.
- pending_lb_chand_ = std::move(lb_chand);
- }
- }
- grpc_channel_args_destroy(lb_channel_args);
-}
-
void XdsLb::ParseLbConfig(const ParsedXdsConfig* xds_config) {
if (xds_config == nullptr || xds_config->balancer_name() == nullptr) return;
// TODO(yashykt) : does this need to be a gpr_strdup
@@ -1912,13 +764,32 @@ void XdsLb::ParseLbConfig(const ParsedXdsConfig* xds_config) {
}
void XdsLb::UpdateLocked(UpdateArgs args) {
- const bool is_initial_update = lb_chand_ == nullptr;
+ const bool is_initial_update = xds_client_ == nullptr;
ParseLbConfig(static_cast(args.config.get()));
+ // TODO(roth): This check should go away once we are getting the xds
+ // server from the bootstrap file.
if (balancer_name_ == nullptr) {
gpr_log(GPR_ERROR, "[xdslb %p] LB config parsing fails.", this);
return;
}
- ProcessAddressesAndChannelArgsLocked(std::move(args.addresses), *args.args);
+ // Update fallback address list.
+ fallback_backend_addresses_ = std::move(args.addresses);
+ // Update args.
+ grpc_channel_args_destroy(args_);
+ args_ = args.args;
+ args.args = nullptr;
+ // Create an xds client if we don't have one yet.
+ if (xds_client_ == nullptr) {
+ xds_client_ = MakeOrphanable<XdsClient>(
+ combiner(), interested_parties(), balancer_name_.get(),
+ StringView(server_name_), nullptr /* service config watcher */, *args_);
+ endpoint_watcher_ = New<EndpointWatcher>(Ref());
+ xds_client_->WatchEndpointData(
+ StringView(server_name_),
+ UniquePtr<XdsClient::EndpointWatcherInterface>(endpoint_watcher_));
+ xds_client_->AddClientStats(StringView(server_name_), &client_stats_);
+ }
+ // Update priority list.
priority_list_.UpdateLocked();
// Update the existing fallback policy. The fallback policy config and/or the
// fallback addresses may be new.
@@ -1931,10 +802,6 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
grpc_combiner_scheduler(combiner()));
fallback_at_startup_checks_pending_ = true;
grpc_timer_init(&lb_fallback_timer_, deadline, &lb_on_fallback_);
- // Start watching the channel's connectivity state. If the channel
- // goes into state TRANSIENT_FAILURE, we go into fallback mode even if
- // the fallback timeout has not elapsed.
- lb_chand_->StartConnectivityWatchLocked();
}
}
@@ -1949,7 +816,6 @@ void XdsLb::MaybeCancelFallbackAtStartupChecks() {
"watch",
this);
grpc_timer_cancel(&lb_fallback_timer_);
- lb_chand_->CancelConnectivityWatchLocked();
fallback_at_startup_checks_pending_ = false;
}
@@ -1967,7 +833,6 @@ void XdsLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
}
xdslb_policy->fallback_at_startup_checks_pending_ = false;
xdslb_policy->UpdateFallbackPolicyLocked();
- xdslb_policy->lb_chand_->CancelConnectivityWatchLocked();
}
xdslb_policy->Unref(DEBUG_LOCATION, "on_fallback_timer");
}
@@ -2824,7 +1689,6 @@ void XdsLb::PriorityList::LocalityMap::Locality::Helper::UpdateState(
// This request is from an outdated child, so ignore it.
return;
}
- GPR_ASSERT(locality_->xds_policy()->lb_chand_ != nullptr);
// Cache the picker and its state in the locality.
locality_->picker_wrapper_ = MakeRefCounted(
std::move(picker),
@@ -2835,30 +1699,6 @@ void XdsLb::PriorityList::LocalityMap::Locality::Helper::UpdateState(
locality_->locality_map_->OnLocalityStateUpdateLocked();
}
-void XdsLb::PriorityList::LocalityMap::Locality::Helper::RequestReresolution() {
- if (locality_->xds_policy()->shutting_down_) return;
- // If there is a pending child policy, ignore re-resolution requests
- // from the current child policy (or any outdated child).
- if (locality_->pending_child_policy_ != nullptr && !CalledByPendingChild()) {
- return;
- }
- if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
- gpr_log(GPR_INFO,
- "[xdslb %p] Re-resolution requested from the internal RR policy "
- "(%p).",
- locality_->xds_policy(), locality_->child_policy_.get());
- }
- GPR_ASSERT(locality_->xds_policy()->lb_chand_ != nullptr);
- // If we are talking to a balancer, we expect to get updated addresses
- // from the balancer, so we can ignore the re-resolution request from
- // the child policy. Otherwise, pass the re-resolution request up to the
- // channel.
- if (locality_->xds_policy()->lb_chand_->eds_calld() == nullptr ||
- !locality_->xds_policy()->lb_chand_->eds_calld()->seen_response()) {
- locality_->xds_policy()->channel_control_helper()->RequestReresolution();
- }
-}
-
void XdsLb::PriorityList::LocalityMap::Locality::Helper::AddTraceEvent(
TraceSeverity severity, StringView message) {
if (locality_->xds_policy()->shutting_down_ ||
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.h b/src/core/ext/filters/client_channel/lb_policy/xds/xds.h
index 8b20680f2d6..13d3435da34 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.h
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.h
@@ -21,14 +21,11 @@
#include
-/** Channel arg indicating if a target corresponding to the address is grpclb
- * loadbalancer. The type of this arg is an integer and the value is treated as
- * a bool. */
-#define GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER \
- "grpc.address_is_xds_load_balancer"
/** Channel arg indicating if a target corresponding to the address is a backend
* received from a balancer. The type of this arg is an integer and the value is
* treated as a bool. */
+// TODO(roth): Depending on how we ultimately decide to handle fallback,
+// this may no longer be needed.
#define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER \
"grpc.address_is_backend_from_xds_load_balancer"
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc
similarity index 99%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
rename to src/core/ext/filters/client_channel/xds/xds_api.cc
index ec006b16bfd..13ce47f4416 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/xds/xds_api.cc
@@ -24,7 +24,7 @@
#include
#include
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h"
+#include "src/core/ext/filters/client_channel/xds/xds_api.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -247,7 +247,7 @@ grpc_error* DropParseAndAppend(
} // namespace
grpc_error* XdsEdsResponseDecodeAndParse(const grpc_slice& encoded_response,
- XdsUpdate* update) {
+ EdsUpdate* update) {
upb::Arena arena;
// Decode the response.
const envoy_api_v2_DiscoveryResponse* response =
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h
similarity index 90%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h
rename to src/core/ext/filters/client_channel/xds/xds_api.h
index 1b56bef7d81..3b89c682da3 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/xds/xds_api.h
@@ -16,16 +16,17 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_API_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_API_H
#include
+#include
+
#include
-#include
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
namespace grpc_core {
@@ -62,6 +63,9 @@ class XdsPriorityListUpdate {
};
bool operator==(const XdsPriorityListUpdate& other) const;
+ bool operator!=(const XdsPriorityListUpdate& other) const {
+ return !(*this == other);
+ }
void Add(LocalityMap::Locality locality);
@@ -125,19 +129,22 @@ class XdsDropConfig : public RefCounted {
DropCategoryList drop_category_list_;
};
-struct XdsUpdate {
+struct EdsUpdate {
XdsPriorityListUpdate priority_list_update;
RefCountedPtr drop_config;
bool drop_all = false;
};
+// TODO(juanlishen): Add fields as part of implementing CDS support.
+struct CdsUpdate {};
+
// Creates an EDS request querying \a service_name.
grpc_slice XdsEdsRequestCreateAndEncode(const char* server_name);
// Parses the EDS response and returns the args to update locality map. If there
// is any error, the output update is invalid.
grpc_error* XdsEdsResponseDecodeAndParse(const grpc_slice& encoded_response,
- XdsUpdate* update);
+ EdsUpdate* update);
// Creates an LRS request querying \a server_name.
grpc_slice XdsLrsRequestCreateAndEncode(const char* server_name);
@@ -156,5 +163,4 @@ grpc_error* XdsLrsResponseDecodeAndParse(const grpc_slice& encoded_response,
} // namespace grpc_core
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H \
- */
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_API_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc b/src/core/ext/filters/client_channel/xds/xds_channel.cc
similarity index 73%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc
rename to src/core/ext/filters/client_channel/xds/xds_channel.cc
index 386517d6427..49e08c82c3d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.cc
+++ b/src/core/ext/filters/client_channel/xds/xds_channel.cc
@@ -20,16 +20,16 @@
#include
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h"
+#include "src/core/ext/filters/client_channel/xds/xds_channel.h"
namespace grpc_core {
-grpc_channel_args* ModifyXdsBalancerChannelArgs(grpc_channel_args* args) {
+grpc_channel_args* ModifyXdsChannelArgs(grpc_channel_args* args) {
return args;
}
-grpc_channel* CreateXdsBalancerChannel(const char* target_uri,
- const grpc_channel_args& args) {
+grpc_channel* CreateXdsChannel(const char* target_uri,
+ const grpc_channel_args& args) {
return grpc_insecure_channel_create(target_uri, &args, nullptr);
}
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h b/src/core/ext/filters/client_channel/xds/xds_channel.h
similarity index 68%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h
rename to src/core/ext/filters/client_channel/xds/xds_channel.h
index 516bac1df25..7434e8ce2b4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h
+++ b/src/core/ext/filters/client_channel/xds/xds_channel.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_H
#include
@@ -31,12 +31,12 @@ namespace grpc_core {
/// Takes ownership of \a args.
///
/// Caller takes ownership of the returned args.
-grpc_channel_args* ModifyXdsBalancerChannelArgs(grpc_channel_args* args);
+grpc_channel_args* ModifyXdsChannelArgs(grpc_channel_args* args);
-grpc_channel* CreateXdsBalancerChannel(const char* target_uri,
- const grpc_channel_args& args);
+grpc_channel* CreateXdsChannel(const char* target_uri,
+ const grpc_channel_args& args);
} // namespace grpc_core
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H \
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_H \
*/
diff --git a/src/core/ext/filters/client_channel/xds/xds_channel_args.h b/src/core/ext/filters/client_channel/xds/xds_channel_args.h
new file mode 100644
index 00000000000..cabdc512264
--- /dev/null
+++ b/src/core/ext/filters/client_channel/xds/xds_channel_args.h
@@ -0,0 +1,26 @@
+//
+// Copyright 2019 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_ARGS_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_ARGS_H
+
+// Boolean channel arg indicating whether the target is an xds server.
+#define GRPC_ARG_ADDRESS_IS_XDS_SERVER "grpc.address_is_xds_server"
+
+// Pointer channel arg containing a ref to the XdsClient object.
+#define GRPC_ARG_XDS_CLIENT "grpc.xds_client"
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CHANNEL_ARGS_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc b/src/core/ext/filters/client_channel/xds/xds_channel_secure.cc
similarity index 89%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc
rename to src/core/ext/filters/client_channel/xds/xds_channel_secure.cc
index 720d5a01d9d..2935be31bbf 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/xds/xds_channel_secure.cc
@@ -18,7 +18,7 @@
#include
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h"
+#include "src/core/ext/filters/client_channel/xds/xds_channel.h"
#include
@@ -37,7 +37,7 @@
namespace grpc_core {
-grpc_channel_args* ModifyXdsBalancerChannelArgs(grpc_channel_args* args) {
+grpc_channel_args* ModifyXdsChannelArgs(grpc_channel_args* args) {
InlinedVector args_to_remove;
InlinedVector args_to_add;
// Substitute the channel credentials with a version without call
@@ -62,12 +62,12 @@ grpc_channel_args* ModifyXdsBalancerChannelArgs(grpc_channel_args* args) {
return result;
}
-grpc_channel* CreateXdsBalancerChannel(const char* target_uri,
- const grpc_channel_args& args) {
+grpc_channel* CreateXdsChannel(const char* target_uri,
+ const grpc_channel_args& args) {
grpc_channel_credentials* creds =
grpc_channel_credentials_find_in_args(&args);
if (creds == nullptr) {
- // Build with security but parent channel is insecure.
+ // Built with security but parent channel is insecure.
return grpc_insecure_channel_create(target_uri, &args, nullptr);
}
const char* arg_to_remove = GRPC_ARG_CHANNEL_CREDENTIALS;
diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc
new file mode 100644
index 00000000000..de30664d225
--- /dev/null
+++ b/src/core/ext/filters/client_channel/xds/xds_client.cc
@@ -0,0 +1,1305 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "src/core/ext/filters/client_channel/client_channel.h"
+#include "src/core/ext/filters/client_channel/parse_address.h"
+#include "src/core/ext/filters/client_channel/server_address.h"
+#include "src/core/ext/filters/client_channel/service_config.h"
+#include "src/core/ext/filters/client_channel/xds/xds_api.h"
+#include "src/core/ext/filters/client_channel/xds/xds_channel.h"
+#include "src/core/ext/filters/client_channel/xds/xds_channel_args.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
+#include "src/core/lib/backoff/backoff.h"
+#include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/channel/channel_stack.h"
+#include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/map.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/gprpp/sync.h"
+#include "src/core/lib/iomgr/combiner.h"
+#include "src/core/lib/iomgr/sockaddr.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
+#include "src/core/lib/iomgr/timer.h"
+#include "src/core/lib/slice/slice_hash_table.h"
+#include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/surface/call.h"
+#include "src/core/lib/surface/channel.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "src/core/lib/transport/static_metadata.h"
+
+#define GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS 1
+#define GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER 1.6
+#define GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS 120
+#define GRPC_XDS_RECONNECT_JITTER 0.2
+#define GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS 1000
+
+namespace grpc_core {
+
+TraceFlag grpc_xds_client_trace(false, "xds_client");
+
+// Contains a channel to the xds server and all the data related to the
+// channel. Holds a ref to the xds client object.
+// TODO(roth): This is separate from the XdsClient object because it was
+// originally designed to be able to swap itself out in case the
+// balancer name changed. Now that the balancer name is going to be
+// coming from the bootstrap file, we don't really need this level of
+// indirection unless we decide to support watching the bootstrap file
+// for changes. At some point, if we decide that we're never going to
+// need to do that, then we can eliminate this class and move its
+// contents directly into the XdsClient class.
+class XdsClient::ChannelState : public InternallyRefCounted {
+ public:
+ // An xds call wrapper that can restart a call upon failure. Holds a ref to
+ // the xds channel. The template parameter is the kind of wrapped xds call.
+ template
+ class RetryableCall : public InternallyRefCounted> {
+ public:
+ explicit RetryableCall(RefCountedPtr chand);
+
+ void Orphan() override;
+
+ void OnCallFinishedLocked();
+
+ T* calld() const { return calld_.get(); }
+ ChannelState* chand() const { return chand_.get(); }
+
+ private:
+ void StartNewCallLocked();
+ void StartRetryTimerLocked();
+ static void OnRetryTimerLocked(void* arg, grpc_error* error);
+
+ // The wrapped call that talks to the xds server. It's instantiated
+ // every time we start a new call. It's null during call retry backoff.
+ OrphanablePtr calld_;
+ // The owning xds channel.
+ RefCountedPtr chand_;
+
+ // Retry state.
+ BackOff backoff_;
+ grpc_timer retry_timer_;
+ grpc_closure on_retry_timer_;
+ bool retry_timer_callback_pending_ = false;
+
+ bool shutting_down_ = false;
+ };
+
+ // Contains an EDS call to the xds server.
+ class EdsCallState : public InternallyRefCounted {
+ public:
+ // The ctor and dtor should not be used directly.
+ explicit EdsCallState(RefCountedPtr> parent);
+ ~EdsCallState() override;
+
+ void Orphan() override;
+
+ RetryableCall* parent() const { return parent_.get(); }
+ ChannelState* chand() const { return parent_->chand(); }
+ XdsClient* xds_client() const { return chand()->xds_client(); }
+ bool seen_response() const { return seen_response_; }
+
+ private:
+ static void OnResponseReceivedLocked(void* arg, grpc_error* error);
+ static void OnStatusReceivedLocked(void* arg, grpc_error* error);
+
+ bool IsCurrentCallOnChannel() const;
+
+ // The owning RetryableCall<>.
+ RefCountedPtr> parent_;
+ bool seen_response_ = false;
+
+ // Always non-NULL.
+ grpc_call* call_;
+
+ // recv_initial_metadata
+ grpc_metadata_array initial_metadata_recv_;
+
+ // send_message
+ grpc_byte_buffer* send_message_payload_ = nullptr;
+
+ // recv_message
+ grpc_byte_buffer* recv_message_payload_ = nullptr;
+ grpc_closure on_response_received_;
+
+ // recv_trailing_metadata
+ grpc_metadata_array trailing_metadata_recv_;
+ grpc_status_code status_code_;
+ grpc_slice status_details_;
+ grpc_closure on_status_received_;
+ };
+
+ // Contains an LRS call to the xds server.
+ class LrsCallState : public InternallyRefCounted {
+ public:
+ // The ctor and dtor should not be used directly.
+ explicit LrsCallState(RefCountedPtr> parent);
+ ~LrsCallState() override;
+
+ void Orphan() override;
+
+ void MaybeStartReportingLocked();
+
+ RetryableCall* parent() { return parent_.get(); }
+ ChannelState* chand() const { return parent_->chand(); }
+ XdsClient* xds_client() const { return chand()->xds_client(); }
+ bool seen_response() const { return seen_response_; }
+
+ private:
+ // Reports client-side load stats according to a fixed interval.
+ class Reporter : public InternallyRefCounted {
+ public:
+ Reporter(RefCountedPtr parent, grpc_millis report_interval)
+ : parent_(std::move(parent)), report_interval_(report_interval) {
+ GRPC_CLOSURE_INIT(&on_next_report_timer_, OnNextReportTimerLocked, this,
+ grpc_combiner_scheduler(xds_client()->combiner_));
+ GRPC_CLOSURE_INIT(&on_report_done_, OnReportDoneLocked, this,
+ grpc_combiner_scheduler(xds_client()->combiner_));
+ ScheduleNextReportLocked();
+ }
+
+ void Orphan() override;
+
+ private:
+ void ScheduleNextReportLocked();
+ static void OnNextReportTimerLocked(void* arg, grpc_error* error);
+ void SendReportLocked();
+ static void OnReportDoneLocked(void* arg, grpc_error* error);
+
+ bool IsCurrentReporterOnCall() const {
+ return this == parent_->reporter_.get();
+ }
+ XdsClient* xds_client() const { return parent_->xds_client(); }
+
+ // The owning LRS call.
+ RefCountedPtr parent_;
+
+ // The load reporting state.
+ const grpc_millis report_interval_;
+ bool last_report_counters_were_zero_ = false;
+ bool next_report_timer_callback_pending_ = false;
+ grpc_timer next_report_timer_;
+ grpc_closure on_next_report_timer_;
+ grpc_closure on_report_done_;
+ };
+
+ static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
+ static void OnResponseReceivedLocked(void* arg, grpc_error* error);
+ static void OnStatusReceivedLocked(void* arg, grpc_error* error);
+
+ bool IsCurrentCallOnChannel() const;
+
+ // The owning RetryableCall<>.
+ RefCountedPtr> parent_;
+ bool seen_response_ = false;
+
+ // Always non-NULL.
+ grpc_call* call_;
+
+ // recv_initial_metadata
+ grpc_metadata_array initial_metadata_recv_;
+
+ // send_message
+ grpc_byte_buffer* send_message_payload_ = nullptr;
+ grpc_closure on_initial_request_sent_;
+
+ // recv_message
+ grpc_byte_buffer* recv_message_payload_ = nullptr;
+ grpc_closure on_response_received_;
+
+ // recv_trailing_metadata
+ grpc_metadata_array trailing_metadata_recv_;
+ grpc_status_code status_code_;
+ grpc_slice status_details_;
+ grpc_closure on_status_received_;
+
+ // Load reporting state.
+ UniquePtr cluster_name_;
+ grpc_millis load_reporting_interval_ = 0;
+ OrphanablePtr reporter_;
+ };
+
+ ChannelState(RefCountedPtr xds_client, const char* balancer_name,
+ const grpc_channel_args& args);
+ ~ChannelState();
+
+ void Orphan() override;
+
+ grpc_channel* channel() const { return channel_; }
+ XdsClient* xds_client() const { return xds_client_.get(); }
+ EdsCallState* eds_calld() const { return eds_calld_->calld(); }
+ LrsCallState* lrs_calld() const { return lrs_calld_->calld(); }
+
+ void MaybeStartEdsCall();
+ void StopEdsCall();
+
+ void MaybeStartLrsCall();
+ void StopLrsCall();
+
+ bool HasActiveEdsCall() const { return eds_calld_->calld() != nullptr; }
+
+ void StartConnectivityWatchLocked();
+ void CancelConnectivityWatchLocked();
+
+ private:
+ class StateWatcher;
+
+ // The owning xds client.
+ RefCountedPtr xds_client_;
+
+ // The channel and its status.
+ grpc_channel* channel_;
+ bool shutting_down_ = false;
+ StateWatcher* watcher_ = nullptr;
+
+ // The retryable XDS calls.
+ OrphanablePtr> eds_calld_;
+ OrphanablePtr> lrs_calld_;
+};
+
+//
+// XdsClient::ChannelState::StateWatcher
+//
+
+class XdsClient::ChannelState::StateWatcher
+ : public AsyncConnectivityStateWatcherInterface {
+ public:
+ explicit StateWatcher(RefCountedPtr parent)
+ : AsyncConnectivityStateWatcherInterface(
+ grpc_combiner_scheduler(parent->xds_client()->combiner_)),
+ parent_(std::move(parent)) {}
+
+ private:
+ void OnConnectivityStateChange(grpc_connectivity_state new_state) override {
+ if (!parent_->shutting_down_ &&
+ new_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+ // In TRANSIENT_FAILURE. Notify all watchers of error.
+ gpr_log(GPR_INFO,
+ "[xds_client %p] xds channel in state TRANSIENT_FAILURE",
+ parent_->xds_client());
+ parent_->xds_client()->NotifyOnError(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+ "xds channel in TRANSIENT_FAILURE"));
+ }
+ }
+
+ RefCountedPtr parent_;
+};
+
+//
+// XdsClient::ChannelState
+//
+
+namespace {
+
+// Returns the channel args for the xds channel.
+grpc_channel_args* BuildXdsChannelArgs(const grpc_channel_args& args) {
+ static const char* args_to_remove[] = {
+ // LB policy name, since we want to use the default (pick_first) in
+ // the LB channel.
+ GRPC_ARG_LB_POLICY_NAME,
+ // The service config that contains the LB config. We don't want to
+ // recursively use xds in the LB channel.
+ GRPC_ARG_SERVICE_CONFIG,
+ // The channel arg for the server URI, since that will be different for
+ // the xds channel than for the parent channel. The client channel
+ // factory will re-add this arg with the right value.
+ GRPC_ARG_SERVER_URI,
+ // The xds channel should use the authority indicated by the target
+ // authority table (see \a ModifyXdsChannelArgs),
+ // as opposed to the authority from the parent channel.
+ GRPC_ARG_DEFAULT_AUTHORITY,
+ // Just as for \a GRPC_ARG_DEFAULT_AUTHORITY, the xds channel should be
+ // treated as a stand-alone channel and not inherit this argument from the
+ // args of the parent channel.
+ GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
+ // Don't want to pass down channelz node from parent; the balancer
+ // channel will get its own.
+ GRPC_ARG_CHANNELZ_CHANNEL_NODE,
+ };
+ // Channel args to add.
+ InlinedVector args_to_add;
+ // A channel arg indicating that the target is an xds server.
+ // TODO(roth): Once we figure out our fallback and credentials story, decide
+ // whether this is actually needed. Note that it's currently used by the
+ // fake security connector as well.
+ args_to_add.emplace_back(grpc_channel_arg_integer_create(
+ const_cast(GRPC_ARG_ADDRESS_IS_XDS_SERVER), 1));
+ // The parent channel's channelz uuid.
+ channelz::ChannelNode* channelz_node = nullptr;
+ const grpc_arg* arg =
+ grpc_channel_args_find(&args, GRPC_ARG_CHANNELZ_CHANNEL_NODE);
+ if (arg != nullptr && arg->type == GRPC_ARG_POINTER &&
+ arg->value.pointer.p != nullptr) {
+ channelz_node = static_cast(arg->value.pointer.p);
+ args_to_add.emplace_back(
+ channelz::MakeParentUuidArg(channelz_node->uuid()));
+ }
+ // Construct channel args.
+ grpc_channel_args* new_args = grpc_channel_args_copy_and_add_and_remove(
+ &args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), args_to_add.data(),
+ args_to_add.size());
+ // Make any necessary modifications for security.
+ return ModifyXdsChannelArgs(new_args);
+}
+
+} // namespace
+
+XdsClient::ChannelState::ChannelState(RefCountedPtr xds_client,
+ const char* balancer_name,
+ const grpc_channel_args& args)
+ : InternallyRefCounted(&grpc_xds_client_trace),
+ xds_client_(std::move(xds_client)) {
+ grpc_channel_args* new_args = BuildXdsChannelArgs(args);
+ channel_ = CreateXdsChannel(balancer_name, *new_args);
+ grpc_channel_args_destroy(new_args);
+ GPR_ASSERT(channel_ != nullptr);
+ StartConnectivityWatchLocked();
+}
+
+XdsClient::ChannelState::~ChannelState() {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(GPR_INFO, "[xds_client %p] Destroying xds channel %p", xds_client(),
+ this);
+ }
+ grpc_channel_destroy(channel_);
+}
+
+void XdsClient::ChannelState::Orphan() {
+ shutting_down_ = true;
+ CancelConnectivityWatchLocked();
+ eds_calld_.reset();
+ lrs_calld_.reset();
+ Unref(DEBUG_LOCATION, "ChannelState+orphaned");
+}
+
+void XdsClient::ChannelState::MaybeStartEdsCall() {
+ if (eds_calld_ != nullptr) return;
+ eds_calld_.reset(New>(
+ Ref(DEBUG_LOCATION, "ChannelState+eds")));
+}
+
+void XdsClient::ChannelState::StopEdsCall() { eds_calld_.reset(); }
+
+void XdsClient::ChannelState::MaybeStartLrsCall() {
+ if (lrs_calld_ != nullptr) return;
+ lrs_calld_.reset(New>(
+ Ref(DEBUG_LOCATION, "ChannelState+lrs")));
+}
+
+void XdsClient::ChannelState::StopLrsCall() { lrs_calld_.reset(); }
+
+void XdsClient::ChannelState::StartConnectivityWatchLocked() {
+ grpc_channel_element* client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
+ GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
+ watcher_ = New(Ref());
+ grpc_client_channel_start_connectivity_watch(
+ client_channel_elem, GRPC_CHANNEL_IDLE,
+ OrphanablePtr(watcher_));
+}
+
+void XdsClient::ChannelState::CancelConnectivityWatchLocked() {
+ grpc_channel_element* client_channel_elem =
+ grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
+ GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
+ grpc_client_channel_stop_connectivity_watch(client_channel_elem, watcher_);
+}
+
+//
+// XdsClient::ChannelState::RetryableCall<>
+//
+
+template
+XdsClient::ChannelState::RetryableCall::RetryableCall(
+ RefCountedPtr chand)
+ : chand_(std::move(chand)),
+ backoff_(
+ BackOff::Options()
+ .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
+ 1000)
+ .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
+ .set_jitter(GRPC_XDS_RECONNECT_JITTER)
+ .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+ GRPC_CLOSURE_INIT(&on_retry_timer_, OnRetryTimerLocked, this,
+ grpc_combiner_scheduler(chand_->xds_client()->combiner_));
+ StartNewCallLocked();
+}
+
+template
+void XdsClient::ChannelState::RetryableCall::Orphan() {
+ shutting_down_ = true;
+ calld_.reset();
+ if (retry_timer_callback_pending_) grpc_timer_cancel(&retry_timer_);
+ this->Unref(DEBUG_LOCATION, "RetryableCall+orphaned");
+}
+
+template
+void XdsClient::ChannelState::RetryableCall::OnCallFinishedLocked() {
+ const bool seen_response = calld_->seen_response();
+ calld_.reset();
+ if (seen_response) {
+ // If we lost connection to the xds server, reset backoff and restart the
+ // call immediately.
+ backoff_.Reset();
+ StartNewCallLocked();
+ } else {
+ // If we failed to connect to the xds server, retry later.
+ StartRetryTimerLocked();
+ }
+}
+
+template
+void XdsClient::ChannelState::RetryableCall::StartNewCallLocked() {
+ if (shutting_down_) return;
+ GPR_ASSERT(chand_->channel_ != nullptr);
+ GPR_ASSERT(calld_ == nullptr);
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Start new call from retryable call (chand: %p, "
+ "retryable call: %p)",
+ chand()->xds_client(), chand(), this);
+ }
+ calld_ = MakeOrphanable(
+ this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
+}
+
+template
+void XdsClient::ChannelState::RetryableCall::StartRetryTimerLocked() {
+ if (shutting_down_) return;
+ const grpc_millis next_attempt_time = backoff_.NextAttemptTime();
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ grpc_millis timeout = GPR_MAX(next_attempt_time - ExecCtx::Get()->Now(), 0);
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Failed to connect to xds server (chand: %p) "
+ "retry timer will fire in %" PRId64 "ms.",
+ chand()->xds_client(), chand(), timeout);
+ }
+ this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release();
+ grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
+ retry_timer_callback_pending_ = true;
+}
+
+template
+void XdsClient::ChannelState::RetryableCall::OnRetryTimerLocked(
+ void* arg, grpc_error* error) {
+ RetryableCall* calld = static_cast(arg);
+ calld->retry_timer_callback_pending_ = false;
+ if (!calld->shutting_down_ && error == GRPC_ERROR_NONE) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(
+ GPR_INFO,
+ "[xds_client %p] Retry timer fires (chand: %p, retryable call: %p)",
+ calld->chand()->xds_client(), calld->chand(), calld);
+ }
+ calld->StartNewCallLocked();
+ }
+ calld->Unref(DEBUG_LOCATION, "RetryableCall+retry_timer_done");
+}
+
+//
+// XdsClient::ChannelState::EdsCallState
+//
+
+XdsClient::ChannelState::EdsCallState::EdsCallState(
+ RefCountedPtr> parent)
+ : InternallyRefCounted(&grpc_xds_client_trace),
+ parent_(std::move(parent)) {
+ // Init the EDS call. Note that the call will progress every time there's
+ // activity in xds_client()->interested_parties_, which is comprised of
+ // the polling entities from client_channel.
+ GPR_ASSERT(xds_client() != nullptr);
+ GPR_ASSERT(xds_client()->server_name_ != nullptr);
+ GPR_ASSERT(*xds_client()->server_name_.get() != '\0');
+ // Create a call with the specified method name.
+ call_ = grpc_channel_create_pollset_set_call(
+ chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
+ xds_client()->interested_parties_,
+ GRPC_MDSTR_SLASH_ENVOY_DOT_API_DOT_V2_DOT_ENDPOINTDISCOVERYSERVICE_SLASH_STREAMENDPOINTS,
+ nullptr, GRPC_MILLIS_INF_FUTURE, nullptr);
+ GPR_ASSERT(call_ != nullptr);
+ // Init the request payload.
+ grpc_slice request_payload_slice =
+ XdsEdsRequestCreateAndEncode(xds_client()->server_name_.get());
+ send_message_payload_ =
+ grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+ grpc_slice_unref_internal(request_payload_slice);
+ // Init other data associated with the call.
+ grpc_metadata_array_init(&initial_metadata_recv_);
+ grpc_metadata_array_init(&trailing_metadata_recv_);
+ GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
+ grpc_combiner_scheduler(xds_client()->combiner_));
+ GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
+ grpc_combiner_scheduler(xds_client()->combiner_));
+ // Start the call.
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Starting EDS call (chand: %p, calld: %p, "
+ "call: %p)",
+ xds_client(), chand(), this, call_);
+ }
+ // Create the ops.
+ grpc_call_error call_error;
+ grpc_op ops[3];
+ memset(ops, 0, sizeof(ops));
+ // Op: send initial metadata.
+ grpc_op* op = ops;
+ op->op = GRPC_OP_SEND_INITIAL_METADATA;
+ op->data.send_initial_metadata.count = 0;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ // Op: send request message.
+ GPR_ASSERT(send_message_payload_ != nullptr);
+ op->op = GRPC_OP_SEND_MESSAGE;
+ op->data.send_message.send_message = send_message_payload_;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+ nullptr);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+ // Op: recv initial metadata.
+ op = ops;
+ op->op = GRPC_OP_RECV_INITIAL_METADATA;
+ op->data.recv_initial_metadata.recv_initial_metadata =
+ &initial_metadata_recv_;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ // Op: recv response.
+ op->op = GRPC_OP_RECV_MESSAGE;
+ op->data.recv_message.recv_message = &recv_message_payload_;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ Ref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked").release();
+ call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+ &on_response_received_);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+ // Op: recv server status.
+ op = ops;
+ op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+ op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
+ op->data.recv_status_on_client.status = &status_code_;
+ op->data.recv_status_on_client.status_details = &status_details_;
+ op->flags = 0;
+ op->reserved = nullptr;
+ op++;
+ // This callback signals the end of the call, so it relies on the initial
+ // ref instead of a new ref. When it's invoked, it's the initial ref that is
+ // unreffed.
+ call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+ &on_status_received_);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+XdsClient::ChannelState::EdsCallState::~EdsCallState() {
+ grpc_metadata_array_destroy(&initial_metadata_recv_);
+ grpc_metadata_array_destroy(&trailing_metadata_recv_);
+ grpc_byte_buffer_destroy(send_message_payload_);
+ grpc_byte_buffer_destroy(recv_message_payload_);
+ grpc_slice_unref_internal(status_details_);
+ GPR_ASSERT(call_ != nullptr);
+ grpc_call_unref(call_);
+}
+
+void XdsClient::ChannelState::EdsCallState::Orphan() {
+ GPR_ASSERT(call_ != nullptr);
+ // If we are here because xds_client wants to cancel the call,
+ // on_status_received_ will complete the cancellation and clean up. Otherwise,
+ // we are here because xds_client has to orphan a failed call, in which
+ // case the following cancellation will be a no-op.
+ grpc_call_cancel(call_, nullptr);
+ // Note that the initial ref is held by on_status_received_. So the
+ // corresponding unref happens in on_status_received_ instead of here.
+}
+
+void XdsClient::ChannelState::EdsCallState::OnResponseReceivedLocked(
+ void* arg, grpc_error* error) {
+ EdsCallState* eds_calld = static_cast(arg);
+ XdsClient* xds_client = eds_calld->xds_client();
+ // Empty payload means the call was cancelled.
+ if (!eds_calld->IsCurrentCallOnChannel() ||
+ eds_calld->recv_message_payload_ == nullptr) {
+ eds_calld->Unref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked");
+ return;
+ }
+ // Read the response.
+ grpc_byte_buffer_reader bbr;
+ grpc_byte_buffer_reader_init(&bbr, eds_calld->recv_message_payload_);
+ grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
+ grpc_byte_buffer_reader_destroy(&bbr);
+ grpc_byte_buffer_destroy(eds_calld->recv_message_payload_);
+ eds_calld->recv_message_payload_ = nullptr;
+ // TODO(juanlishen): When we convert this to use the xds protocol, the
+ // balancer will send us a fallback timeout such that we should go into
+ // fallback mode if we have lost contact with the balancer after a certain
+ // period of time. We will need to save the timeout value here, and then
+ // when the balancer call ends, we will need to start a timer for the
+ // specified period of time, and if the timer fires, we go into fallback
+ // mode. We will also need to cancel the timer when we receive a serverlist
+ // from the balancer.
+ // This anonymous lambda is a hack to avoid the usage of goto.
+ [&]() {
+ // Parse the response.
+ EdsUpdate update;
+ grpc_error* parse_error =
+ XdsEdsResponseDecodeAndParse(response_slice, &update);
+ if (parse_error != GRPC_ERROR_NONE) {
+ gpr_log(GPR_ERROR,
+ "[xds_client %p] EDS response parsing failed. error=%s",
+ xds_client, grpc_error_string(parse_error));
+ GRPC_ERROR_UNREF(parse_error);
+ return;
+ }
+ if (update.priority_list_update.empty() && !update.drop_all) {
+ char* response_slice_str =
+ grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX);
+ gpr_log(GPR_ERROR,
+ "[xds_client %p] EDS response '%s' doesn't contain any valid "
+ "locality but doesn't require to drop all calls. Ignoring.",
+ xds_client, response_slice_str);
+ gpr_free(response_slice_str);
+ return;
+ }
+ eds_calld->seen_response_ = true;
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(GPR_INFO,
+ "[xds_client %p] EDS response with %" PRIuPTR
+ " priorities and %" PRIuPTR
+ " drop categories received (drop_all=%d)",
+ xds_client, update.priority_list_update.size(),
+ update.drop_config->drop_category_list().size(), update.drop_all);
+ for (size_t priority = 0; priority < update.priority_list_update.size();
+ ++priority) {
+ const auto* locality_map_update =
+ update.priority_list_update.Find(static_cast(priority));
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Priority %" PRIuPTR " contains %" PRIuPTR
+ " localities",
+ xds_client, priority, locality_map_update->size());
+ size_t locality_count = 0;
+ for (const auto& p : locality_map_update->localities) {
+ const auto& locality = p.second;
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Priority %" PRIuPTR ", locality %" PRIuPTR
+ " %s contains %" PRIuPTR " server addresses",
+ xds_client, priority, locality_count,
+ locality.name->AsHumanReadableString(),
+ locality.serverlist.size());
+ for (size_t i = 0; i < locality.serverlist.size(); ++i) {
+ char* ipport;
+ grpc_sockaddr_to_string(&ipport, &locality.serverlist[i].address(),
+ false);
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Priority %" PRIuPTR ", locality %" PRIuPTR
+ " %s, server address %" PRIuPTR ": %s",
+ xds_client, priority, locality_count,
+ locality.name->AsHumanReadableString(), i, ipport);
+ gpr_free(ipport);
+ }
+ ++locality_count;
+ }
+ }
+ for (size_t i = 0; i < update.drop_config->drop_category_list().size();
+ ++i) {
+ const XdsDropConfig::DropCategory& drop_category =
+ update.drop_config->drop_category_list()[i];
+ gpr_log(GPR_INFO,
+ "[xds_client %p] Drop category %s has drop rate %d per million",
+ xds_client, drop_category.name.get(),
+ drop_category.parts_per_million);
+ }
+ }
+ // Start load reporting if needed.
+ LrsCallState* lrs_calld = eds_calld->chand()->lrs_calld_->calld();
+ if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
+ // Ignore identical update.
+ const EdsUpdate& prev_update = xds_client->cluster_state_.eds_update;
+ const bool priority_list_changed =
+ prev_update.priority_list_update != update.priority_list_update;
+ const bool drop_config_changed =
+ prev_update.drop_config == nullptr ||
+ *prev_update.drop_config != *update.drop_config;
+ if (!priority_list_changed && !drop_config_changed) {
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ gpr_log(GPR_INFO,
+ "[xds_client %p] EDS update identical to current, ignoring.",
+ xds_client);
+ }
+ return;
+ }
+ // Update the cluster state.
+ ClusterState& cluster_state = xds_client->cluster_state_;
+ cluster_state.eds_update = std::move(update);
+ // Notify all watchers.
+ for (const auto& p : cluster_state.endpoint_watchers) {
+ p.first->OnEndpointChanged(cluster_state.eds_update);
+ }
+ }();
+ grpc_slice_unref_internal(response_slice);
+ if (xds_client->shutting_down_) {
+ eds_calld->Unref(DEBUG_LOCATION,
+ "EDS+OnResponseReceivedLocked+xds_shutdown");
+ return;
+ }
+ // Keep listening for serverlist updates.
+ grpc_op op;
+ memset(&op, 0, sizeof(op));
+ op.op = GRPC_OP_RECV_MESSAGE;
+ op.data.recv_message.recv_message = &eds_calld->recv_message_payload_;
+ op.flags = 0;
+ op.reserved = nullptr;
+ GPR_ASSERT(eds_calld->call_ != nullptr);
+ // Reuse the "EDS+OnResponseReceivedLocked" ref taken in ctor.
+ const grpc_call_error call_error = grpc_call_start_batch_and_execute(
+ eds_calld->call_, &op, 1, &eds_calld->on_response_received_);
+ GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+void XdsClient::ChannelState::EdsCallState::OnStatusReceivedLocked(
+ void* arg, grpc_error* error) {
+ EdsCallState* eds_calld = static_cast(arg);
+ ChannelState* chand = eds_calld->chand();
+ XdsClient* xds_client = eds_calld->xds_client();
+ if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+ char* status_details = grpc_slice_to_c_string(eds_calld->status_details_);
+ gpr_log(GPR_INFO,
+ "[xds_client %p] EDS call status received. Status = %d, details "
+ "= '%s', (chand: %p, eds_calld: %p, call: %p), error '%s'",
+ xds_client, eds_calld->status_code_, status_details, chand,
+ eds_calld, eds_calld->call_, grpc_error_string(error));
+ gpr_free(status_details);
+ }
+ // Ignore status from a stale call.
+ if (eds_calld->IsCurrentCallOnChannel()) {
+ // Try to restart the call.
+ eds_calld->parent_->OnCallFinishedLocked();
+ // Send error to all watchers.
+ xds_client->NotifyOnError(
+ GRPC_ERROR_CREATE_FROM_STATIC_STRING("xds call failed"));
+ }
+ eds_calld->Unref(DEBUG_LOCATION, "EDS+OnStatusReceivedLocked");
+}
+
+bool XdsClient::ChannelState::EdsCallState::IsCurrentCallOnChannel() const {
+ // If the retryable EDS call is null (which only happens when the xds channel
+ // is shutting down), all the EDS calls are stale.
+ if (chand()->eds_calld_ == nullptr) return false;
+ return this == chand()->eds_calld_->calld();
+}
+
+//
+// XdsClient::ChannelState::LrsCallState::Reporter
+//
+
+void XdsClient::ChannelState::LrsCallState::Reporter::Orphan() {
+ if (next_report_timer_callback_pending_) {
+ grpc_timer_cancel(&next_report_timer_);
+ }
+}
+
+void XdsClient::ChannelState::LrsCallState::Reporter::
+ ScheduleNextReportLocked() {
+ const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
+ grpc_timer_init(&next_report_timer_, next_report_time,
+ &on_next_report_timer_);
+ next_report_timer_callback_pending_ = true;
+}
+
+void XdsClient::ChannelState::LrsCallState::Reporter::OnNextReportTimerLocked(
+ void* arg, grpc_error* error) {
+ Reporter* self = static_cast(arg);
+ self->next_report_timer_callback_pending_ = false;
+ if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
+ self->Unref(DEBUG_LOCATION, "Reporter+timer");
+ return;
+ }
+ self->SendReportLocked();
+}
+
+// Serializes the current load stats into an LRS request and sends it on the
+// LRS call.  Skips the send (and just re-arms the timer) when two consecutive
+// reports would both be all-zero.
+void XdsClient::ChannelState::LrsCallState::Reporter::SendReportLocked() {
+  // Create a request that contains the load report.
+  // TODO(roth): Currently, it is not possible to have multiple client
+  // stats objects for a given cluster.  However, in the future, we may
+  // run into cases where this happens (e.g., due to graceful LB policy
+  // switching).  If/when this becomes a problem, replace this assertion
+  // with code to merge data from multiple client stats objects.
+  GPR_ASSERT(xds_client()->cluster_state_.client_stats.size() == 1);
+  auto* client_stats = *xds_client()->cluster_state_.client_stats.begin();
+  grpc_slice request_payload_slice =
+      XdsLrsRequestCreateAndEncode(parent_->cluster_name_.get(), client_stats);
+  // Skip client load report if the counters were all zero in the last
+  // report and they are still zero in this one.
+  const bool old_val = last_report_counters_were_zero_;
+  last_report_counters_were_zero_ = static_cast<bool>(
+      grpc_slice_eq(request_payload_slice, grpc_empty_slice()));
+  if (old_val && last_report_counters_were_zero_) {
+    ScheduleNextReportLocked();
+    return;
+  }
+  parent_->send_message_payload_ =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(request_payload_slice);
+  // Send the report.
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = parent_->send_message_payload_;
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      parent_->call_, &op, 1, &on_report_done_);
+  if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
+    gpr_log(GPR_ERROR,
+            "[xds_client %p] calld=%p call_error=%d sending client load report",
+            xds_client(), this, call_error);
+    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  }
+}
+
+// Completion callback for the SEND_MESSAGE op of a load report.  Frees the
+// sent payload; either schedules the next report or, if this reporter is
+// stale or the send failed, lets the parent restart reporting and drops
+// the ref held for this callback.
+void XdsClient::ChannelState::LrsCallState::Reporter::OnReportDoneLocked(
+    void* arg, grpc_error* error) {
+  Reporter* self = static_cast<Reporter*>(arg);
+  grpc_byte_buffer_destroy(self->parent_->send_message_payload_);
+  self->parent_->send_message_payload_ = nullptr;
+  if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
+    // If this reporter is no longer the current one on the call, the reason
+    // might be that it was orphaned for a new one due to config update.
+    if (!self->IsCurrentReporterOnCall()) {
+      self->parent_->MaybeStartReportingLocked();
+    }
+    self->Unref(DEBUG_LOCATION, "Reporter+report_done");
+    return;
+  }
+  self->ScheduleNextReportLocked();
+}
+
+//
+// XdsClient::ChannelState::LrsCallState
+//
+
+// Creates and starts the LRS (load reporting service) call: sends the
+// initial request, then kicks off the recv-initial-metadata/recv-message
+// and recv-status batches.  The initial ref is held by on_status_received_.
+XdsClient::ChannelState::LrsCallState::LrsCallState(
+    RefCountedPtr<RetryableCall<LrsCallState>> parent)
+    : InternallyRefCounted<LrsCallState>(&grpc_xds_client_trace),
+      parent_(std::move(parent)) {
+  // Init the LRS call. Note that the call will progress every time there's
+  // activity in xds_client()->interested_parties_, which is comprised of
+  // the polling entities from client_channel.
+  GPR_ASSERT(xds_client() != nullptr);
+  GPR_ASSERT(xds_client()->server_name_ != nullptr);
+  GPR_ASSERT(*xds_client()->server_name_.get() != '\0');
+  call_ = grpc_channel_create_pollset_set_call(
+      chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
+      xds_client()->interested_parties_,
+      GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_LOAD_STATS_DOT_V2_DOT_LOADREPORTINGSERVICE_SLASH_STREAMLOADSTATS,
+      nullptr, GRPC_MILLIS_INF_FUTURE, nullptr);
+  GPR_ASSERT(call_ != nullptr);
+  // Init the request payload.
+  grpc_slice request_payload_slice =
+      XdsLrsRequestCreateAndEncode(xds_client()->server_name_.get());
+  send_message_payload_ =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(request_payload_slice);
+  // Init other data associated with the LRS call.
+  grpc_metadata_array_init(&initial_metadata_recv_);
+  grpc_metadata_array_init(&trailing_metadata_recv_);
+  GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSentLocked, this,
+                    grpc_combiner_scheduler(xds_client()->combiner_));
+  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
+                    grpc_combiner_scheduler(xds_client()->combiner_));
+  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
+                    grpc_combiner_scheduler(xds_client()->combiner_));
+  // Start the call.
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+    gpr_log(GPR_INFO,
+            "[xds_client %p] Starting LRS call (chand: %p, calld: %p, "
+            "call: %p)",
+            xds_client(), chand(), this, call_);
+  }
+  // Create the ops.
+  grpc_call_error call_error;
+  grpc_op ops[3];
+  memset(ops, 0, sizeof(ops));
+  // Op: send initial metadata.
+  grpc_op* op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  // Op: send request message.
+  GPR_ASSERT(send_message_payload_ != nullptr);
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message.send_message = send_message_payload_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
+  call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+                                                 &on_initial_request_sent_);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+  // Op: recv initial metadata.
+  op = ops;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata.recv_initial_metadata =
+      &initial_metadata_recv_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  // Op: recv response.
+  op->op = GRPC_OP_RECV_MESSAGE;
+  op->data.recv_message.recv_message = &recv_message_payload_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release();
+  call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+                                                 &on_response_received_);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+  // Op: recv server status.
+  op = ops;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
+  op->data.recv_status_on_client.status = &status_code_;
+  op->data.recv_status_on_client.status_details = &status_details_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  // This callback signals the end of the call, so it relies on the initial
+  // ref instead of a new ref. When it's invoked, it's the initial ref that is
+  // unreffed.
+  call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
+                                                 &on_status_received_);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+// Destructor: releases all per-call resources and drops the call's ref.
+XdsClient::ChannelState::LrsCallState::~LrsCallState() {
+  grpc_metadata_array_destroy(&initial_metadata_recv_);
+  grpc_metadata_array_destroy(&trailing_metadata_recv_);
+  grpc_byte_buffer_destroy(send_message_payload_);
+  grpc_byte_buffer_destroy(recv_message_payload_);
+  grpc_slice_unref_internal(status_details_);
+  GPR_ASSERT(call_ != nullptr);
+  grpc_call_unref(call_);
+}
+
+// Orphan: stops load reporting and cancels the LRS call; final cleanup
+// happens when on_status_received_ runs.
+void XdsClient::ChannelState::LrsCallState::Orphan() {
+  reporter_.reset();
+  GPR_ASSERT(call_ != nullptr);
+  // If we are here because xds_client wants to cancel the call,
+  // on_status_received_ will complete the cancellation and clean up. Otherwise,
+  // we are here because xds_client has to orphan a failed call, then the
+  // following cancellation will be a no-op.
+  grpc_call_cancel(call_, nullptr);
+  // Note that the initial ref is held by on_status_received_. So the
+  // corresponding unref happens in on_status_received_ instead of here.
+}
+
+// Starts a new Reporter if all preconditions are met: no reporter running,
+// the previous send_message op has completed, an LRS response has been seen,
+// and the EDS call has received at least one valid response.
+void XdsClient::ChannelState::LrsCallState::MaybeStartReportingLocked() {
+  // Don't start again if already started.
+  if (reporter_ != nullptr) return;
+  // Don't start if the previous send_message op (of the initial request or the
+  // last report of the previous reporter) hasn't completed.
+  if (send_message_payload_ != nullptr) return;
+  // Don't start if no LRS response has arrived.
+  if (!seen_response()) return;
+  // Don't start if the EDS call hasn't received any valid response. Note that
+  // this must be the first channel because it is the current channel but its
+  // EDS call hasn't seen any response.
+  EdsCallState* eds_calld = chand()->eds_calld_->calld();
+  if (eds_calld == nullptr || !eds_calld->seen_response()) return;
+  // Start reporting.
+  for (auto* client_stats : chand()->xds_client_->cluster_state_.client_stats) {
+    client_stats->MaybeInitLastReportTime();
+  }
+  reporter_ = MakeOrphanable<Reporter>(
+      Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_);
+}
+
+// Completion callback for the initial LRS request's SEND_MESSAGE op.
+// Frees the sent payload and tries to start reporting (the payload being
+// null is one of MaybeStartReportingLocked's preconditions).
+void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  // Clear the send_message_payload_.
+  grpc_byte_buffer_destroy(lrs_calld->send_message_payload_);
+  lrs_calld->send_message_payload_ = nullptr;
+  lrs_calld->MaybeStartReportingLocked();
+  lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked");
+}
+
+// Handles an LRS response: parses the cluster name and reporting interval,
+// (re)starts the reporter on config change, and re-arms the RECV_MESSAGE op
+// to keep listening for further config updates.
+void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  XdsClient* xds_client = lrs_calld->xds_client();
+  // Empty payload means the call was cancelled.
+  if (!lrs_calld->IsCurrentCallOnChannel() ||
+      lrs_calld->recv_message_payload_ == nullptr) {
+    lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked");
+    return;
+  }
+  // Read the response.
+  grpc_byte_buffer_reader bbr;
+  grpc_byte_buffer_reader_init(&bbr, lrs_calld->recv_message_payload_);
+  grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
+  grpc_byte_buffer_reader_destroy(&bbr);
+  grpc_byte_buffer_destroy(lrs_calld->recv_message_payload_);
+  lrs_calld->recv_message_payload_ = nullptr;
+  // This anonymous lambda is a hack to avoid the usage of goto.
+  [&]() {
+    // Parse the response.
+    UniquePtr<char> new_cluster_name;
+    grpc_millis new_load_reporting_interval;
+    grpc_error* parse_error = XdsLrsResponseDecodeAndParse(
+        response_slice, &new_cluster_name, &new_load_reporting_interval);
+    if (parse_error != GRPC_ERROR_NONE) {
+      gpr_log(GPR_ERROR,
+              "[xds_client %p] LRS response parsing failed. error=%s",
+              xds_client, grpc_error_string(parse_error));
+      GRPC_ERROR_UNREF(parse_error);
+      return;
+    }
+    lrs_calld->seen_response_ = true;
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+      gpr_log(GPR_INFO,
+              "[xds_client %p] LRS response received, cluster_name=%s, "
+              "load_report_interval=%" PRId64 "ms",
+              xds_client, new_cluster_name.get(), new_load_reporting_interval);
+    }
+    // Clamp the server-provided interval to the configured minimum.
+    if (new_load_reporting_interval <
+        GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) {
+      new_load_reporting_interval =
+          GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS;
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+        gpr_log(GPR_INFO,
+                "[xds_client %p] Increased load_report_interval to minimum "
+                "value %dms",
+                xds_client, GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
+      }
+    }
+    // Ignore identical update.
+    if (lrs_calld->load_reporting_interval_ == new_load_reporting_interval &&
+        strcmp(lrs_calld->cluster_name_.get(), new_cluster_name.get()) == 0) {
+      if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+        gpr_log(GPR_INFO,
+                "[xds_client %p] Incoming LRS response identical to current, "
+                "ignoring.",
+                xds_client);
+      }
+      return;
+    }
+    // Stop current load reporting (if any) to adopt the new config.
+    lrs_calld->reporter_.reset();
+    // Record the new config.
+    lrs_calld->cluster_name_ = std::move(new_cluster_name);
+    lrs_calld->load_reporting_interval_ = new_load_reporting_interval;
+    // Try starting sending load report.
+    lrs_calld->MaybeStartReportingLocked();
+  }();
+  grpc_slice_unref_internal(response_slice);
+  if (xds_client->shutting_down_) {
+    lrs_calld->Unref(DEBUG_LOCATION,
+                     "LRS+OnResponseReceivedLocked+xds_shutdown");
+    return;
+  }
+  // Keep listening for LRS config updates.
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_RECV_MESSAGE;
+  op.data.recv_message.recv_message = &lrs_calld->recv_message_payload_;
+  op.flags = 0;
+  op.reserved = nullptr;
+  GPR_ASSERT(lrs_calld->call_ != nullptr);
+  // Reuse the "OnResponseReceivedLocked" ref taken in ctor.
+  const grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      lrs_calld->call_, &op, 1, &lrs_calld->on_response_received_);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+}
+
+// Terminal callback for the LRS call.  If this is still the current call on
+// the channel, asks the retryable-call parent to restart; then drops the
+// call's initial ref.
+void XdsClient::ChannelState::LrsCallState::OnStatusReceivedLocked(
+    void* arg, grpc_error* error) {
+  LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
+  XdsClient* xds_client = lrs_calld->xds_client();
+  ChannelState* chand = lrs_calld->chand();
+  GPR_ASSERT(lrs_calld->call_ != nullptr);
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
+    char* status_details = grpc_slice_to_c_string(lrs_calld->status_details_);
+    gpr_log(GPR_INFO,
+            "[xds_client %p] LRS call status received. Status = %d, details "
+            "= '%s', (chand: %p, calld: %p, call: %p), error '%s'",
+            xds_client, lrs_calld->status_code_, status_details, chand,
+            lrs_calld, lrs_calld->call_, grpc_error_string(error));
+    gpr_free(status_details);
+  }
+  // Ignore status from a stale call.
+  if (lrs_calld->IsCurrentCallOnChannel()) {
+    GPR_ASSERT(!xds_client->shutting_down_);
+    // Try to restart the call.
+    lrs_calld->parent_->OnCallFinishedLocked();
+  }
+  lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnStatusReceivedLocked");
+}
+
+// Returns true iff this object is still the active LRS call on its channel.
+bool XdsClient::ChannelState::LrsCallState::IsCurrentCallOnChannel() const {
+  // If the retryable LRS call is null (which only happens when the xds channel
+  // is shutting down), all the LRS calls are stale.
+  if (chand()->lrs_calld_ == nullptr) return false;
+  return this == chand()->lrs_calld_->calld();
+}
+
+//
+// XdsClient
+//
+
+// Constructs the XdsClient: takes a ref on the combiner, copies the server
+// name, and creates the channel to the xds server.
+XdsClient::XdsClient(grpc_combiner* combiner,
+                     grpc_pollset_set* interested_parties,
+                     const char* balancer_name, StringView server_name,
+                     UniquePtr<ServiceConfigWatcherInterface> watcher,
+                     const grpc_channel_args& channel_args)
+    : combiner_(GRPC_COMBINER_REF(combiner, "xds_client")),
+      interested_parties_(interested_parties),
+      server_name_(server_name.dup()),
+      service_config_watcher_(std::move(watcher)),
+      chand_(MakeOrphanable<ChannelState>(
+          Ref(DEBUG_LOCATION, "XdsClient+ChannelState"), balancer_name,
+          channel_args)) {
+  // TODO(roth): Start LDS call.
+}
+
+// Destructor only releases the combiner ref; all other state is torn down
+// in Orphan().
+XdsClient::~XdsClient() { GRPC_COMBINER_UNREF(combiner_, "xds_client"); }
+
+// Shuts down the client: marks shutting_down_, destroys the channel (which
+// cancels in-flight calls), and drops the initial ref.
+void XdsClient::Orphan() {
+  shutting_down_ = true;
+  chand_.reset();
+  Unref(DEBUG_LOCATION, "XdsClient::Orphan()");
+}
+
+// Registers a watcher for CDS data.  Not yet implemented; the watcher is
+// currently dropped on return.
+void XdsClient::WatchClusterData(StringView cluster,
+                                 UniquePtr<ClusterWatcherInterface> watcher) {
+  // TODO(roth): Implement.
+}
+
+// Cancels a previously registered cluster data watch.  Not yet implemented.
+void XdsClient::CancelClusterDataWatch(StringView cluster,
+                                       ClusterWatcherInterface* watcher) {
+  // TODO(roth): Implement.
+}
+
+// Registers a watcher for EDS data and ensures the EDS call is running.
+// The XdsClient takes ownership of the watcher; the raw pointer is kept as
+// the map key for later cancellation.
+void XdsClient::WatchEndpointData(StringView cluster,
+                                  UniquePtr<EndpointWatcherInterface> watcher) {
+  EndpointWatcherInterface* w = watcher.get();
+  cluster_state_.endpoint_watchers[w] = std::move(watcher);
+  // If we've already received an EDS update, notify the new watcher
+  // immediately.
+  if (!cluster_state_.eds_update.priority_list_update.empty()) {
+    w->OnEndpointChanged(cluster_state_.eds_update);
+  }
+  chand_->MaybeStartEdsCall();
+}
+
+// Removes an endpoint watcher (no-op if not registered) and stops the EDS
+// call once the last watcher is gone.
+void XdsClient::CancelEndpointDataWatch(StringView cluster,
+                                        EndpointWatcherInterface* watcher) {
+  auto it = cluster_state_.endpoint_watchers.find(watcher);
+  if (it != cluster_state_.endpoint_watchers.end()) {
+    cluster_state_.endpoint_watchers.erase(it);
+  }
+  if (cluster_state_.endpoint_watchers.empty()) chand_->StopEdsCall();
+}
+
+// Registers a client stats object (not owned) and ensures the LRS call is
+// running so loads get reported.
+void XdsClient::AddClientStats(StringView cluster,
+                               XdsClientStats* client_stats) {
+  cluster_state_.client_stats.insert(client_stats);
+  chand_->MaybeStartLrsCall();
+}
+
+// Unregisters a client stats object (no-op if absent) and stops the LRS
+// call once the last one is removed.
+void XdsClient::RemoveClientStats(StringView cluster,
+                                  XdsClientStats* client_stats) {
+  // TODO(roth): In principle, we should try to send a final load report
+  // containing whatever final stats have been accumulated since the
+  // last load report.
+  auto it = cluster_state_.client_stats.find(client_stats);
+  if (it != cluster_state_.client_stats.end()) {
+    cluster_state_.client_stats.erase(it);
+  }
+  if (cluster_state_.client_stats.empty()) chand_->StopLrsCall();
+}
+
+// Resets connection backoff on the xds channel, if one exists.
+void XdsClient::ResetBackoff() {
+  if (chand_ != nullptr) {
+    grpc_channel_reset_connect_backoff(chand_->channel());
+  }
+}
+
+// Fans an error out to every registered watcher.  Each watcher gets its own
+// ref on the error; the caller's ref is consumed here.
+void XdsClient::NotifyOnError(grpc_error* error) {
+  // TODO(roth): Once we implement the full LDS flow, it will not be
+  // necessary to check for the service config watcher being non-null,
+  // because that will always be true.
+  if (service_config_watcher_ != nullptr) {
+    service_config_watcher_->OnError(GRPC_ERROR_REF(error));
+  }
+  for (const auto& p : cluster_state_.cluster_watchers) {
+    p.first->OnError(GRPC_ERROR_REF(error));
+  }
+  for (const auto& p : cluster_state_.endpoint_watchers) {
+    p.first->OnError(GRPC_ERROR_REF(error));
+  }
+  GRPC_ERROR_UNREF(error);
+}
+
+// Channel-arg vtable: copying the arg takes a ref on the XdsClient,
+// destroying it drops the ref, comparison is by pointer identity.
+void* XdsClient::ChannelArgCopy(void* p) {
+  XdsClient* xds_client = static_cast<XdsClient*>(p);
+  xds_client->Ref().release();
+  return p;
+}
+
+void XdsClient::ChannelArgDestroy(void* p) {
+  XdsClient* xds_client = static_cast<XdsClient*>(p);
+  xds_client->Unref();
+}
+
+int XdsClient::ChannelArgCmp(void* p, void* q) { return GPR_ICMP(p, q); }
+
+const grpc_arg_pointer_vtable XdsClient::kXdsClientVtable = {
+    XdsClient::ChannelArgCopy, XdsClient::ChannelArgDestroy,
+    XdsClient::ChannelArgCmp};
+
+// Wraps this XdsClient in a pointer channel arg so it can be propagated
+// through channel args to child policies.
+grpc_arg XdsClient::MakeChannelArg() const {
+  return grpc_channel_arg_pointer_create(const_cast<char*>(GRPC_ARG_XDS_CLIENT),
+                                         const_cast<XdsClient*>(this),
+                                         &XdsClient::kXdsClientVtable);
+}
+
+// Looks up the XdsClient in channel args; returns a new ref or nullptr.
+RefCountedPtr<XdsClient> XdsClient::GetFromChannelArgs(
+    const grpc_channel_args& args) {
+  XdsClient* xds_client =
+      grpc_channel_args_find_pointer<XdsClient>(&args, GRPC_ARG_XDS_CLIENT);
+  if (xds_client != nullptr) return xds_client->Ref();
+  return nullptr;
+}
+
+} // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/xds/xds_client.h b/src/core/ext/filters/client_channel/xds/xds_client.h
new file mode 100644
index 00000000000..ef4210acb3d
--- /dev/null
+++ b/src/core/ext/filters/client_channel/xds/xds_client.h
@@ -0,0 +1,153 @@
+//
+// Copyright 2019 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_H
+
+#include <grpc/support/port_platform.h>
+
+#include "src/core/ext/filters/client_channel/service_config.h"
+#include "src/core/ext/filters/client_channel/xds/xds_api.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
+#include "src/core/lib/gprpp/map.h"
+#include "src/core/lib/gprpp/memory.h"
+#include "src/core/lib/gprpp/orphanable.h"
+#include "src/core/lib/gprpp/ref_counted.h"
+#include "src/core/lib/gprpp/ref_counted_ptr.h"
+#include "src/core/lib/gprpp/set.h"
+#include "src/core/lib/gprpp/string_view.h"
+#include "src/core/lib/iomgr/combiner.h"
+
+namespace grpc_core {
+
+// NOTE(review): declared here under the same name the .cc file uses
+// (grpc_xds_client_trace); the source previously said xds_client_trace,
+// which would not link against the definition -- confirm against the .cc.
+extern TraceFlag grpc_xds_client_trace;
+
+// The top-level xds client object.  Owns the channel to the xds server and
+// multiplexes CDS/EDS watches and LRS load reporting over it.
+class XdsClient : public InternallyRefCounted<XdsClient> {
+ public:
+  // Service config watcher interface. Implemented by callers.
+  class ServiceConfigWatcherInterface {
+   public:
+    virtual ~ServiceConfigWatcherInterface() = default;
+
+    virtual void OnServiceConfigChanged(
+        RefCountedPtr<ServiceConfig> service_config) = 0;
+
+    virtual void OnError(grpc_error* error) = 0;
+  };
+
+  // Cluster data watcher interface. Implemented by callers.
+  class ClusterWatcherInterface {
+   public:
+    virtual ~ClusterWatcherInterface() = default;
+
+    virtual void OnClusterChanged(CdsUpdate cluster_data) = 0;
+
+    virtual void OnError(grpc_error* error) = 0;
+  };
+
+  // Endpoint data watcher interface. Implemented by callers.
+  class EndpointWatcherInterface {
+   public:
+    virtual ~EndpointWatcherInterface() = default;
+
+    virtual void OnEndpointChanged(EdsUpdate update) = 0;
+
+    virtual void OnError(grpc_error* error) = 0;
+  };
+
+  XdsClient(grpc_combiner* combiner, grpc_pollset_set* interested_parties,
+            const char* balancer_name, StringView server_name,
+            UniquePtr<ServiceConfigWatcherInterface> watcher,
+            const grpc_channel_args& channel_args);
+  ~XdsClient();
+
+  void Orphan() override;
+
+  // Start and cancel cluster data watch for a cluster.
+  // The XdsClient takes ownership of the watcher, but the caller may
+  // keep a raw pointer to the watcher, which may be used only for
+  // cancellation. (Because the caller does not own the watcher, the
+  // pointer must not be used for any other purpose.)
+  void WatchClusterData(StringView cluster,
+                        UniquePtr<ClusterWatcherInterface> watcher);
+  void CancelClusterDataWatch(StringView cluster,
+                              ClusterWatcherInterface* watcher);
+
+  // Start and cancel endpoint data watch for a cluster.
+  // The XdsClient takes ownership of the watcher, but the caller may
+  // keep a raw pointer to the watcher, which may be used only for
+  // cancellation. (Because the caller does not own the watcher, the
+  // pointer must not be used for any other purpose.)
+  void WatchEndpointData(StringView cluster,
+                         UniquePtr<EndpointWatcherInterface> watcher);
+  void CancelEndpointDataWatch(StringView cluster,
+                               EndpointWatcherInterface* watcher);
+
+  // Adds and removes client stats for cluster.
+  void AddClientStats(StringView cluster, XdsClientStats* client_stats);
+  void RemoveClientStats(StringView cluster, XdsClientStats* client_stats);
+
+  // Resets connection backoff state.
+  void ResetBackoff();
+
+  // Helpers for encoding the XdsClient object in channel args.
+  grpc_arg MakeChannelArg() const;
+  static RefCountedPtr<XdsClient> GetFromChannelArgs(
+      const grpc_channel_args& args);
+
+ private:
+  class ChannelState;
+
+  struct ClusterState {
+    Map<ClusterWatcherInterface*, UniquePtr<ClusterWatcherInterface>>
+        cluster_watchers;
+    Map<EndpointWatcherInterface*, UniquePtr<EndpointWatcherInterface>>
+        endpoint_watchers;
+    Set<XdsClientStats*> client_stats;
+    // The latest data seen from EDS.
+    EdsUpdate eds_update;
+  };
+
+  // Sends an error notification to all watchers.
+  void NotifyOnError(grpc_error* error);
+
+  // Channel arg vtable functions.
+  static void* ChannelArgCopy(void* p);
+  static void ChannelArgDestroy(void* p);
+  static int ChannelArgCmp(void* p, void* q);
+
+  static const grpc_arg_pointer_vtable kXdsClientVtable;
+
+  grpc_combiner* combiner_;
+  grpc_pollset_set* interested_parties_;
+
+  UniquePtr<char> server_name_;
+  UniquePtr<ServiceConfigWatcherInterface> service_config_watcher_;
+
+  // The channel for communicating with the xds server.
+  OrphanablePtr<ChannelState> chand_;
+
+  // TODO(roth): When we need support for multiple clusters, replace
+  // cluster_state_ with a map keyed by cluster name.
+  ClusterState cluster_state_;
+  // Map<UniquePtr<char>, ClusterState, StringLess> clusters_;
+
+  bool shutting_down_ = false;
+};
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc b/src/core/ext/filters/client_channel/xds/xds_client_stats.cc
similarity index 98%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
rename to src/core/ext/filters/client_channel/xds/xds_client_stats.cc
index f2f06b467f2..2f36c3d979f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc
+++ b/src/core/ext/filters/client_channel/xds/xds_client_stats.cc
@@ -18,7 +18,7 @@
#include
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
+#include "src/core/ext/filters/client_channel/xds/xds_client_stats.h"
#include
#include
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h b/src/core/ext/filters/client_channel/xds/xds_client_stats.h
similarity index 97%
rename from src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h
rename to src/core/ext/filters/client_channel/xds/xds_client_stats.h
index 6e8dd961ea9..9bbd3e44e8f 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h
+++ b/src/core/ext/filters/client_channel/xds/xds_client_stats.h
@@ -16,8 +16,8 @@
*
*/
-#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H
-#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H
+#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_STATS_H
+#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_STATS_H
#include
@@ -226,5 +226,4 @@ class XdsClientStats {
} // namespace grpc_core
-#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H \
- */
+#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_XDS_XDS_CLIENT_STATS_H */
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index f6b4ace2c87..df105fd944f 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -100,6 +100,14 @@ bool grpc_channel_arg_get_bool(const grpc_arg* arg, bool default_value);
bool grpc_channel_args_find_bool(const grpc_channel_args* args,
const char* name, bool default_value);
+// Returns the pointer value of the channel arg named `name`, cast to T*,
+// or nullptr if the arg is absent or not of pointer type.
+template <typename T>
+T* grpc_channel_args_find_pointer(const grpc_channel_args* args,
+                                  const char* name) {
+  const grpc_arg* arg = grpc_channel_args_find(args, name);
+  if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return nullptr;
+  return static_cast<T*>(arg->value.pointer.p);
+}
+
// Helpers for creating channel args.
grpc_arg grpc_channel_arg_string_create(char* name, char* value);
grpc_arg grpc_channel_arg_integer_create(char* name, int value);
diff --git a/src/core/lib/security/security_connector/fake/fake_security_connector.cc b/src/core/lib/security/security_connector/fake/fake_security_connector.cc
index 6a010740dff..45e8327d241 100644
--- a/src/core/lib/security/security_connector/fake/fake_security_connector.cc
+++ b/src/core/lib/security/security_connector/fake/fake_security_connector.cc
@@ -27,7 +27,7 @@
#include
#include "src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h"
-#include "src/core/ext/filters/client_channel/lb_policy/xds/xds.h"
+#include "src/core/ext/filters/client_channel/xds/xds_channel_args.h"
#include "src/core/ext/transport/chttp2/alpn/alpn.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
@@ -56,8 +56,8 @@ class grpc_fake_channel_security_connector final
expected_targets_(
gpr_strdup(grpc_fake_transport_get_expected_targets(args))),
is_lb_channel_(
- grpc_channel_args_find(
- args, GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER) != nullptr ||
+ grpc_channel_args_find(args, GRPC_ARG_ADDRESS_IS_XDS_SERVER) !=
+ nullptr ||
grpc_channel_args_find(
args, GRPC_ARG_ADDRESS_IS_GRPCLB_LOAD_BALANCER) != nullptr) {
const grpc_arg* target_name_override_arg =
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 537cbf29855..9794049604a 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -386,9 +386,10 @@ CORE_SOURCE_FILES = [
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc',
- 'src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_api.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_channel_secure.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client.cc',
+ 'src/core/ext/filters/client_channel/xds/xds_client_stats.cc',
'src/core/ext/upb-generated/envoy/api/v2/auth/cert.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cds.upb.c',
'src/core/ext/upb-generated/envoy/api/v2/cluster/circuit_breaker.upb.c',
diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc
index 942216028e6..b637f6044fb 100644
--- a/test/cpp/end2end/xds_end2end_test.cc
+++ b/test/cpp/end2end/xds_end2end_test.cc
@@ -2049,70 +2049,6 @@ TEST_F(BalancerUpdateTest, Repeated) {
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
}
-// Tests that if the balancer name changes, a new LB channel will be created to
-// replace the old one.
-TEST_F(BalancerUpdateTest, UpdateBalancerName) {
- SetNextResolution({}, kDefaultServiceConfig_.c_str());
- SetNextResolutionForLbChannelAllBalancers();
- EdsServiceImpl::ResponseArgs args({
- {"locality0", {backends_[0]->port()}},
- });
- ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(args), 0);
- args = EdsServiceImpl::ResponseArgs({
- {"locality0", {backends_[1]->port()}},
- });
- ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(args), 0);
- // Wait until the first backend is ready.
- WaitForBackend(0);
- // Send 10 requests.
- gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
- // All 10 requests should have gone to the first backend.
- EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
- // The EDS service of balancer 0 got a single request, and sent a single
- // response.
- EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
- EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count());
- EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count());
- EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
- EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
- std::vector ports;
- ports.emplace_back(balancers_[1]->port());
- auto new_lb_channel_response_generator =
- grpc_core::MakeRefCounted();
- SetNextResolutionForLbChannel(ports, nullptr,
- new_lb_channel_response_generator.get());
- gpr_log(GPR_INFO, "========= ABOUT TO UPDATE BALANCER NAME ==========");
- SetNextResolution({},
- "{\n"
- " \"loadBalancingConfig\":[\n"
- " { \"does_not_exist\":{} },\n"
- " { \"xds_experimental\":{ \"balancerName\": "
- "\"fake:///updated_lb\" } }\n"
- " ]\n"
- "}",
- new_lb_channel_response_generator.get());
- gpr_log(GPR_INFO, "========= UPDATED BALANCER NAME ==========");
- // Wait until update has been processed, as signaled by the second backend
- // receiving a request.
- EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
- WaitForBackend(1);
- backends_[1]->backend_service()->ResetCounters();
- gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
- CheckRpcSendOk(10);
- gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
- // All 10 requests should have gone to the second backend.
- EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
- EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
- EXPECT_EQ(1U, balancers_[1]->eds_service()->request_count());
- EXPECT_EQ(1U, balancers_[1]->eds_service()->response_count());
- EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count());
- EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count());
-}
-
// Tests that if the balancer is down, the RPCs will still be sent to the
// backends according to the last balancer response, until a new balancer is
// reachable.
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index 9898945bddc..3e24168258e 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -914,12 +914,6 @@ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \
src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \
src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \
src/core/ext/filters/client_channel/lb_policy/xds/xds.h \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_secure.cc \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc \
-src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h \
src/core/ext/filters/client_channel/lb_policy_factory.h \
src/core/ext/filters/client_channel/lb_policy_registry.cc \
src/core/ext/filters/client_channel/lb_policy_registry.h \
@@ -973,6 +967,15 @@ src/core/ext/filters/client_channel/subchannel.h \
src/core/ext/filters/client_channel/subchannel_interface.h \
src/core/ext/filters/client_channel/subchannel_pool_interface.cc \
src/core/ext/filters/client_channel/subchannel_pool_interface.h \
+src/core/ext/filters/client_channel/xds/xds_api.cc \
+src/core/ext/filters/client_channel/xds/xds_api.h \
+src/core/ext/filters/client_channel/xds/xds_channel.h \
+src/core/ext/filters/client_channel/xds/xds_channel_args.h \
+src/core/ext/filters/client_channel/xds/xds_channel_secure.cc \
+src/core/ext/filters/client_channel/xds/xds_client.cc \
+src/core/ext/filters/client_channel/xds/xds_client.h \
+src/core/ext/filters/client_channel/xds/xds_client_stats.cc \
+src/core/ext/filters/client_channel/xds/xds_client_stats.h \
src/core/ext/filters/client_idle/client_idle_filter.cc \
src/core/ext/filters/deadline/deadline_filter.cc \
src/core/ext/filters/deadline/deadline_filter.h \