[xds] Fallback implementation (#36145)

As per [gRFC A71](https://github.com/grpc/proposal/blob/master/A71-xds-fallback.md).

Closes #36145

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/36145 from eugeneo:fallback-review 4871c0b316
PiperOrigin-RevId: 623211613
pull/36308/head
Eugene Ostroukhov 10 months ago committed by Copybara-Service
parent bbaf87b264
commit 4e735be35a
  1. 177
      CMakeLists.txt
  2. 57
      build_autogenerated.yaml
  3. 269
      src/core/ext/xds/xds_client.cc
  4. 12
      src/core/ext/xds/xds_client.h
  5. 4
      src/core/ext/xds/xds_client_grpc.cc
  6. 540
      test/core/xds/xds_client_test.cc
  7. 3
      test/core/xds/xds_transport_fake.cc
  8. 2
      test/core/xds/xds_transport_fake.h
  9. 21
      test/cpp/end2end/xds/BUILD
  10. 18
      test/cpp/end2end/xds/xds_end2end_test_lib.cc
  11. 14
      test/cpp/end2end/xds/xds_end2end_test_lib.h
  12. 311
      test/cpp/end2end/xds/xds_fallback_end2end_test.cc
  13. 21
      test/cpp/end2end/xds/xds_server.cc
  14. 82
      test/cpp/end2end/xds/xds_server.h
  15. 2
      test/cpp/end2end/xds/xds_utils.cc
  16. 12
      test/cpp/end2end/xds/xds_utils.h
  17. 22
      tools/run_tests/generated/tests.json

177
CMakeLists.txt generated

@ -1557,6 +1557,9 @@ if(gRPC_BUILD_TESTS)
add_dependencies(buildtests_cxx xds_end2end_test)
endif()
add_dependencies(buildtests_cxx xds_endpoint_resource_type_test)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx xds_fallback_end2end_test)
endif()
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_dependencies(buildtests_cxx xds_fault_injection_end2end_test)
endif()
@ -34244,6 +34247,180 @@ target_link_libraries(xds_endpoint_resource_type_test
)
endif()
if(gRPC_BUILD_TESTS)
# xds_fallback_end2end_test: C++ end2end test for xDS fallback (gRFC A71).
# NOTE: this file is generated from build_autogenerated.yaml; do not hand-edit
# this target -- regenerate it instead.
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
add_executable(xds_fallback_end2end_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/duplicate/echo_duplicate.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/echo_messages.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/simple_messages.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/simple_messages.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/simple_messages.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/simple_messages.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/address.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/address.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/address.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/address.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/ads.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/ads.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/ads.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/ads.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/base.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/base.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/base.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/base.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/cluster.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/cluster.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/cluster.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/config_source.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/config_source.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/config_source.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/config_source.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/discovery.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/discovery.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/discovery.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/endpoint.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/endpoint.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/expr.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/expr.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/expr.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/expr.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/extension.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/extension.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/extension.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/extension.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/health_check.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/health_check.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/health_check.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/health_check.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_connection_manager.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_connection_manager.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_filter_rbac.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_filter_rbac.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_filter_rbac.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/http_filter_rbac.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/listener.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/listener.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/listener.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/listener.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/load_report.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/load_report.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/load_report.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/load_report.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/lrs.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/lrs.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/lrs.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/metadata.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/metadata.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/metadata.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/metadata.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/orca_load_report.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/orca_load_report.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/orca_load_report.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/orca_load_report.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/outlier_detection.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/outlier_detection.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/outlier_detection.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/outlier_detection.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/path.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/path.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/path.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/path.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/protocol.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/protocol.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/protocol.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/protocol.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/range.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/range.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/range.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/range.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/rbac.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/rbac.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/rbac.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/rbac.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/regex.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/regex.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/regex.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/regex.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/route.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/route.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/route.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/route.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/router.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/router.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/router.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/router.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/string.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/string.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/string.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/string.grpc.pb.h
test/cpp/end2end/test_service_impl.cc
test/cpp/end2end/xds/xds_end2end_test_lib.cc
test/cpp/end2end/xds/xds_fallback_end2end_test.cc
test/cpp/end2end/xds/xds_server.cc
test/cpp/end2end/xds/xds_utils.cc
test/cpp/util/tls_test_utils.cc
)
if(WIN32 AND MSVC)
if(BUILD_SHARED_LIBS)
target_compile_definitions(xds_fallback_end2end_test
PRIVATE
"GPR_DLL_IMPORTS"
"GRPC_DLL_IMPORTS"
"GRPCXX_DLL_IMPORTS"
)
endif()
endif()
target_compile_features(xds_fallback_end2end_test PUBLIC cxx_std_14)
target_include_directories(xds_fallback_end2end_test
PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/include
${_gRPC_ADDRESS_SORTING_INCLUDE_DIR}
${_gRPC_RE2_INCLUDE_DIR}
${_gRPC_SSL_INCLUDE_DIR}
${_gRPC_UPB_GENERATED_DIR}
${_gRPC_UPB_GRPC_GENERATED_DIR}
${_gRPC_UPB_INCLUDE_DIR}
${_gRPC_XXHASH_INCLUDE_DIR}
${_gRPC_ZLIB_INCLUDE_DIR}
third_party/googletest/googletest/include
third_party/googletest/googletest
third_party/googletest/googlemock/include
third_party/googletest/googlemock
${_gRPC_PROTO_GENS_DIR}
)
target_link_libraries(xds_fallback_end2end_test
${_gRPC_ALLTARGETS_LIBRARIES}
gtest
grpc++_test_util
)
endif()
endif()
if(gRPC_BUILD_TESTS)
if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)

@ -20861,6 +20861,63 @@ targets:
- protobuf
- grpc_test_util
uses_polling: false
# xds_fallback_end2end_test: C++ end2end test target for xDS fallback
# (gRFC A71).  Generated build metadata; the CMakeLists.txt target is
# produced from this entry.  POSIX-only (no windows/ios).
- name: xds_fallback_end2end_test
gtest: true
build: test
language: c++
headers:
- test/core/util/scoped_env_var.h
- test/cpp/end2end/counted_service.h
- test/cpp/end2end/test_service_impl.h
- test/cpp/end2end/xds/xds_end2end_test_lib.h
- test/cpp/end2end/xds/xds_server.h
- test/cpp/end2end/xds/xds_utils.h
- test/cpp/util/tls_test_utils.h
src:
- src/proto/grpc/testing/duplicate/echo_duplicate.proto
- src/proto/grpc/testing/echo.proto
- src/proto/grpc/testing/echo_messages.proto
- src/proto/grpc/testing/simple_messages.proto
- src/proto/grpc/testing/xds/v3/address.proto
- src/proto/grpc/testing/xds/v3/ads.proto
- src/proto/grpc/testing/xds/v3/base.proto
- src/proto/grpc/testing/xds/v3/cluster.proto
- src/proto/grpc/testing/xds/v3/config_source.proto
- src/proto/grpc/testing/xds/v3/discovery.proto
- src/proto/grpc/testing/xds/v3/endpoint.proto
- src/proto/grpc/testing/xds/v3/expr.proto
- src/proto/grpc/testing/xds/v3/extension.proto
- src/proto/grpc/testing/xds/v3/health_check.proto
- src/proto/grpc/testing/xds/v3/http_connection_manager.proto
- src/proto/grpc/testing/xds/v3/http_filter_rbac.proto
- src/proto/grpc/testing/xds/v3/listener.proto
- src/proto/grpc/testing/xds/v3/load_report.proto
- src/proto/grpc/testing/xds/v3/lrs.proto
- src/proto/grpc/testing/xds/v3/metadata.proto
- src/proto/grpc/testing/xds/v3/orca_load_report.proto
- src/proto/grpc/testing/xds/v3/outlier_detection.proto
- src/proto/grpc/testing/xds/v3/path.proto
- src/proto/grpc/testing/xds/v3/percent.proto
- src/proto/grpc/testing/xds/v3/protocol.proto
- src/proto/grpc/testing/xds/v3/range.proto
- src/proto/grpc/testing/xds/v3/rbac.proto
- src/proto/grpc/testing/xds/v3/regex.proto
- src/proto/grpc/testing/xds/v3/route.proto
- src/proto/grpc/testing/xds/v3/router.proto
- src/proto/grpc/testing/xds/v3/string.proto
- test/cpp/end2end/test_service_impl.cc
- test/cpp/end2end/xds/xds_end2end_test_lib.cc
- test/cpp/end2end/xds/xds_fallback_end2end_test.cc
- test/cpp/end2end/xds/xds_server.cc
- test/cpp/end2end/xds/xds_utils.cc
- test/cpp/util/tls_test_utils.cc
deps:
- gtest
- grpc++_test_util
platforms:
- linux
- posix
- mac
- name: xds_fault_injection_end2end_test
gtest: true
build: test

@ -24,7 +24,9 @@
#include <algorithm>
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/strings/match.h"
@ -555,6 +557,67 @@ void XdsClient::XdsChannel::UnsubscribeLocked(const XdsResourceType* type,
}
}
bool XdsClient::XdsChannel::MaybeFallbackLocked(
const std::string& authority, AuthorityState& authority_state) {
if (!xds_client_->HasUncachedResources(authority_state)) {
return false;
}
std::vector<const XdsBootstrap::XdsServer*> xds_servers;
if (authority != kOldStyleAuthority) {
xds_servers =
xds_client_->bootstrap().LookupAuthority(authority)->servers();
}
if (xds_servers.empty()) xds_servers = xds_client_->bootstrap().servers();
for (size_t i = authority_state.xds_channels.size(); i < xds_servers.size();
++i) {
authority_state.xds_channels.emplace_back(
xds_client_->GetOrCreateXdsChannelLocked(*xds_servers[i], "fallback"));
for (const auto& type_resource : authority_state.resource_map) {
for (const auto& key_state : type_resource.second) {
authority_state.xds_channels.back()->SubscribeLocked(
type_resource.first, {authority, key_state.first});
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] authority %s: added fallback server %s (%s)",
xds_client_.get(), authority.c_str(),
xds_servers[i]->server_uri().c_str(),
authority_state.xds_channels.back()->status().ToString().c_str());
}
if (authority_state.xds_channels.back()->status().ok()) return true;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] authority %s: No fallback server",
xds_client_.get(), authority.c_str());
}
return false;
}
// Marks this channel healthy and "falls forward" (gRFC A71): for every
// authority that has this channel somewhere in its priority list other than
// the last (active) position, all lower-priority fallback channels are
// dropped, since they were only needed while this channel was failing.
void XdsClient::XdsChannel::SetHealthyLocked() {
  status_ = absl::OkStatus();
  // Make this channel active iff:
  // 1. Channel is on the list of authority channels
  // 2. Channel is not the last channel on the list (i.e. not the active
  //    channel)
  for (auto& authority : xds_client_->authority_state_map_) {
    auto& channels = authority.second.xds_channels;
    // An authority whose watches were all cancelled can be left with an
    // empty channel list (CancelResourceWatch clears xds_channels without
    // erasing the map entry); calling back() on it would be undefined
    // behavior.  SetChannelStatusLocked performs the same emptiness check.
    if (channels.empty()) continue;
    // Skip if channel is active.
    if (channels.back() == this) continue;
    auto channel_it = std::find(channels.begin(), channels.end(), this);
    // Skip if this is not on the list
    if (channel_it != channels.end()) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
        gpr_log(GPR_INFO, "[xds_client %p] authority %s: Falling forward to %s",
                xds_client_.get(), authority.first.c_str(),
                server_.server_uri().c_str());
      }
      // Lower priority channels are no longer needed, connection is back!
      channels.erase(channel_it + 1, channels.end());
    }
  }
}
void XdsClient::XdsChannel::OnConnectivityFailure(absl::Status status) {
{
MutexLock lock(&xds_client_->mu_);
@ -584,8 +647,11 @@ void XdsClient::XdsChannel::SetChannelStatusLocked(absl::Status status) {
status_ = status;
// Find all watchers for this channel.
std::set<RefCountedPtr<ResourceWatcherInterface>> watchers;
for (const auto& a : xds_client_->authority_state_map_) { // authority
if (a.second.xds_channel != this) continue;
for (auto& a : xds_client_->authority_state_map_) { // authority
if (a.second.xds_channels.empty() || a.second.xds_channels.back() != this ||
MaybeFallbackLocked(a.first, a.second)) {
continue;
}
for (const auto& t : a.second.resource_map) { // type
for (const auto& r : t.second) { // resource id
for (const auto& w : r.second.watchers) { // watchers
@ -594,15 +660,17 @@ void XdsClient::XdsChannel::SetChannelStatusLocked(absl::Status status) {
}
}
}
// Enqueue notification for the watchers.
xds_client_->work_serializer_.Schedule(
[watchers = std::move(watchers), status = std::move(status)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(xds_client_->work_serializer_) {
for (const auto& watcher : watchers) {
watcher->OnError(status, ReadDelayHandle::NoWait());
}
},
DEBUG_LOCATION);
if (!watchers.empty()) {
// Enqueue notification for the watchers.
xds_client_->work_serializer_.Schedule(
[watchers = std::move(watchers), status = std::move(status)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(xds_client_->work_serializer_) {
for (const auto& watcher : watchers) {
watcher->OnError(status, ReadDelayHandle::NoWait());
}
},
DEBUG_LOCATION);
}
}
//
@ -955,10 +1023,13 @@ XdsClient::XdsChannel::AdsCall::AdsCall(
}
// If this is a reconnect, add any necessary subscriptions from what's
// already in the cache.
for (const auto& a : xds_client()->authority_state_map_) {
for (auto& a : xds_client()->authority_state_map_) {
const std::string& authority = a.first;
// Skip authorities that are not using this xDS channel.
if (a.second.xds_channel != xds_channel()) continue;
auto it = std::find(a.second.xds_channels.begin(),
a.second.xds_channels.end(), xds_channel());
// Skip authorities that are not using this xDS channel. The channel can be
// anywhere in the list.
if (it == a.second.xds_channels.end()) continue;
for (const auto& t : a.second.resource_map) {
const XdsResourceType* type = t.first;
for (const auto& r : t.second) {
@ -1097,7 +1168,7 @@ void XdsClient::XdsChannel::AdsCall::OnRecvMessage(absl::string_view payload) {
status.ToString().c_str());
} else {
seen_response_ = true;
xds_channel()->status_ = absl::OkStatus();
xds_channel()->SetHealthyLocked();
// Update nonce.
auto& state = state_map_[result.type];
state.nonce = result.nonce;
@ -1120,7 +1191,9 @@ void XdsClient::XdsChannel::AdsCall::OnRecvMessage(absl::string_view payload) {
const std::string& authority = a.first;
AuthorityState& authority_state = a.second;
// Skip authorities that are not using this xDS channel.
if (authority_state.xds_channel != xds_channel()) continue;
if (authority_state.xds_channels.back() != xds_channel()) {
continue;
}
auto seen_authority_it = result.resources_seen.find(authority);
// Find this resource type.
auto type_it = authority_state.resource_map.find(result.type);
@ -1567,6 +1640,18 @@ RefCountedPtr<XdsClient::XdsChannel> XdsClient::GetOrCreateXdsChannelLocked(
return xds_channel;
}
// Returns true iff at least one watched resource under |authority_state| is
// still in the REQUESTED state, i.e. it has been subscribed to but no
// server has yet returned (or rejected) it.  Used to decide whether
// falling back to another xDS server could make progress.
bool XdsClient::HasUncachedResources(const AuthorityState& authority_state) {
  for (const auto& type_entry : authority_state.resource_map) {
    const auto& resources = type_entry.second;
    const bool any_requested =
        std::any_of(resources.begin(), resources.end(),
                    [](const auto& resource_entry) {
                      return resource_entry.second.meta.client_status ==
                             XdsApi::ResourceMetadata::REQUESTED;
                    });
    if (any_requested) return true;
  }
  return false;
}
void XdsClient::WatchResource(const XdsResourceType* type,
absl::string_view name,
RefCountedPtr<ResourceWatcherInterface> watcher) {
@ -1592,7 +1677,7 @@ void XdsClient::WatchResource(const XdsResourceType* type,
return;
}
// Find server to use.
const XdsBootstrap::XdsServer* xds_server = nullptr;
std::vector<const XdsBootstrap::XdsServer*> xds_servers;
if (resource_name->authority != kOldStyleAuthority) {
auto* authority =
bootstrap_->LookupAuthority(std::string(resource_name->authority));
@ -1602,75 +1687,100 @@ void XdsClient::WatchResource(const XdsResourceType* type,
"\" not present in bootstrap config")));
return;
}
xds_server =
authority->servers().empty() ? nullptr : authority->servers().front();
xds_servers = authority->servers();
}
if (xds_server == nullptr) xds_server = bootstrap_->servers().front();
if (xds_servers.empty()) xds_servers = bootstrap_->servers();
{
MutexLock lock(&mu_);
MaybeRegisterResourceTypeLocked(type);
AuthorityState& authority_state =
authority_state_map_[resource_name->authority];
ResourceState& resource_state =
authority_state.resource_map[type][resource_name->key];
auto it_is_new = authority_state.resource_map[type].emplace(
resource_name->key, ResourceState());
bool first_watcher_for_resource = it_is_new.second;
ResourceState& resource_state = it_is_new.first->second;
resource_state.watchers[w] = watcher;
// If we already have a cached value for the resource, notify the new
// watcher immediately.
if (resource_state.resource != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] returning cached listener data for %s", this,
std::string(name).c_str());
}
work_serializer_.Schedule(
[watcher, value = resource_state.resource]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnGenericResourceChanged(value,
ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::DOES_NOT_EXIST) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] reporting cached does-not-exist for %s", this,
std::string(name).c_str());
if (first_watcher_for_resource) {
// We try to add new channels in 2 cases:
// - This is the first resource for this authority (i.e., the list
// of channels is empty).
// - The last channel in the list is failing. That failure may not
// have previously triggered fallback if there were no uncached
// resources, but we've just added a new uncached resource,
// so we need to trigger fallback now.
//
// Note that when we add a channel, it might already be failing
// due to being used in a different authority. So we keep going
// until either we add one that isn't failing or we've added them all.
if (authority_state.xds_channels.empty() ||
!authority_state.xds_channels.back()->status().ok()) {
for (size_t i = authority_state.xds_channels.size();
i < xds_servers.size(); ++i) {
authority_state.xds_channels.emplace_back(
GetOrCreateXdsChannelLocked(*xds_servers[i], "start watch"));
if (authority_state.xds_channels.back()->status().ok()) {
break;
}
}
}
work_serializer_.Schedule(
[watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnResourceDoesNotExist(ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::NACKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(
GPR_INFO,
"[xds_client %p] reporting cached validation failure for %s: %s",
this, std::string(name).c_str(),
resource_state.meta.failed_details.c_str());
for (const auto& channel : authority_state.xds_channels) {
channel->SubscribeLocked(type, *resource_name);
}
std::string details = resource_state.meta.failed_details;
const auto* node = bootstrap_->node();
if (node != nullptr) {
absl::StrAppend(&details, " (node ID:", bootstrap_->node()->id(), ")");
} else {
// If we already have a cached value for the resource, notify the new
// watcher immediately.
if (resource_state.resource != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] returning cached listener data for %s", this,
std::string(name).c_str());
}
work_serializer_.Schedule(
[watcher, value = resource_state.resource]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnGenericResourceChanged(value,
ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::DOES_NOT_EXIST) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] reporting cached does-not-exist for %s",
this, std::string(name).c_str());
}
work_serializer_.Schedule(
[watcher]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnResourceDoesNotExist(ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::NACKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(
GPR_INFO,
"[xds_client %p] reporting cached validation failure for %s: %s",
this, std::string(name).c_str(),
resource_state.meta.failed_details.c_str());
}
std::string details = resource_state.meta.failed_details;
const auto* node = bootstrap_->node();
if (node != nullptr) {
absl::StrAppend(&details, " (node ID:", bootstrap_->node()->id(),
")");
}
work_serializer_.Schedule(
[watcher, details = std::move(details)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnError(absl::UnavailableError(absl::StrCat(
"invalid resource: ", details)),
ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
}
work_serializer_.Schedule(
[watcher, details = std::move(details)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) {
watcher->OnError(absl::UnavailableError(absl::StrCat(
"invalid resource: ", details)),
ReadDelayHandle::NoWait());
},
DEBUG_LOCATION);
}
// If the authority doesn't yet have a channel, set it, creating it if
// needed.
if (authority_state.xds_channel == nullptr) {
authority_state.xds_channel =
GetOrCreateXdsChannelLocked(*xds_server, "start watch");
}
absl::Status channel_status = authority_state.xds_channel->status();
absl::Status channel_status = authority_state.xds_channels.back()->status();
if (!channel_status.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
@ -1685,7 +1795,6 @@ void XdsClient::WatchResource(const XdsResourceType* type,
},
DEBUG_LOCATION);
}
authority_state.xds_channel->SubscribeLocked(type, *resource_name);
}
work_serializer_.DrainQueue();
}
@ -1723,13 +1832,15 @@ void XdsClient::CancelResourceWatch(const XdsResourceType* type,
this, std::string(type->type_url()).c_str(),
std::string(name).c_str());
}
authority_state.xds_channel->UnsubscribeLocked(type, *resource_name,
delay_unsubscription);
for (const auto& xds_channel : authority_state.xds_channels) {
xds_channel->UnsubscribeLocked(type, *resource_name,
delay_unsubscription);
}
type_map.erase(resource_it);
if (type_map.empty()) {
authority_state.resource_map.erase(type_it);
if (authority_state.resource_map.empty()) {
authority_state.xds_channel.reset();
authority_state.xds_channels.clear();
}
}
}

@ -201,6 +201,8 @@ class XdsClient : public DualRefCounted<XdsClient> {
}
};
struct AuthorityState;
struct XdsResourceName {
std::string authority;
XdsResourceKey key;
@ -244,6 +246,12 @@ class XdsClient : public DualRefCounted<XdsClient> {
absl::string_view server_uri() const { return server_.server_uri(); }
private:
// Attempts to find a suitable Xds fallback server. Returns true if
// a connection to a suitable server had been established.
bool MaybeFallbackLocked(const std::string& authority,
XdsClient::AuthorityState& authority_state)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
void SetHealthyLocked() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&XdsClient::mu_);
void Orphaned() override;
void OnConnectivityFailure(absl::Status status);
@ -283,7 +291,7 @@ class XdsClient : public DualRefCounted<XdsClient> {
};
struct AuthorityState {
RefCountedPtr<XdsChannel> xds_channel;
std::vector<RefCountedPtr<XdsChannel>> xds_channels;
std::map<const XdsResourceType*, std::map<XdsResourceKey, ResourceState>>
resource_map;
};
@ -339,10 +347,10 @@ class XdsClient : public DualRefCounted<XdsClient> {
XdsApi::ClusterLoadReportMap BuildLoadReportSnapshotLocked(
const XdsBootstrap::XdsServer& xds_server, bool send_all_clusters,
const std::set<std::string>& clusters) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
RefCountedPtr<XdsChannel> GetOrCreateXdsChannelLocked(
const XdsBootstrap::XdsServer& server, const char* reason)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
bool HasUncachedResources(const AuthorityState& authority_state);
std::unique_ptr<XdsBootstrap> bootstrap_;
OrphanablePtr<XdsTransportFactory> transport_factory_;

@ -257,8 +257,8 @@ absl::StatusOr<RefCountedPtr<GrpcXdsClient>> GrpcXdsClient::GetOrCreate(
MakeOrphanable<GrpcXdsTransportFactory>(channel_args));
g_xds_client_map->emplace(xds_client->key(), xds_client.get());
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "xDS client for key: %s was created",
std::string(key).c_str());
gpr_log(GPR_INFO, "[xds_client %p] Created xDS client for key %s",
xds_client.get(), std::string(key).c_str());
}
return xds_client;
}

(Diff for test/core/xds/xds_client_test.cc suppressed because it is too large — use "Load Diff" to view it.)

@ -48,7 +48,8 @@ FakeXdsTransportFactory::FakeStreamingCall::~FakeStreamingCall() {
MutexLock lock(&mu_);
if (transport_->abort_on_undrained_messages()) {
for (const auto& message : from_client_messages_) {
gpr_log(GPR_ERROR, "From client message left in queue: %s",
gpr_log(GPR_ERROR, "[%s] %p From client message left in queue: %s",
transport_->server()->server_uri().c_str(), this,
message.c_str());
}
GPR_ASSERT(from_client_messages_.empty());

@ -214,6 +214,8 @@ class FakeXdsTransportFactory : public XdsTransportFactory {
FakeXdsTransportFactory* factory() const { return factory_.get(); }
const XdsBootstrap::XdsServer* server() const { return &server_; }
private:
class RefCountedOnConnectivityFailure
: public RefCounted<RefCountedOnConnectivityFailure> {

@ -494,3 +494,24 @@ grpc_cc_test(
"//test/cpp/end2end:connection_attempt_injector",
],
)
# End2end test for xDS fallback (gRFC A71): exercises switching between
# primary and fallback xDS servers via the test framework's balancers.
grpc_cc_test(
    name = "xds_fallback_end2end_test",
    srcs = ["xds_fallback_end2end_test.cc"],
    external_deps = [
        "gtest",
    ],
    linkstatic = True,  # Fixes dyld error on MacOS
    tags = [
        "no_test_ios",
        "no_windows",  # TODO(jtattermusch): fix test on windows
        "xds_end2end_test",
    ],
    deps = [
        ":xds_end2end_test_lib",
        "//:gpr",
        "//:grpc",
        "//:grpc++",
        "//test/core/util:scoped_env_var",
    ],
)

@ -286,10 +286,11 @@ void XdsEnd2endTest::BackendServerThread::ShutdownAllServices() {
//
XdsEnd2endTest::BalancerServerThread::BalancerServerThread(
XdsEnd2endTest* test_obj)
XdsEnd2endTest* test_obj, absl::string_view debug_label)
: ServerThread(test_obj, /*use_xds_enabled_server=*/false),
ads_service_(new AdsServiceImpl(
// First request must have node set with the right client features.
// First request must have node set with the right client
// features.
[&](const DiscoveryRequest& request) {
EXPECT_TRUE(request.has_node());
EXPECT_THAT(request.node().client_features(),
@ -300,7 +301,8 @@ XdsEnd2endTest::BalancerServerThread::BalancerServerThread(
// NACKs must use the right status code.
[&](absl::StatusCode code) {
EXPECT_EQ(code, absl::StatusCode::kInvalidArgument);
})),
},
debug_label)),
lrs_service_(new LrsServiceImpl(
(GetParam().enable_load_reporting() ? 20 : 0), {kDefaultClusterName},
// Fail if load reporting is used when not enabled.
@ -311,7 +313,8 @@ XdsEnd2endTest::BalancerServerThread::BalancerServerThread(
EXPECT_THAT(
request.node().client_features(),
::testing::Contains("envoy.lrs.supports_send_all_clusters"));
})) {}
},
debug_label)) {}
void XdsEnd2endTest::BalancerServerThread::RegisterAllServices(
ServerBuilder* builder) {
@ -377,7 +380,8 @@ const char XdsEnd2endTest::kServerKeyPath[] =
const char XdsEnd2endTest::kRequestMessage[] = "Live long and prosper.";
XdsEnd2endTest::XdsEnd2endTest() : balancer_(CreateAndStartBalancer()) {
XdsEnd2endTest::XdsEnd2endTest()
: balancer_(CreateAndStartBalancer("Default Balancer")) {
// Initialize default client-side xDS resources.
default_listener_ = XdsResourceUtils::DefaultListener();
default_route_config_ = XdsResourceUtils::DefaultRouteConfig();
@ -409,9 +413,9 @@ void XdsEnd2endTest::TearDown() {
}
std::unique_ptr<XdsEnd2endTest::BalancerServerThread>
XdsEnd2endTest::CreateAndStartBalancer() {
XdsEnd2endTest::CreateAndStartBalancer(absl::string_view debug_label) {
std::unique_ptr<BalancerServerThread> balancer =
std::make_unique<BalancerServerThread>(this);
std::make_unique<BalancerServerThread>(this, debug_label);
balancer->Start();
return balancer;
}

@ -239,7 +239,11 @@ class XdsEnd2endTest : public ::testing::TestWithParam<XdsTestType>,
port_(grpc_pick_unused_port_or_die()),
use_xds_enabled_server_(use_xds_enabled_server) {}
virtual ~ServerThread() { Shutdown(); }
virtual ~ServerThread() {
// Shutdown should be called manually. Shutdown calls virtual methods and
// can't be called from the base class destructor.
GPR_ASSERT(!running_);
}
void Start();
void Shutdown();
@ -249,6 +253,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<XdsTestType>,
grpc_fake_transport_security_server_credentials_create());
}
std::string target() const { return absl::StrCat("localhost:", port_); }
int port() const { return port_; }
bool use_xds_enabled_server() const { return use_xds_enabled_server_; }
@ -398,7 +404,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<XdsTestType>,
// A server thread for the xDS server.
class BalancerServerThread : public ServerThread {
public:
explicit BalancerServerThread(XdsEnd2endTest* test_obj);
explicit BalancerServerThread(XdsEnd2endTest* test_obj,
absl::string_view debug_label);
AdsServiceImpl* ads_service() { return ads_service_.get(); }
LrsServiceImpl* lrs_service() { return lrs_service_.get(); }
@ -439,7 +446,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam<XdsTestType>,
// Creates and starts a new balancer, running in its own thread.
// Most tests will not need to call this; instead, they can use
// balancer_, which is already populated with default resources.
std::unique_ptr<BalancerServerThread> CreateAndStartBalancer();
std::unique_ptr<BalancerServerThread> CreateAndStartBalancer(
absl::string_view debug_label = "");
// Sets the Listener and RouteConfiguration resource on the specified
// balancer. If RDS is in use, they will be set as separate resources;

@ -0,0 +1,311 @@
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <iostream>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/cleanup/cleanup.h"
#include "absl/strings/str_format.h"
#include "absl/strings/strip.h"
#include <grpcpp/create_channel.h>
#include <grpcpp/security/credentials.h>
#include <grpcpp/support/status.h>
#include "src/core/client_channel/backup_poller.h"
#include "src/core/lib/config/config_vars.h"
#include "src/core/lib/gprpp/env.h"
#include "src/cpp/client/secure_credentials.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/echo_messages.pb.h"
#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h"
#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/scoped_env_var.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/xds/xds_end2end_test_lib.h"
#include "test/cpp/end2end/xds/xds_utils.h"
namespace grpc {
namespace testing {
namespace {
constexpr char const* kErrorMessage = "test forced ADS stream failure";
class XdsFallbackTest : public XdsEnd2endTest {
public:
XdsFallbackTest()
: fallback_balancer_(CreateAndStartBalancer("Fallback Balancer")) {}
void SetUp() override {
// Overrides SetUp from a base class so we can call InitClient per-test case
}
void TearDown() override {
fallback_balancer_->Shutdown();
XdsEnd2endTest::TearDown();
}
void SetXdsResourcesForServer(BalancerServerThread* balancer, size_t backend,
absl::string_view server_name = "",
absl::string_view authority = "") {
Listener listener = default_listener_;
RouteConfiguration route_config = default_route_config_;
Cluster cluster = default_cluster_;
// Default server uses default resources when no authority, to enable using
// more test framework functions.
if (!server_name.empty() || !authority.empty()) {
auto get_resource_name = [&](absl::string_view resource_type) {
absl::string_view stripped_resource_type =
absl::StripPrefix(resource_type, "type.googleapis.com/");
if (authority.empty()) {
if (resource_type == kLdsTypeUrl) return std::string(server_name);
return absl::StrFormat("%s_%s", stripped_resource_type, server_name);
}
return absl::StrFormat("xdstp://%s/%s/%s", authority,
stripped_resource_type, server_name);
};
listener.set_name(get_resource_name(kLdsTypeUrl));
cluster.set_name(get_resource_name(kCdsTypeUrl));
cluster.mutable_eds_cluster_config()->set_service_name(
get_resource_name(kEdsTypeUrl));
route_config.set_name(get_resource_name(kRdsTypeUrl));
route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(cluster.name());
}
SetListenerAndRouteConfiguration(balancer, listener, route_config);
balancer->ads_service()->SetCdsResource(cluster);
balancer->ads_service()->SetEdsResource(BuildEdsResource(
EdsResourceArgs(
{{"locality0", CreateEndpointsForBackends(backend, backend + 1)}}),
cluster.eds_cluster_config().service_name()));
}
void ExpectBackendCall(EchoTestService::Stub* stub, int backend,
grpc_core::DebugLocation location) {
ClientContext context;
EchoRequest request;
EchoResponse response;
RpcOptions().SetupRpc(&context, &request);
Status status = stub->Echo(&context, request, &response);
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message() << "\n"
<< location.file() << ':' << location.line();
EXPECT_EQ(1U, backends_[backend]->backend_service()->request_count())
<< "\n"
<< location.file() << ':' << location.line();
}
protected:
std::unique_ptr<BalancerServerThread> fallback_balancer_;
};
// Verifies the full fallback lifecycle: with the primary and an intermediate
// "broken" xDS server failing, the client falls back to the last server in
// the bootstrap list, then switches back once the primary recovers.
TEST_P(XdsFallbackTest, FallbackAndRecover) {
  grpc_core::testing::ScopedEnvVar fallback_enabled(
      "GRPC_EXPERIMENTAL_XDS_FALLBACK", "1");
  // A second-priority server that always fails the ADS stream, to prove the
  // client keeps walking down the server list.
  auto broken_balancer = CreateAndStartBalancer("Broken balancer");
  broken_balancer->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  InitClient(XdsBootstrapBuilder().SetServers({
      balancer_->target(),
      broken_balancer->target(),
      fallback_balancer_->target(),
  }));
  // Primary xDS server has backends_[0] configured and fallback server has
  // backends_[1]
  CreateAndStartBackends(2);
  SetXdsResourcesForServer(balancer_.get(), 0);
  SetXdsResourcesForServer(fallback_balancer_.get(), 1);
  balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  // Primary server down, fallback server data is used (backends_[1])
  CheckRpcSendOk(DEBUG_LOCATION);
  EXPECT_EQ(backends_[0]->backend_service()->request_count(), 0);
  EXPECT_EQ(backends_[1]->backend_service()->request_count(), 1);
  // Primary server is back. backends_[0] will be used when the data makes it
  // all the way to the client
  balancer_->ads_service()->ClearADSFailure();
  WaitForBackend(DEBUG_LOCATION, 0);
  broken_balancer->Shutdown();
}
// Without GRPC_EXPERIMENTAL_XDS_FALLBACK set, the client must NOT fall back:
// a primary-server failure surfaces to the application as UNAVAILABLE.
TEST_P(XdsFallbackTest, EnvVarNotSet) {
  InitClient(XdsBootstrapBuilder().SetServers({
      balancer_->target(),
      fallback_balancer_->target(),
  }));
  // Primary xDS server has backends_[0] configured and fallback server has
  // backends_[1]
  CreateAndStartBackends(2);
  SetXdsResourcesForServer(balancer_.get(), 0);
  SetXdsResourcesForServer(fallback_balancer_.get(), 1);
  balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  // Primary server down, failure should be reported.
  // The expected message names the PRIMARY server's port, proving the
  // fallback server was never consulted.
  CheckRpcSendFailure(
      DEBUG_LOCATION, StatusCode::UNAVAILABLE,
      absl::StrFormat("server.example.com: UNAVAILABLE: xDS channel for server "
                      "localhost:%d: xDS call failed with no responses "
                      "received; status: RESOURCE_EXHAUSTED: test forced ADS "
                      "stream failure \\(node ID:xds_end2end_test\\)",
                      balancer_->port()));
}
// When every server in the bootstrap list fails, the error reported to the
// application reflects the LAST server tried (the fallback), showing that
// the client did attempt to fall back before giving up.
TEST_P(XdsFallbackTest, PrimarySecondaryNotAvailable) {
  grpc_core::testing::ScopedEnvVar fallback_enabled(
      "GRPC_EXPERIMENTAL_XDS_FALLBACK", "1");
  InitClient(XdsBootstrapBuilder().SetServers(
      {balancer_->target(), fallback_balancer_->target()}));
  balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  fallback_balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  // Note: the expected message names the FALLBACK server's port.
  CheckRpcSendFailure(
      DEBUG_LOCATION, StatusCode::UNAVAILABLE,
      absl::StrFormat(
          "server.example.com: UNAVAILABLE: xDS channel for server "
          "localhost:%d: xDS call failed with no responses received; "
          "status: RESOURCE_EXHAUSTED: test forced ADS stream failure \\(node "
          "ID:xds_end2end_test\\)",
          fallback_balancer_->port()));
}
// Per gRFC A71: resources already cached from the primary server keep being
// used after the primary fails; only resources NOT in the cache are fetched
// from the fallback server.
TEST_P(XdsFallbackTest, UsesCachedResourcesAfterFailure) {
  constexpr absl::string_view kServerName2 = "server2.example.com";
  grpc_core::testing::ScopedEnvVar fallback_enabled(
      "GRPC_EXPERIMENTAL_XDS_FALLBACK", "1");
  InitClient(XdsBootstrapBuilder().SetServers(
      {balancer_->target(), fallback_balancer_->target()}));
  // 4 backends - cross product of two data plane targets and two balancers
  CreateAndStartBackends(4);
  SetXdsResourcesForServer(balancer_.get(), 0);
  SetXdsResourcesForServer(fallback_balancer_.get(), 1);
  SetXdsResourcesForServer(balancer_.get(), 2, kServerName2);
  SetXdsResourcesForServer(fallback_balancer_.get(), 3, kServerName2);
  // Populate the cache for the default server name while the primary is up.
  CheckRpcSendOk(DEBUG_LOCATION);
  EXPECT_EQ(backends_[0]->backend_service()->request_count(), 1);
  balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  auto channel = CreateChannel(0, std::string(kServerName2).c_str());
  auto stub = grpc::testing::EchoTestService::NewStub(channel);
  // server2.example.com is configured from the fallback server
  ExpectBackendCall(stub.get(), 3, DEBUG_LOCATION);
  // Calling server.example.com still uses cached value
  CheckRpcSendOk(DEBUG_LOCATION);
  EXPECT_EQ(backends_[0]->backend_service()->request_count(), 2);
  EXPECT_EQ(backends_[1]->backend_service()->request_count(), 0);
}
// With xDS federation enabled, fallback is tracked independently per
// authority: each authority falls back to its own fallback server, and each
// recovers independently when the shared primary comes back.
TEST_P(XdsFallbackTest, PerAuthorityFallback) {
  auto fallback_balancer2 = CreateAndStartBalancer("Fallback for Authority2");
  // Use cleanup in case test assertion fails
  auto balancer2_cleanup =
      absl::MakeCleanup([&]() { fallback_balancer2->Shutdown(); });
  grpc_core::testing::ScopedEnvVar fallback_enabled(
      "GRPC_EXPERIMENTAL_XDS_FALLBACK", "1");
  grpc_core::testing::ScopedExperimentalEnvVar env_var(
      "GRPC_EXPERIMENTAL_XDS_FEDERATION");
  const char* kAuthority1 = "xds1.example.com";
  const char* kAuthority2 = "xds2.example.com";
  constexpr absl::string_view kServer1Name = "server1.example.com";
  constexpr absl::string_view kServer2Name = "server2.example.com";
  // Authority1 uses balancer_ and fallback_balancer_
  // Authority2 uses balancer_ and fallback_balancer2
  XdsBootstrapBuilder builder;
  builder.SetServers({balancer_->target()});
  builder.AddAuthority(kAuthority1,
                       {balancer_->target(), fallback_balancer_->target()});
  builder.AddAuthority(kAuthority2,
                       {balancer_->target(), fallback_balancer2->target()});
  InitClient(builder);
  CreateAndStartBackends(4);
  // Fallback servers route to backends_[0]/backends_[1]; the primary routes
  // to backends_[2]/backends_[3].
  SetXdsResourcesForServer(fallback_balancer_.get(), 0, kServer1Name,
                           kAuthority1);
  SetXdsResourcesForServer(fallback_balancer2.get(), 1, kServer2Name,
                           kAuthority2);
  SetXdsResourcesForServer(balancer_.get(), 2, kServer1Name, kAuthority1);
  SetXdsResourcesForServer(balancer_.get(), 3, kServer2Name, kAuthority2);
  // Primary balancer is down, using the fallback servers
  balancer_->ads_service()->ForceADSFailure(
      Status(StatusCode::RESOURCE_EXHAUSTED, kErrorMessage));
  // Create second channel to new target URI and send 1 RPC.
  auto authority1_stub = grpc::testing::EchoTestService::NewStub(CreateChannel(
      /*failover_timeout_ms=*/0, std::string(kServer1Name).c_str(),
      kAuthority1));
  auto authority2_stub = grpc::testing::EchoTestService::NewStub(CreateChannel(
      /*failover_timeout_ms=*/0, std::string(kServer2Name).c_str(),
      kAuthority2));
  ExpectBackendCall(authority1_stub.get(), 0, DEBUG_LOCATION);
  ExpectBackendCall(authority2_stub.get(), 1, DEBUG_LOCATION);
  // Primary balancer is up, its data will be used now.
  balancer_->ads_service()->ClearADSFailure();
  // Keep issuing RPCs on both authorities until both primary-configured
  // backends have been hit (or the slowdown-adjusted deadline expires).
  auto deadline =
      absl::Now() + (absl::Seconds(5) * grpc_test_slowdown_factor());
  while (absl::Now() < deadline &&
         (backends_[2]->backend_service()->request_count() == 0 ||
          backends_[3]->backend_service()->request_count() == 0)) {
    ClientContext context;
    EchoRequest request;
    EchoResponse response;
    RpcOptions().SetupRpc(&context, &request);
    Status status = authority1_stub->Echo(&context, request, &response);
    EXPECT_TRUE(status.ok()) << status.error_message();
    ClientContext context2;
    EchoRequest request2;
    EchoResponse response2;
    RpcOptions().SetupRpc(&context2, &request2);
    status = authority2_stub->Echo(&context2, request2, &response2);
    EXPECT_TRUE(status.ok()) << status.error_message();
  }
  ASSERT_LE(1U, backends_[2]->backend_service()->request_count());
  ASSERT_LE(1U, backends_[3]->backend_service()->request_count());
}
// Run the suite once, with the bootstrap delivered via the env var; the
// tests themselves vary the server lists through XdsBootstrapBuilder.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsFallbackTest,
                         ::testing::Values(XdsTestType().set_bootstrap_source(
                             XdsTestType::kBootstrapFromEnvVar)),
                         &XdsTestType::Name);

}  // namespace
}  // namespace testing
}  // namespace grpc
// Test entry point: sets up the gRPC test environment and config overrides
// before grpc_init(), then runs all tests and shuts gRPC down.
int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(&argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  // Make the backup poller poll very frequently in order to pick up
  // updates from all the subchannels's FDs.
  grpc_core::ConfigVars::Overrides overrides;
  overrides.client_channel_backup_poll_interval_ms = 1;
  grpc_core::ConfigVars::SetOverrides(overrides);
#if TARGET_OS_IPHONE
  // Workaround Apple CFStream bug
  grpc_core::SetEnv("grpc_cfstream", "0");
#endif
  grpc_init();
  const auto result = RUN_ALL_TESTS();
  grpc_shutdown();
  return result;
}

@ -51,8 +51,8 @@ void AdsServiceImpl::SetResource(google::protobuf::Any resource,
resource_type_state.resource_type_version;
resource_state.resource = std::move(resource);
gpr_log(GPR_INFO,
"ADS[%p]: Updating %s resource %s; resource_type_version now %u",
this, type_url.c_str(), name.c_str(),
"ADS[%s]: Updating %s resource %s; resource_type_version now %u",
debug_label_.c_str(), type_url.c_str(), name.c_str(),
resource_type_state.resource_type_version);
for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
@ -69,8 +69,8 @@ void AdsServiceImpl::UnsetResource(const std::string& type_url,
resource_type_state.resource_type_version;
resource_state.resource.reset();
gpr_log(GPR_INFO,
"ADS[%p]: Unsetting %s resource %s; resource_type_version now %u",
this, type_url.c_str(), name.c_str(),
"ADS[%s]: Unsetting %s resource %s; resource_type_version now %u",
debug_label_.c_str(), type_url.c_str(), name.c_str(),
resource_type_state.resource_type_version);
for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
@ -100,8 +100,8 @@ bool AdsServiceImpl::MaybeSubscribe(const std::string& resource_type,
if (subscription_state->update_queue != nullptr) return false;
subscription_state->update_queue = update_queue;
resource_state->subscriptions.emplace(subscription_state);
gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p",
this, resource_type.c_str(), resource_name.c_str(),
gpr_log(GPR_INFO, "ADS[%s]: subscribe to resource type %s name %s state %p",
debug_label_.c_str(), resource_type.c_str(), resource_name.c_str(),
&subscription_state);
return true;
}
@ -122,8 +122,9 @@ void AdsServiceImpl::ProcessUnsubscriptions(
++it;
continue;
}
gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p", this,
resource_type.c_str(), resource_name.c_str(), &subscription_state);
gpr_log(GPR_INFO, "ADS[%s]: Unsubscribe to type=%s name=%s state=%p",
debug_label_.c_str(), resource_type.c_str(), resource_name.c_str(),
&subscription_state);
auto resource_it = resource_name_map->find(resource_name);
GPR_ASSERT(resource_it != resource_name_map->end());
auto& resource_state = resource_it->second;
@ -150,7 +151,7 @@ void AdsServiceImpl::Shutdown() {
}
resource_type_response_state_.clear();
}
gpr_log(GPR_INFO, "ADS[%p]: shut down", this);
gpr_log(GPR_INFO, "ADS[%s]: shut down", debug_label_.c_str());
}
//
@ -231,7 +232,7 @@ void LrsServiceImpl::Shutdown() {
lrs_cv_.SignalAll();
}
}
gpr_log(GPR_INFO, "LRS[%p]: shut down", this);
gpr_log(GPR_INFO, "LRS[%s]: shut down", debug_label_.c_str());
}
std::vector<LrsServiceImpl::ClientStats> LrsServiceImpl::WaitForLoadReport(

@ -26,6 +26,7 @@
#include "absl/types/optional.h"
#include <grpc/support/log.h>
#include <grpcpp/support/status.h>
#include "src/core/lib/address_utils/parse_address.h"
#include "src/core/lib/gprpp/crash.h"
@ -75,9 +76,12 @@ class AdsServiceImpl
explicit AdsServiceImpl(
std::function<void(const DiscoveryRequest& request)> check_first_request =
nullptr,
std::function<void(absl::StatusCode)> check_nack_status_code = nullptr)
std::function<void(absl::StatusCode)> check_nack_status_code = nullptr,
absl::string_view debug_label = "")
: check_first_request_(std::move(check_first_request)),
check_nack_status_code_(std::move(check_nack_status_code)) {}
check_nack_status_code_(std::move(check_nack_status_code)),
debug_label_(absl::StrFormat(
"%p%s%s", this, debug_label.empty() ? "" : ":", debug_label)) {}
void set_wrap_resources(bool wrap_resources) {
grpc_core::MutexLock lock(&ads_mu_);
@ -174,6 +178,11 @@ class AdsServiceImpl
forced_ads_failure_ = std::move(status);
}
void ClearADSFailure() {
grpc_core::MutexLock lock(&ads_mu_);
forced_ads_failure_ = absl::nullopt;
}
private:
// A queue of resource type/name pairs that have changed since the client
// subscribed to them.
@ -223,14 +232,15 @@ class AdsServiceImpl
Status StreamAggregatedResources(ServerContext* context,
Stream* stream) override {
gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this);
gpr_log(GPR_INFO, "ADS[%s]: StreamAggregatedResources starts",
debug_label_.c_str());
{
grpc_core::MutexLock lock(&ads_mu_);
if (forced_ads_failure_.has_value()) {
gpr_log(GPR_INFO,
"ADS[%p]: StreamAggregatedResources forcing early failure "
"ADS[%s]: StreamAggregatedResources forcing early failure "
"with status code: %d, message: %s",
this, forced_ads_failure_.value().error_code(),
debug_label_.c_str(), forced_ads_failure_.value().error_code(),
forced_ads_failure_.value().error_message().c_str());
return forced_ads_failure_.value();
}
@ -273,8 +283,9 @@ class AdsServiceImpl
requests.pop_front();
did_work = true;
gpr_log(GPR_INFO,
"ADS[%p]: Received request for type %s with content %s", this,
request.type_url().c_str(), request.DebugString().c_str());
"ADS[%s]: Received request for type %s with content %s",
debug_label_.c_str(), request.type_url().c_str(),
request.DebugString().c_str());
SentState& sent_state = sent_state_map[request.type_url()];
// Process request.
ProcessRequest(request, &update_queue, &subscription_map, &sent_state,
@ -282,7 +293,7 @@ class AdsServiceImpl
}
}
if (response.has_value()) {
gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
gpr_log(GPR_INFO, "ADS[%s]: Sending response: %s", debug_label_.c_str(),
response->DebugString().c_str());
stream->Write(response.value());
}
@ -303,8 +314,8 @@ class AdsServiceImpl
}
}
if (response.has_value()) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
response->DebugString().c_str());
gpr_log(GPR_INFO, "ADS[%s]: Sending update response: %s",
debug_label_.c_str(), response->DebugString().c_str());
stream->Write(response.value());
}
{
@ -338,7 +349,8 @@ class AdsServiceImpl
}
}
}
gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this);
gpr_log(GPR_INFO, "ADS[%s]: StreamAggregatedResources done",
debug_label_.c_str());
RemoveClient(context->peer());
return Status::OK;
}
@ -369,8 +381,8 @@ class AdsServiceImpl
ResponseState response_state;
if (!request.has_error_detail()) {
response_state.state = ResponseState::ACKED;
gpr_log(GPR_INFO, "ADS[%p]: client ACKed resource_type=%s version=%s",
this, request.type_url().c_str(),
gpr_log(GPR_INFO, "ADS[%s]: client ACKed resource_type=%s version=%s",
debug_label_.c_str(), request.type_url().c_str(),
request.version_info().c_str());
} else {
response_state.state = ResponseState::NACKED;
@ -380,8 +392,9 @@ class AdsServiceImpl
}
response_state.error_message = request.error_detail().message();
gpr_log(GPR_INFO,
"ADS[%p]: client NACKed resource_type=%s version=%s: %s", this,
request.type_url().c_str(), request.version_info().c_str(),
"ADS[%s]: client NACKed resource_type=%s version=%s: %s",
debug_label_.c_str(), request.type_url().c_str(),
request.version_info().c_str(),
response_state.error_message.c_str());
}
resource_type_response_state_[request.type_url()].emplace_back(
@ -412,8 +425,9 @@ class AdsServiceImpl
&resource_state, update_queue) ||
ClientNeedsResourceUpdate(resource_type_state, resource_state,
sent_state->resource_type_version)) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
request.type_url().c_str(), resource_name.c_str());
gpr_log(GPR_INFO, "ADS[%s]: Sending update for type=%s name=%s",
debug_label_.c_str(), request.type_url().c_str(),
resource_name.c_str());
resources_added_to_response.emplace(resource_name);
if (!response->has_value()) response->emplace();
if (resource_state.resource.has_value()) {
@ -427,8 +441,9 @@ class AdsServiceImpl
}
} else {
gpr_log(GPR_INFO,
"ADS[%p]: client does not need update for type=%s name=%s",
this, request.type_url().c_str(), resource_name.c_str());
"ADS[%s]: client does not need update for type=%s name=%s",
debug_label_.c_str(), request.type_url().c_str(),
resource_name.c_str());
}
}
// Process unsubscriptions for any resource no longer
@ -451,8 +466,8 @@ class AdsServiceImpl
SubscriptionMap* subscription_map, SentState* sent_state,
absl::optional<DiscoveryResponse>* response)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(ads_mu_) {
gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s", this,
resource_type.c_str(), resource_name.c_str());
gpr_log(GPR_INFO, "ADS[%s]: Received update for type=%s name=%s",
debug_label_.c_str(), resource_type.c_str(), resource_name.c_str());
auto& subscription_name_map = (*subscription_map)[resource_type];
auto& resource_type_state = resource_map_[resource_type];
auto& resource_name_map = resource_type_state.resource_name_map;
@ -461,8 +476,9 @@ class AdsServiceImpl
ResourceState& resource_state = resource_name_map[resource_name];
if (ClientNeedsResourceUpdate(resource_type_state, resource_state,
sent_state->resource_type_version)) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
resource_type.c_str(), resource_name.c_str());
gpr_log(GPR_INFO, "ADS[%s]: Sending update for type=%s name=%s",
debug_label_.c_str(), resource_type.c_str(),
resource_name.c_str());
response->emplace();
if (resource_state.resource.has_value()) {
auto* resource = (*response)->add_resources();
@ -493,7 +509,8 @@ class AdsServiceImpl
requests->emplace_back(std::move(request));
}
}
gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this);
gpr_log(GPR_INFO, "ADS[%s]: Null read, stream closed",
debug_label_.c_str());
grpc_core::MutexLock lock(&ads_mu_);
*stream_closed = true;
}
@ -564,6 +581,7 @@ class AdsServiceImpl
std::function<void(const DiscoveryRequest& request)> check_first_request_;
std::function<void(absl::StatusCode)> check_nack_status_code_;
std::string debug_label_;
grpc_core::CondVar ads_cond_;
grpc_core::Mutex ads_mu_;
@ -700,12 +718,15 @@ class LrsServiceImpl
std::set<std::string> cluster_names,
std::function<void()> stream_started_callback = nullptr,
std::function<void(const LoadStatsRequest& request)>
check_first_request = nullptr)
check_first_request = nullptr,
absl::string_view debug_label = "")
: client_load_reporting_interval_seconds_(
client_load_reporting_interval_seconds),
cluster_names_(std::move(cluster_names)),
stream_started_callback_(std::move(stream_started_callback)),
check_first_request_(std::move(check_first_request)) {}
check_first_request_(std::move(check_first_request)),
debug_label_(absl::StrFormat(
"%p%s%s", this, debug_label.empty() ? "" : ":", debug_label)) {}
// Must be called before the LRS call is started.
void set_send_all_clusters(bool send_all_clusters) {
@ -729,7 +750,7 @@ class LrsServiceImpl
using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>;
Status StreamLoadStats(ServerContext* /*context*/, Stream* stream) override {
gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this);
gpr_log(GPR_INFO, "LRS[%s]: StreamLoadStats starts", debug_label_.c_str());
if (stream_started_callback_ != nullptr) stream_started_callback_();
// Take a reference of the LrsServiceImpl object, reference will go
// out of scope after this method exits.
@ -756,8 +777,8 @@ class LrsServiceImpl
// Wait for report.
request.Clear();
while (stream->Read(&request)) {
gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s",
this, request.DebugString().c_str());
gpr_log(GPR_INFO, "LRS[%s]: received client load report message: %s",
debug_label_.c_str(), request.DebugString().c_str());
std::vector<ClientStats> stats;
for (const auto& cluster_stats : request.cluster_stats()) {
stats.emplace_back(cluster_stats);
@ -774,7 +795,7 @@ class LrsServiceImpl
lrs_cv_.Wait(&lrs_mu_);
}
}
gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this);
gpr_log(GPR_INFO, "LRS[%s]: StreamLoadStats done", debug_label_.c_str());
return Status::OK;
}
@ -783,6 +804,7 @@ class LrsServiceImpl
std::set<std::string> cluster_names_;
std::function<void()> stream_started_callback_;
std::function<void(const LoadStatsRequest& request)> check_first_request_;
std::string debug_label_;
grpc_core::CondVar lrs_cv_;
grpc_core::Mutex lrs_mu_;

@ -151,7 +151,7 @@ std::string XdsBootstrapBuilder::MakeAuthorityText() {
const std::string& name = p.first;
const AuthorityInfo& authority_info = p.second;
std::vector<std::string> fields = {
MakeXdsServersText({authority_info.server})};
MakeXdsServersText(authority_info.servers)};
if (!authority_info.client_listener_resource_name_template.empty()) {
fields.push_back(absl::StrCat(
"\"client_listener_resource_name_template\": \"",

@ -62,7 +62,15 @@ class XdsBootstrapBuilder {
XdsBootstrapBuilder& AddAuthority(
const std::string& authority, const std::string& server = "",
const std::string& client_listener_resource_name_template = "") {
authorities_[authority] = {server, client_listener_resource_name_template};
return AddAuthority(authority,
server.empty() ? std::vector<std::string>()
: std::vector<std::string>({server}),
client_listener_resource_name_template);
}
XdsBootstrapBuilder& AddAuthority(
const std::string& authority, const std::vector<std::string>& servers,
const std::string& client_listener_resource_name_template = "") {
authorities_[authority] = {servers, client_listener_resource_name_template};
return *this;
}
XdsBootstrapBuilder& SetServerListenerResourceNameTemplate(
@ -80,7 +88,7 @@ class XdsBootstrapBuilder {
std::string plugin_config;
};
struct AuthorityInfo {
std::string server;
std::vector<std::string> servers;
std::string client_listener_resource_name_template;
};

@ -11907,6 +11907,28 @@
],
"uses_polling": false
},
{
"args": [],
"benchmark": false,
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"flaky": false,
"gtest": true,
"language": "c++",
"name": "xds_fallback_end2end_test",
"platforms": [
"linux",
"mac",
"posix"
],
"uses_polling": true
},
{
"args": [],
"benchmark": false,

Loading…
Cancel
Save