/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <deque>
#include <memory>
#include <mutex>
#include <numeric>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/security/tls_certificate_provider.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/xds_server_builder.h>
#include "src/core/ext/filters/client_channel/backup_poller.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel_args.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/xds/certificate_provider_registry.h"
#include "src/core/ext/xds/xds_api.h"
#include "src/core/ext/xds/xds_channel_args.h"
#include "src/core/ext/xds/xds_client.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gpr/time_precise.h"
#include "src/core/lib/gpr/tmpfile.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/gprpp/time_util.h"
#include "src/core/lib/iomgr/load_file.h"
#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
#include "src/cpp/server/secure_server_credentials.h"
#include "test/core/util/port.h"
#include "test/core/util/resolve_localhost_ip46.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/cds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/eds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/ads.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/aggregate_cluster.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/cluster.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/discovery.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/endpoint.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/http_connection_manager.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/listener.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/lrs.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/route.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/router.grpc.pb.h"
#include "src/proto/grpc/testing/xds/v3/tls.grpc.pb.h"
namespace grpc {
namespace testing {
namespace {
using std::chrono::system_clock;
using ::envoy::config::cluster::v3::CircuitBreakers;
using ::envoy::config::cluster::v3::Cluster;
using ::envoy::config::cluster::v3::CustomClusterType;
using ::envoy::config::cluster::v3::RoutingPriority;
using ::envoy::config::endpoint::v3::ClusterLoadAssignment;
using ::envoy::config::endpoint::v3::HealthStatus;
using ::envoy::config::listener::v3::Listener;
using ::envoy::config::route::v3::RouteConfiguration;
using ::envoy::extensions::clusters::aggregate::v3::ClusterConfig;
using ::envoy::extensions::filters::network::http_connection_manager::v3::
HttpConnectionManager;
using ::envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext;
using ::envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext;
using ::envoy::type::matcher::v3::StringMatcher;
using ::envoy::type::v3::FractionalPercent;
constexpr char kLdsTypeUrl[] =
"type.googleapis.com/envoy.config.listener.v3.Listener";
constexpr char kRdsTypeUrl[] =
"type.googleapis.com/envoy.config.route.v3.RouteConfiguration";
constexpr char kCdsTypeUrl[] =
"type.googleapis.com/envoy.config.cluster.v3.Cluster";
constexpr char kEdsTypeUrl[] =
"type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment";
constexpr char kLdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Listener";
constexpr char kRdsV2TypeUrl[] =
"type.googleapis.com/envoy.api.v2.RouteConfiguration";
constexpr char kCdsV2TypeUrl[] = "type.googleapis.com/envoy.api.v2.Cluster";
constexpr char kEdsV2TypeUrl[] =
"type.googleapis.com/envoy.api.v2.ClusterLoadAssignment";
constexpr char kDefaultLocalityRegion[] = "xds_default_locality_region";
constexpr char kDefaultLocalityZone[] = "xds_default_locality_zone";
constexpr char kLbDropType[] = "lb";
constexpr char kThrottleDropType[] = "throttle";
constexpr char kServerName[] = "server.example.com";
constexpr char kDefaultRouteConfigurationName[] = "route_config_name";
constexpr char kDefaultClusterName[] = "cluster_name";
constexpr char kDefaultEdsServiceName[] = "eds_service_name";
constexpr int kDefaultLocalityWeight = 3;
constexpr int kDefaultLocalityPriority = 0;
constexpr char kRequestMessage[] = "Live long and prosper.";
constexpr char kDefaultServiceConfig[] =
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_cluster_resolver_experimental\":{\n"
" \"discoveryMechanisms\": [\n"
" { \"clusterName\": \"server.example.com\",\n"
" \"type\": \"EDS\",\n"
" \"lrsLoadReportingServerName\": \"\"\n"
" } ]\n"
" } }\n"
" ]\n"
"}";
constexpr char kDefaultServiceConfigWithoutLoadReporting[] =
"{\n"
" \"loadBalancingConfig\":[\n"
" { \"does_not_exist\":{} },\n"
" { \"xds_cluster_resolver_experimental\":{\n"
" \"discoveryMechanisms\": [\n"
" { \"clusterName\": \"server.example.com\",\n"
" \"type\": \"EDS\"\n"
" } ]\n"
" } }\n"
" ]\n"
"}";
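// Bootstrap configurations pointing the XdsClient at the fake:///xds_server
// URI with fake channel credentials. WriteBootstrapFiles() writes them to
// temp files; SetUp() selects one via the GRPC_XDS_BOOTSTRAP or
// GRPC_XDS_BOOTSTRAP_CONFIG env var depending on the test parameters.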
constexpr char kBootstrapFileV3[] =
"{\n"
" \"xds_servers\": [\n"
" {\n"
" \"server_uri\": \"fake:///xds_server\",\n"
" \"channel_creds\": [\n"
" {\n"
" \"type\": \"fake\"\n"
" }\n"
" ],\n"
" \"server_features\": [\"xds_v3\"]\n"
" }\n"
" ],\n"
" \"node\": {\n"
" \"id\": \"xds_end2end_test\",\n"
" \"cluster\": \"test\",\n"
" \"metadata\": {\n"
" \"foo\": \"bar\"\n"
" },\n"
" \"locality\": {\n"
" \"region\": \"corp\",\n"
" \"zone\": \"svl\",\n"
" \"sub_zone\": \"mp3\"\n"
" }\n"
" },\n"
" \"certificate_providers\": {\n"
" \"fake_plugin1\": {\n"
" \"plugin_name\": \"fake1\"\n"
" },\n"
" \"fake_plugin2\": {\n"
" \"plugin_name\": \"fake2\"\n"
" },\n"
" \"file_plugin\": {\n"
" \"plugin_name\": \"file_watcher\",\n"
" \"config\": {\n"
" \"certificate_file\": \"src/core/tsi/test_creds/client.pem\",\n"
" \"private_key_file\": \"src/core/tsi/test_creds/client.key\",\n"
" \"ca_certificate_file\": \"src/core/tsi/test_creds/ca.pem\"\n"
" }"
" }\n"
" }\n"
"}\n";
constexpr char kBootstrapFileV2[] =
"{\n"
" \"xds_servers\": [\n"
" {\n"
" \"server_uri\": \"fake:///xds_server\",\n"
" \"channel_creds\": [\n"
" {\n"
" \"type\": \"fake\"\n"
" }\n"
" ]\n"
" }\n"
" ],\n"
" \"node\": {\n"
" \"id\": \"xds_end2end_test\",\n"
" \"cluster\": \"test\",\n"
" \"metadata\": {\n"
" \"foo\": \"bar\"\n"
" },\n"
" \"locality\": {\n"
" \"region\": \"corp\",\n"
" \"zone\": \"svl\",\n"
" \"sub_zone\": \"mp3\"\n"
" }\n"
" }\n"
"}\n";
constexpr char kCaCertPath[] = "src/core/tsi/test_creds/ca.pem";
constexpr char kServerCertPath[] = "src/core/tsi/test_creds/server1.pem";
constexpr char kServerKeyPath[] = "src/core/tsi/test_creds/server1.key";
constexpr char kClientCertPath[] = "src/core/tsi/test_creds/client.pem";
constexpr char kClientKeyPath[] = "src/core/tsi/test_creds/client.key";
constexpr char kBadClientCertPath[] = "src/core/tsi/test_creds/badclient.pem";
constexpr char kBadClientKeyPath[] = "src/core/tsi/test_creds/badclient.key";
char* g_bootstrap_file_v3;
char* g_bootstrap_file_v2;
void WriteBootstrapFiles() {
char* bootstrap_file;
FILE* out = gpr_tmpfile("xds_bootstrap_v3", &bootstrap_file);
fputs(kBootstrapFileV3, out);
fclose(out);
g_bootstrap_file_v3 = bootstrap_file;
out = gpr_tmpfile("xds_bootstrap_v2", &bootstrap_file);
fputs(kBootstrapFileV2, out);
fclose(out);
g_bootstrap_file_v2 = bootstrap_file;
}
// Helper class to minimize the number of unique ports we use for this test.
class PortSaver {
public:
int GetPort() {
if (idx_ >= ports_.size()) {
ports_.push_back(grpc_pick_unused_port_or_die());
}
return ports_[idx_++];
}
void Reset() { idx_ = 0; }
private:
std::vector<int> ports_;
size_t idx_ = 0;
};
PortSaver* g_port_saver = nullptr;
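// Wraps a service implementation with thread-safe request/response counters;
// subclasses call IncreaseRequestCount()/IncreaseResponseCount() as they
// handle RPCs, and tests read the counts to verify traffic distribution.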
template <typename ServiceType>
class CountedService : public ServiceType {
public:
size_t request_count() {
grpc_core::MutexLock lock(&mu_);
return request_count_;
}
size_t response_count() {
grpc_core::MutexLock lock(&mu_);
return response_count_;
}
void IncreaseResponseCount() {
grpc_core::MutexLock lock(&mu_);
++response_count_;
}
void IncreaseRequestCount() {
grpc_core::MutexLock lock(&mu_);
++request_count_;
}
void ResetCounters() {
grpc_core::MutexLock lock(&mu_);
request_count_ = 0;
response_count_ = 0;
}
private:
grpc_core::Mutex mu_;
size_t request_count_ = 0;
size_t response_count_ = 0;
};
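// Echo service implementation run by each test backend. In addition to the
// counts from CountedService, it records the peer URI and peer identity of
// every client that reaches it.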
template <typename RpcService>
class BackendServiceImpl
: public CountedService<TestMultipleServiceImpl<RpcService>> {
public:
BackendServiceImpl() {}
Status Echo(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
auto peer_identity = context->auth_context()->GetPeerIdentity();
CountedService<TestMultipleServiceImpl<RpcService>>::IncreaseRequestCount();
const auto status =
TestMultipleServiceImpl<RpcService>::Echo(context, request, response);
CountedService<
TestMultipleServiceImpl<RpcService>>::IncreaseResponseCount();
{
grpc_core::MutexLock lock(&mu_);
clients_.insert(context->peer());
last_peer_identity_.clear();
for (const auto& entry : peer_identity) {
last_peer_identity_.emplace_back(entry.data(), entry.size());
}
}
return status;
}
Status Echo1(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
return Echo(context, request, response);
}
Status Echo2(ServerContext* context, const EchoRequest* request,
EchoResponse* response) override {
return Echo(context, request, response);
}
void Start() {}
void Shutdown() {}
std::set<std::string> clients() {
grpc_core::MutexLock lock(&mu_);
return clients_;
}
const std::vector<std::string>& last_peer_identity() {
grpc_core::MutexLock lock(&mu_);
return last_peer_identity_;
}
private:
grpc_core::Mutex mu_;
std::set<std::string> clients_;
std::vector<std::string> last_peer_identity_;
};
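// In-memory representation of the per-cluster load report data sent by the
// client to the LRS server, converted from the proto messages; reports can
// be accumulated with operator+=.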
class ClientStats {
public:
struct LocalityStats {
LocalityStats() {}
// Converts from proto message class.
template <class UpstreamLocalityStats>
explicit LocalityStats(const UpstreamLocalityStats& upstream_locality_stats)
: total_successful_requests(
upstream_locality_stats.total_successful_requests()),
total_requests_in_progress(
upstream_locality_stats.total_requests_in_progress()),
total_error_requests(upstream_locality_stats.total_error_requests()),
total_issued_requests(
upstream_locality_stats.total_issued_requests()) {}
LocalityStats& operator+=(const LocalityStats& other) {
total_successful_requests += other.total_successful_requests;
total_requests_in_progress += other.total_requests_in_progress;
total_error_requests += other.total_error_requests;
total_issued_requests += other.total_issued_requests;
return *this;
}
uint64_t total_successful_requests = 0;
uint64_t total_requests_in_progress = 0;
uint64_t total_error_requests = 0;
uint64_t total_issued_requests = 0;
};
ClientStats() {}
// Converts from proto message class.
template <class ClusterStats>
explicit ClientStats(const ClusterStats& cluster_stats)
: cluster_name_(cluster_stats.cluster_name()),
total_dropped_requests_(cluster_stats.total_dropped_requests()) {
for (const auto& input_locality_stats :
cluster_stats.upstream_locality_stats()) {
locality_stats_.emplace(input_locality_stats.locality().sub_zone(),
LocalityStats(input_locality_stats));
}
for (const auto& input_dropped_requests :
cluster_stats.dropped_requests()) {
dropped_requests_.emplace(input_dropped_requests.category(),
input_dropped_requests.dropped_count());
}
}
const std::string& cluster_name() const { return cluster_name_; }
const std::map<std::string, LocalityStats>& locality_stats() const {
return locality_stats_;
}
uint64_t total_successful_requests() const {
uint64_t sum = 0;
for (auto& p : locality_stats_) {
sum += p.second.total_successful_requests;
}
return sum;
}
uint64_t total_requests_in_progress() const {
uint64_t sum = 0;
for (auto& p : locality_stats_) {
sum += p.second.total_requests_in_progress;
}
return sum;
}
uint64_t total_error_requests() const {
uint64_t sum = 0;
for (auto& p : locality_stats_) {
sum += p.second.total_error_requests;
}
return sum;
}
uint64_t total_issued_requests() const {
uint64_t sum = 0;
for (auto& p : locality_stats_) {
sum += p.second.total_issued_requests;
}
return sum;
}
uint64_t total_dropped_requests() const { return total_dropped_requests_; }
uint64_t dropped_requests(const std::string& category) const {
auto iter = dropped_requests_.find(category);
GPR_ASSERT(iter != dropped_requests_.end());
return iter->second;
}
ClientStats& operator+=(const ClientStats& other) {
for (const auto& p : other.locality_stats_) {
locality_stats_[p.first] += p.second;
}
total_dropped_requests_ += other.total_dropped_requests_;
for (const auto& p : other.dropped_requests_) {
dropped_requests_[p.first] += p.second;
}
return *this;
}
private:
std::string cluster_name_;
std::map<std::string, LocalityStats> locality_stats_;
uint64_t total_dropped_requests_ = 0;
std::map<std::string, uint64_t> dropped_requests_;
};
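// Fake xDS Aggregated Discovery Service (ADS) implementation supporting both
// the v2 and v3 transport protocols. Tests install resources with
// SetLdsResource()/SetRdsResource()/SetCdsResource()/SetEdsResource() and
// inspect the client's ACK/NACK behavior via the *_response_state()
// accessors.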
class AdsServiceImpl : public std::enable_shared_from_this<AdsServiceImpl> {
public:
struct ResponseState {
enum State { NOT_SENT, SENT, ACKED, NACKED };
State state = NOT_SENT;
std::string error_message;
};
struct EdsResourceArgs {
struct Locality {
Locality(std::string sub_zone, std::vector<int> ports,
int lb_weight = kDefaultLocalityWeight,
int priority = kDefaultLocalityPriority,
std::vector<HealthStatus> health_statuses = {})
: sub_zone(std::move(sub_zone)),
ports(std::move(ports)),
lb_weight(lb_weight),
priority(priority),
health_statuses(std::move(health_statuses)) {}
const std::string sub_zone;
std::vector<int> ports;
int lb_weight;
int priority;
std::vector<HealthStatus> health_statuses;
};
EdsResourceArgs() = default;
explicit EdsResourceArgs(std::vector<Locality> locality_list)
: locality_list(std::move(locality_list)) {}
std::vector<Locality> locality_list;
std::map<std::string, uint32_t> drop_categories;
FractionalPercent::DenominatorType drop_denominator =
FractionalPercent::MILLION;
};
AdsServiceImpl()
: v2_rpc_service_(this, /*is_v2=*/true),
v3_rpc_service_(this, /*is_v2=*/false) {}
bool seen_v2_client() const { return seen_v2_client_; }
bool seen_v3_client() const { return seen_v3_client_; }
::envoy::service::discovery::v2::AggregatedDiscoveryService::Service*
v2_rpc_service() {
return &v2_rpc_service_;
}
::envoy::service::discovery::v3::AggregatedDiscoveryService::Service*
v3_rpc_service() {
return &v3_rpc_service_;
}
ResponseState lds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
return resource_type_response_state_[kLdsTypeUrl];
}
ResponseState rds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
return resource_type_response_state_[kRdsTypeUrl];
}
ResponseState cds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
return resource_type_response_state_[kCdsTypeUrl];
}
ResponseState eds_response_state() {
grpc_core::MutexLock lock(&ads_mu_);
return resource_type_response_state_[kEdsTypeUrl];
}
void SetResourceIgnore(const std::string& type_url) {
grpc_core::MutexLock lock(&ads_mu_);
resource_types_to_ignore_.emplace(type_url);
}
void SetResourceMinVersion(const std::string& type_url, int version) {
grpc_core::MutexLock lock(&ads_mu_);
resource_type_min_versions_[type_url] = version;
}
void UnsetResource(const std::string& type_url, const std::string& name) {
grpc_core::MutexLock lock(&ads_mu_);
ResourceTypeState& resource_type_state = resource_map_[type_url];
++resource_type_state.resource_type_version;
ResourceState& resource_state = resource_type_state.resource_name_map[name];
resource_state.resource_type_version =
resource_type_state.resource_type_version;
resource_state.resource.reset();
gpr_log(GPR_INFO,
"ADS[%p]: Unsetting %s resource %s; resource_type_version now %u",
this, type_url.c_str(), name.c_str(),
resource_type_state.resource_type_version);
for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
}
}
void SetResource(google::protobuf::Any resource, const std::string& type_url,
const std::string& name) {
grpc_core::MutexLock lock(&ads_mu_);
ResourceTypeState& resource_type_state = resource_map_[type_url];
++resource_type_state.resource_type_version;
ResourceState& resource_state = resource_type_state.resource_name_map[name];
resource_state.resource_type_version =
resource_type_state.resource_type_version;
resource_state.resource = std::move(resource);
gpr_log(GPR_INFO,
"ADS[%p]: Updating %s resource %s; resource_type_version now %u",
this, type_url.c_str(), name.c_str(),
resource_type_state.resource_type_version);
for (SubscriptionState* subscription : resource_state.subscriptions) {
subscription->update_queue->emplace_back(type_url, name);
}
}
void SetLdsResource(const Listener& listener) {
google::protobuf::Any resource;
resource.PackFrom(listener);
SetResource(std::move(resource), kLdsTypeUrl, listener.name());
}
void SetRdsResource(const RouteConfiguration& route) {
google::protobuf::Any resource;
resource.PackFrom(route);
SetResource(std::move(resource), kRdsTypeUrl, route.name());
}
void SetCdsResource(const Cluster& cluster) {
google::protobuf::Any resource;
resource.PackFrom(cluster);
SetResource(std::move(resource), kCdsTypeUrl, cluster.name());
}
void SetEdsResource(const ClusterLoadAssignment& assignment) {
google::protobuf::Any resource;
resource.PackFrom(assignment);
SetResource(std::move(resource), kEdsTypeUrl, assignment.cluster_name());
}
void Start() {
grpc_core::MutexLock lock(&ads_mu_);
ads_done_ = false;
}
void Shutdown() {
{
grpc_core::MutexLock lock(&ads_mu_);
NotifyDoneWithAdsCallLocked();
resource_type_response_state_.clear();
}
gpr_log(GPR_INFO, "ADS[%p]: shut down", this);
}
void NotifyDoneWithAdsCall() {
grpc_core::MutexLock lock(&ads_mu_);
NotifyDoneWithAdsCallLocked();
}
void NotifyDoneWithAdsCallLocked() {
if (!ads_done_) {
ads_done_ = true;
ads_cond_.SignalAll();
}
}
std::set<std::string> clients() {
grpc_core::MutexLock lock(&clients_mu_);
return clients_;
}
private:
// A queue of resource type/name pairs that have changed since the client
// subscribed to them.
using UpdateQueue = std::deque<
std::pair<std::string /* type url */, std::string /* resource name */>>;
// A struct representing a client's subscription to a particular resource.
struct SubscriptionState {
// The queue upon which to place updates when the resource is updated.
UpdateQueue* update_queue;
};
// A map representing a client's subscriptions to all the resources.
using SubscriptionNameMap =
std::map<std::string /* resource_name */, SubscriptionState>;
using SubscriptionMap =
std::map<std::string /* type_url */, SubscriptionNameMap>;
// Sent state for a given resource type.
struct SentState {
int nonce = 0;
int resource_type_version = 0;
};
// A struct representing the current state for an individual resource.
struct ResourceState {
// The resource itself, if present.
absl::optional<google::protobuf::Any> resource;
// The resource type version that this resource was last updated in.
int resource_type_version = 0;
// A list of subscriptions to this resource.
std::set<SubscriptionState*> subscriptions;
};
// The current state for all individual resources of a given type.
using ResourceNameMap =
std::map<std::string /* resource_name */, ResourceState>;
struct ResourceTypeState {
int resource_type_version = 0;
ResourceNameMap resource_name_map;
};
using ResourceMap = std::map<std::string /* type_url */, ResourceTypeState>;
template <class RpcApi, class DiscoveryRequest, class DiscoveryResponse>
class RpcService : public RpcApi::Service {
public:
using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>;
RpcService(AdsServiceImpl* parent, bool is_v2)
: parent_(parent), is_v2_(is_v2) {}
Status StreamAggregatedResources(ServerContext* context,
Stream* stream) override {
gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources starts", this);
parent_->AddClient(context->peer());
if (is_v2_) {
parent_->seen_v2_client_ = true;
} else {
parent_->seen_v3_client_ = true;
}
// Take a reference to the AdsServiceImpl object; the reference goes
// out of scope when this request handler returns. This ensures that
// the parent won't be destroyed until this stream is complete.
std::shared_ptr<AdsServiceImpl> ads_service_impl =
parent_->shared_from_this();
// Resources (type/name pairs) that have changed since the client
// subscribed to them.
UpdateQueue update_queue;
// Resources that the client will be subscribed to, keyed by resource
// type URL.
SubscriptionMap subscription_map;
// Sent state for each resource type.
std::map<std::string /*type_url*/, SentState> sent_state_map;
// Spawn a thread to read requests from the stream.
// Requests will be delivered to this thread in a queue.
std::deque<DiscoveryRequest> requests;
bool stream_closed = false;
std::thread reader(std::bind(&RpcService::BlockingRead, this, stream,
&requests, &stream_closed));
// Main loop to process requests and updates.
while (true) {
// Tracks whether the loop received any work to do (a request or an
// update), regardless of whether a response was actually sent out.
bool did_work = false;
// Look for new requests and decide what to handle.
absl::optional<DiscoveryResponse> response;
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
// If the stream has been closed or our parent is being shut
// down, stop immediately.
if (stream_closed || parent_->ads_done_) break;
// Otherwise, see if there's a request to read from the queue.
if (!requests.empty()) {
DiscoveryRequest request = std::move(requests.front());
requests.pop_front();
did_work = true;
gpr_log(GPR_INFO,
"ADS[%p]: Received request for type %s with content %s",
this, request.type_url().c_str(),
request.DebugString().c_str());
const std::string v3_resource_type =
TypeUrlToV3(request.type_url());
SentState& sent_state = sent_state_map[v3_resource_type];
// Process request.
ProcessRequest(request, v3_resource_type, &update_queue,
&subscription_map, &sent_state, &response);
}
}
if (response.has_value()) {
gpr_log(GPR_INFO, "ADS[%p]: Sending response: %s", this,
response->DebugString().c_str());
stream->Write(response.value());
}
response.reset();
// Look for updates and decide what to handle.
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
if (!update_queue.empty()) {
const std::string resource_type =
std::move(update_queue.front().first);
const std::string resource_name =
std::move(update_queue.front().second);
update_queue.pop_front();
did_work = true;
SentState& sent_state = sent_state_map[resource_type];
ProcessUpdate(resource_type, resource_name, &subscription_map,
&sent_state, &response);
}
}
if (response.has_value()) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update response: %s", this,
response->DebugString().c_str());
stream->Write(response.value());
}
// If we didn't find anything to do, delay before the next loop
// iteration; otherwise, check whether we should exit and then
// immediately continue.
gpr_timespec deadline =
grpc_timeout_milliseconds_to_deadline(did_work ? 0 : 10);
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
if (!grpc_core::WaitUntilWithDeadline(
&parent_->ads_cond_, &parent_->ads_mu_,
[this] { return parent_->ads_done_; },
grpc_core::ToAbslTime(deadline))) {
break;
}
}
}
// Done with main loop. Clean up before returning.
// Join reader thread.
reader.join();
// Clean up any subscriptions that were still active when the call
// finished.
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
for (auto& p : subscription_map) {
const std::string& type_url = p.first;
SubscriptionNameMap& subscription_name_map = p.second;
for (auto& q : subscription_name_map) {
const std::string& resource_name = q.first;
SubscriptionState& subscription_state = q.second;
ResourceNameMap& resource_name_map =
parent_->resource_map_[type_url].resource_name_map;
ResourceState& resource_state = resource_name_map[resource_name];
resource_state.subscriptions.erase(&subscription_state);
}
}
}
gpr_log(GPR_INFO, "ADS[%p]: StreamAggregatedResources done", this);
parent_->RemoveClient(context->peer());
return Status::OK;
}
private:
// Processes a request read from the client.
// Populates response if needed.
void ProcessRequest(const DiscoveryRequest& request,
const std::string& v3_resource_type,
UpdateQueue* update_queue,
SubscriptionMap* subscription_map,
SentState* sent_state,
absl::optional<DiscoveryResponse>* response) {
// Check the nonce sent by the client, if any.
// (This will be absent on the first request on a stream.)
if (request.response_nonce().empty()) {
int client_resource_type_version = 0;
if (!request.version_info().empty()) {
GPR_ASSERT(absl::SimpleAtoi(request.version_info(),
&client_resource_type_version));
}
EXPECT_GE(client_resource_type_version,
parent_->resource_type_min_versions_[v3_resource_type])
<< "resource_type: " << v3_resource_type;
} else {
int client_nonce;
GPR_ASSERT(absl::SimpleAtoi(request.response_nonce(), &client_nonce));
// Ignore requests with stale nonces.
if (client_nonce < sent_state->nonce) return;
// Check for ACK or NACK.
auto it = parent_->resource_type_response_state_.find(v3_resource_type);
if (it != parent_->resource_type_response_state_.end()) {
if (!request.has_error_detail()) {
it->second.state = ResponseState::ACKED;
it->second.error_message.clear();
gpr_log(GPR_INFO,
"ADS[%p]: client ACKed resource_type=%s version=%s", this,
request.type_url().c_str(), request.version_info().c_str());
} else {
it->second.state = ResponseState::NACKED;
EXPECT_EQ(request.error_detail().code(),
GRPC_STATUS_INVALID_ARGUMENT);
it->second.error_message = request.error_detail().message();
gpr_log(GPR_INFO,
"ADS[%p]: client NACKed resource_type=%s version=%s: %s",
this, request.type_url().c_str(),
request.version_info().c_str(),
it->second.error_message.c_str());
}
}
}
// Ignore resource types as requested by tests.
if (parent_->resource_types_to_ignore_.find(v3_resource_type) !=
parent_->resource_types_to_ignore_.end()) {
return;
}
// Look at all the resource names in the request.
auto& subscription_name_map = (*subscription_map)[v3_resource_type];
auto& resource_type_state = parent_->resource_map_[v3_resource_type];
auto& resource_name_map = resource_type_state.resource_name_map;
std::set<std::string> resources_in_current_request;
std::set<std::string> resources_added_to_response;
for (const std::string& resource_name : request.resource_names()) {
resources_in_current_request.emplace(resource_name);
auto& subscription_state = subscription_name_map[resource_name];
auto& resource_state = resource_name_map[resource_name];
// Subscribe if needed.
// Send the resource in the response if either (a) this is
// a new subscription or (b) there is an updated version of
// this resource to send.
if (parent_->MaybeSubscribe(v3_resource_type, resource_name,
&subscription_state, &resource_state,
update_queue) ||
ClientNeedsResourceUpdate(resource_type_state, resource_state,
sent_state->resource_type_version)) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
request.type_url().c_str(), resource_name.c_str());
resources_added_to_response.emplace(resource_name);
if (!response->has_value()) response->emplace();
if (resource_state.resource.has_value()) {
auto* resource = (*response)->add_resources();
resource->CopyFrom(resource_state.resource.value());
if (is_v2_) {
resource->set_type_url(request.type_url());
}
}
} else {
gpr_log(GPR_INFO,
"ADS[%p]: client does not need update for type=%s name=%s",
this, request.type_url().c_str(), resource_name.c_str());
}
}
// Process unsubscriptions for any resource no longer
// present in the request's resource list.
parent_->ProcessUnsubscriptions(
v3_resource_type, resources_in_current_request,
&subscription_name_map, &resource_name_map);
// Construct response if needed.
if (!resources_added_to_response.empty()) {
CompleteBuildingDiscoveryResponse(
v3_resource_type, request.type_url(),
resource_type_state.resource_type_version, subscription_name_map,
resources_added_to_response, sent_state, &response->value());
}
}
// Processes a resource update from the test.
// Populates response if needed.
void ProcessUpdate(const std::string& resource_type,
const std::string& resource_name,
SubscriptionMap* subscription_map, SentState* sent_state,
absl::optional<DiscoveryResponse>* response) {
const std::string v2_resource_type = TypeUrlToV2(resource_type);
gpr_log(GPR_INFO, "ADS[%p]: Received update for type=%s name=%s", this,
resource_type.c_str(), resource_name.c_str());
auto& subscription_name_map = (*subscription_map)[resource_type];
auto& resource_type_state = parent_->resource_map_[resource_type];
auto& resource_name_map = resource_type_state.resource_name_map;
auto it = subscription_name_map.find(resource_name);
if (it != subscription_name_map.end()) {
ResourceState& resource_state = resource_name_map[resource_name];
if (ClientNeedsResourceUpdate(resource_type_state, resource_state,
sent_state->resource_type_version)) {
gpr_log(GPR_INFO, "ADS[%p]: Sending update for type=%s name=%s", this,
resource_type.c_str(), resource_name.c_str());
response->emplace();
if (resource_state.resource.has_value()) {
auto* resource = (*response)->add_resources();
resource->CopyFrom(resource_state.resource.value());
if (is_v2_) {
resource->set_type_url(v2_resource_type);
}
}
CompleteBuildingDiscoveryResponse(
resource_type, v2_resource_type,
resource_type_state.resource_type_version, subscription_name_map,
{resource_name}, sent_state, &response->value());
}
}
}
// Runs in a separate thread, doing blocking reads on the stream until
// it is closed.
void BlockingRead(Stream* stream, std::deque<DiscoveryRequest>* requests,
bool* stream_closed) {
DiscoveryRequest request;
bool seen_first_request = false;
while (stream->Read(&request)) {
if (!seen_first_request) {
EXPECT_TRUE(request.has_node());
ASSERT_FALSE(request.node().client_features().empty());
EXPECT_EQ(request.node().client_features(0),
"envoy.lb.does_not_support_overprovisioning");
CheckBuildVersion(request);
seen_first_request = true;
}
{
grpc_core::MutexLock lock(&parent_->ads_mu_);
requests->emplace_back(std::move(request));
}
}
gpr_log(GPR_INFO, "ADS[%p]: Null read, stream closed", this);
grpc_core::MutexLock lock(&parent_->ads_mu_);
*stream_closed = true;
}
// Completes building a DiscoveryResponse by adding common information
// for all resources and, for LDS and CDS, all subscribed resources.
void CompleteBuildingDiscoveryResponse(
const std::string& resource_type, const std::string& v2_resource_type,
const int version, const SubscriptionNameMap& subscription_name_map,
const std::set<std::string>& resources_added_to_response,
SentState* sent_state, DiscoveryResponse* response) {
auto& response_state =
parent_->resource_type_response_state_[resource_type];
if (response_state.state == ResponseState::NOT_SENT) {
response_state.state = ResponseState::SENT;
}
response->set_type_url(is_v2_ ? v2_resource_type : resource_type);
response->set_version_info(std::to_string(version));
response->set_nonce(std::to_string(++sent_state->nonce));
if (resource_type == kLdsTypeUrl || resource_type == kCdsTypeUrl) {
// For LDS and CDS we must send back all subscribed resources
// (even the unchanged ones)
for (const auto& p : subscription_name_map) {
const std::string& resource_name = p.first;
if (resources_added_to_response.find(resource_name) ==
resources_added_to_response.end()) {
ResourceNameMap& resource_name_map =
parent_->resource_map_[resource_type].resource_name_map;
const ResourceState& resource_state =
resource_name_map[resource_name];
if (resource_state.resource.has_value()) {
auto* resource = response->add_resources();
resource->CopyFrom(resource_state.resource.value());
if (is_v2_) {
resource->set_type_url(v2_resource_type);
}
}
}
}
}
sent_state->resource_type_version = version;
}
static std::string TypeUrlToV2(const std::string& resource_type) {
if (resource_type == kLdsTypeUrl) return kLdsV2TypeUrl;
if (resource_type == kRdsTypeUrl) return kRdsV2TypeUrl;
if (resource_type == kCdsTypeUrl) return kCdsV2TypeUrl;
if (resource_type == kEdsTypeUrl) return kEdsV2TypeUrl;
return resource_type;
}
static std::string TypeUrlToV3(const std::string& resource_type) {
if (resource_type == kLdsV2TypeUrl) return kLdsTypeUrl;
if (resource_type == kRdsV2TypeUrl) return kRdsTypeUrl;
if (resource_type == kCdsV2TypeUrl) return kCdsTypeUrl;
if (resource_type == kEdsV2TypeUrl) return kEdsTypeUrl;
return resource_type;
}
static void CheckBuildVersion(
const ::envoy::api::v2::DiscoveryRequest& request) {
EXPECT_FALSE(request.node().build_version().empty());
}
static void CheckBuildVersion(
const ::envoy::service::discovery::v3::DiscoveryRequest& /*request*/) {}
AdsServiceImpl* parent_;
const bool is_v2_;
};
// Checks whether the client needs to receive a newer version of
// the resource.
static bool ClientNeedsResourceUpdate(
const ResourceTypeState& resource_type_state,
const ResourceState& resource_state, int client_resource_type_version) {
return client_resource_type_version <
resource_type_state.resource_type_version &&
resource_state.resource_type_version <=
resource_type_state.resource_type_version;
}
// Subscribes to a resource if not already subscribed:
// 1. Sets the update_queue field in subscription_state.
// 2. Adds subscription_state to resource_state->subscriptions.
bool MaybeSubscribe(const std::string& resource_type,
const std::string& resource_name,
SubscriptionState* subscription_state,
ResourceState* resource_state,
UpdateQueue* update_queue) {
// The update_queue will be null if we were not previously subscribed.
if (subscription_state->update_queue != nullptr) return false;
subscription_state->update_queue = update_queue;
resource_state->subscriptions.emplace(subscription_state);
gpr_log(GPR_INFO, "ADS[%p]: subscribe to resource type %s name %s state %p",
this, resource_type.c_str(), resource_name.c_str(),
&subscription_state);
return true;
}
// Removes subscriptions for resources no longer present in the
// current request.
void ProcessUnsubscriptions(
const std::string& resource_type,
const std::set<std::string>& resources_in_current_request,
SubscriptionNameMap* subscription_name_map,
ResourceNameMap* resource_name_map) {
for (auto it = subscription_name_map->begin();
it != subscription_name_map->end();) {
const std::string& resource_name = it->first;
SubscriptionState& subscription_state = it->second;
if (resources_in_current_request.find(resource_name) !=
resources_in_current_request.end()) {
++it;
continue;
}
gpr_log(GPR_INFO, "ADS[%p]: Unsubscribe to type=%s name=%s state=%p",
this, resource_type.c_str(), resource_name.c_str(),
&subscription_state);
auto resource_it = resource_name_map->find(resource_name);
GPR_ASSERT(resource_it != resource_name_map->end());
auto& resource_state = resource_it->second;
resource_state.subscriptions.erase(&subscription_state);
if (resource_state.subscriptions.empty() &&
!resource_state.resource.has_value()) {
resource_name_map->erase(resource_it);
}
it = subscription_name_map->erase(it);
}
}
void AddClient(const std::string& client) {
grpc_core::MutexLock lock(&clients_mu_);
clients_.insert(client);
}
void RemoveClient(const std::string& client) {
grpc_core::MutexLock lock(&clients_mu_);
clients_.erase(client);
}
RpcService<::envoy::service::discovery::v2::AggregatedDiscoveryService,
::envoy::api::v2::DiscoveryRequest,
::envoy::api::v2::DiscoveryResponse>
v2_rpc_service_;
RpcService<::envoy::service::discovery::v3::AggregatedDiscoveryService,
::envoy::service::discovery::v3::DiscoveryRequest,
::envoy::service::discovery::v3::DiscoveryResponse>
v3_rpc_service_;
std::atomic_bool seen_v2_client_{false};
std::atomic_bool seen_v3_client_{false};
grpc_core::CondVar ads_cond_;
// Protects the members below.
grpc_core::Mutex ads_mu_;
bool ads_done_ = false;
std::map<std::string /* type_url */, ResponseState>
resource_type_response_state_;
std::set<std::string /*resource_type*/> resource_types_to_ignore_;
std::map<std::string /*resource_type*/, int> resource_type_min_versions_;
// An instance data member containing the current state of all resources.
// Note that an entry will exist whenever either of the following is true:
// - The resource exists (i.e., has been created by SetResource() and has not
// yet been destroyed by UnsetResource()).
// - There is at least one subscription for the resource.
ResourceMap resource_map_;
grpc_core::Mutex clients_mu_;
std::set<std::string> clients_;
};
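// Fake xDS Load Reporting Service (LRS) implementation supporting both the
// v2 and v3 transport protocols. Load reports received from the client are
// queued and returned to tests by WaitForLoadReport().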
class LrsServiceImpl : public std::enable_shared_from_this<LrsServiceImpl> {
public:
explicit LrsServiceImpl(int client_load_reporting_interval_seconds)
: v2_rpc_service_(this),
v3_rpc_service_(this),
client_load_reporting_interval_seconds_(
client_load_reporting_interval_seconds),
cluster_names_({kDefaultClusterName}) {}
::envoy::service::load_stats::v2::LoadReportingService::Service*
v2_rpc_service() {
return &v2_rpc_service_;
}
::envoy::service::load_stats::v3::LoadReportingService::Service*
v3_rpc_service() {
return &v3_rpc_service_;
}
size_t request_count() {
return v2_rpc_service_.request_count() + v3_rpc_service_.request_count();
}
size_t response_count() {
return v2_rpc_service_.response_count() + v3_rpc_service_.response_count();
}
// Must be called before the LRS call is started.
void set_send_all_clusters(bool send_all_clusters) {
send_all_clusters_ = send_all_clusters;
}
void set_cluster_names(const std::set<std::string>& cluster_names) {
cluster_names_ = cluster_names;
}
void Start() {
lrs_done_ = false;
result_queue_.clear();
}
void Shutdown() {
{
grpc_core::MutexLock lock(&lrs_mu_);
NotifyDoneWithLrsCallLocked();
}
gpr_log(GPR_INFO, "LRS[%p]: shut down", this);
}
std::vector<ClientStats> WaitForLoadReport() {
grpc_core::MutexLock lock(&load_report_mu_);
grpc_core::CondVar cv;
if (result_queue_.empty()) {
load_report_cond_ = &cv;
grpc_core::WaitUntil(load_report_cond_, &load_report_mu_,
[this] { return !result_queue_.empty(); });
load_report_cond_ = nullptr;
}
std::vector<ClientStats> result = std::move(result_queue_.front());
result_queue_.pop_front();
return result;
}
void NotifyDoneWithLrsCall() {
grpc_core::MutexLock lock(&lrs_mu_);
NotifyDoneWithLrsCallLocked();
}
private:
template <class RpcApi, class LoadStatsRequest, class LoadStatsResponse>
class RpcService : public CountedService<typename RpcApi::Service> {
public:
using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>;
explicit RpcService(LrsServiceImpl* parent) : parent_(parent) {}
Status StreamLoadStats(ServerContext* /*context*/,
Stream* stream) override {
gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats starts", this);
EXPECT_GT(parent_->client_load_reporting_interval_seconds_, 0);
// Take a reference to the LrsServiceImpl object; the reference goes
// out of scope when this method exits, ensuring the parent is not
// destroyed until this stream is complete.
std::shared_ptr<LrsServiceImpl> lrs_service_impl =
parent_->shared_from_this();
// Read initial request.
LoadStatsRequest request;
if (stream->Read(&request)) {
CountedService<typename RpcApi::Service>::IncreaseRequestCount();
// Verify client features.
EXPECT_THAT(
request.node().client_features(),
::testing::Contains("envoy.lrs.supports_send_all_clusters"));
// Send initial response.
LoadStatsResponse response;
if (parent_->send_all_clusters_) {
response.set_send_all_clusters(true);
} else {
for (const std::string& cluster_name : parent_->cluster_names_) {
response.add_clusters(cluster_name);
}
}
response.mutable_load_reporting_interval()->set_seconds(
parent_->client_load_reporting_interval_seconds_);
stream->Write(response);
CountedService<typename RpcApi::Service>::IncreaseResponseCount();
// Wait for report.
request.Clear();
while (stream->Read(&request)) {
gpr_log(GPR_INFO, "LRS[%p]: received client load report message: %s",
this, request.DebugString().c_str());
std::vector<ClientStats> stats;
for (const auto& cluster_stats : request.cluster_stats()) {
stats.emplace_back(cluster_stats);
}
grpc_core::MutexLock lock(&parent_->load_report_mu_);
parent_->result_queue_.emplace_back(std::move(stats));
if (parent_->load_report_cond_ != nullptr) {
parent_->load_report_cond_->Signal();
}
}
// Wait until notified done.
grpc_core::MutexLock lock(&parent_->lrs_mu_);
grpc_core::WaitUntil(&parent_->lrs_cv_, &parent_->lrs_mu_,
[this] { return parent_->lrs_done_; });
}
gpr_log(GPR_INFO, "LRS[%p]: StreamLoadStats done", this);
return Status::OK;
}
private:
LrsServiceImpl* parent_;
};
void NotifyDoneWithLrsCallLocked() {
if (!lrs_done_) {
lrs_done_ = true;
lrs_cv_.SignalAll();
}
}
RpcService<::envoy::service::load_stats::v2::LoadReportingService,
::envoy::service::load_stats::v2::LoadStatsRequest,
::envoy::service::load_stats::v2::LoadStatsResponse>
v2_rpc_service_;
RpcService<::envoy::service::load_stats::v3::LoadReportingService,
::envoy::service::load_stats::v3::LoadStatsRequest,
::envoy::service::load_stats::v3::LoadStatsResponse>
v3_rpc_service_;
const int client_load_reporting_interval_seconds_;
bool send_all_clusters_ = false;
std::set<std::string> cluster_names_;
grpc_core::CondVar lrs_cv_;
grpc_core::Mutex lrs_mu_; // Protects lrs_done_.
bool lrs_done_ = false;
grpc_core::Mutex load_report_mu_; // Protects the members below.
grpc_core::CondVar* load_report_cond_ = nullptr;
std::deque<std::vector<ClientStats>> result_queue_;
};
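// Encodes the dimensions along which the tests are parameterized: resolver
// type (xds or fake), load reporting, RDS testing, xDS API version (v2 or
// v3), and whether XdsCredentials are used.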
class TestType {
public:
TestType(bool use_xds_resolver, bool enable_load_reporting,
bool enable_rds_testing = false, bool use_v2 = false,
bool use_xds_credentials = false)
: use_xds_resolver_(use_xds_resolver),
enable_load_reporting_(enable_load_reporting),
enable_rds_testing_(enable_rds_testing),
use_v2_(use_v2),
use_xds_credentials_(use_xds_credentials) {}
bool use_xds_resolver() const { return use_xds_resolver_; }
bool enable_load_reporting() const { return enable_load_reporting_; }
bool enable_rds_testing() const { return enable_rds_testing_; }
bool use_v2() const { return use_v2_; }
bool use_xds_credentials() const { return use_xds_credentials_; }
std::string AsString() const {
std::string retval = (use_xds_resolver_ ? "XdsResolver" : "FakeResolver");
retval += (use_v2_ ? "V2" : "V3");
if (enable_load_reporting_) retval += "WithLoadReporting";
if (enable_rds_testing_) retval += "Rds";
if (use_xds_credentials_) retval += "XdsCreds";
return retval;
}
private:
const bool use_xds_resolver_;
const bool enable_load_reporting_;
const bool enable_rds_testing_;
const bool use_v2_;
const bool use_xds_credentials_;
};
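// Reads the contents of a file into a string, crashing if the file cannot
// be loaded.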
std::string ReadFile(const char* file_path) {
grpc_slice slice;
GPR_ASSERT(
GRPC_LOG_IF_ERROR("load_file", grpc_load_file(file_path, 0, &slice)));
std::string file_contents(grpc_core::StringViewFromSlice(slice));
grpc_slice_unref(slice);
return file_contents;
}
grpc_core::PemKeyCertPairList ReadTlsIdentityPair(const char* key_path,
const char* cert_path) {
return grpc_core::PemKeyCertPairList{
grpc_core::PemKeyCertPair(ReadFile(key_path), ReadFile(cert_path))};
}
// Based on StaticDataCertificateProvider, but provides alternate certificates
// if the certificate name is not empty.
class FakeCertificateProvider final : public grpc_tls_certificate_provider {
public:
struct CertData {
std::string root_certificate;
grpc_core::PemKeyCertPairList identity_key_cert_pairs;
};
using CertDataMap = std::map<std::string /*cert_name */, CertData>;
explicit FakeCertificateProvider(CertDataMap cert_data_map)
: distributor_(
grpc_core::MakeRefCounted<grpc_tls_certificate_distributor>()),
cert_data_map_(std::move(cert_data_map)) {
distributor_->SetWatchStatusCallback([this](std::string cert_name,
bool root_being_watched,
bool identity_being_watched) {
if (!root_being_watched && !identity_being_watched) return;
auto it = cert_data_map_.find(cert_name);
if (it == cert_data_map_.end()) {
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(
absl::StrCat("No certificates available for cert_name \"",
cert_name, "\"")
.c_str());
distributor_->SetErrorForCert(cert_name, GRPC_ERROR_REF(error),
GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
} else {
absl::optional<std::string> root_certificate;
absl::optional<grpc_core::PemKeyCertPairList> pem_key_cert_pairs;
if (root_being_watched) {
root_certificate = it->second.root_certificate;
}
if (identity_being_watched) {
pem_key_cert_pairs = it->second.identity_key_cert_pairs;
}
distributor_->SetKeyMaterials(cert_name, std::move(root_certificate),
std::move(pem_key_cert_pairs));
}
});
}
~FakeCertificateProvider() override {
distributor_->SetWatchStatusCallback(nullptr);
}
grpc_core::RefCountedPtr<grpc_tls_certificate_distributor> distributor()
const override {
return distributor_;
}
private:
grpc_core::RefCountedPtr<grpc_tls_certificate_distributor> distributor_;
CertDataMap cert_data_map_;
};
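// Certificate provider factory whose providers serve the certificates found
// in a test-controlled CertDataMap (passed by pointer to pointer so tests
// can install or swap the map at runtime).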
class FakeCertificateProviderFactory
: public grpc_core::CertificateProviderFactory {
public:
class Config : public grpc_core::CertificateProviderFactory::Config {
public:
explicit Config(const char* name) : name_(name) {}
const char* name() const override { return name_; }
std::string ToString() const override { return "{}"; }
private:
const char* name_;
};
FakeCertificateProviderFactory(
const char* name, FakeCertificateProvider::CertDataMap** cert_data_map)
: name_(name), cert_data_map_(cert_data_map) {
GPR_ASSERT(cert_data_map != nullptr);
}
const char* name() const override { return name_; }
grpc_core::RefCountedPtr<grpc_core::CertificateProviderFactory::Config>
CreateCertificateProviderConfig(const grpc_core::Json& /*config_json*/,
grpc_error** /*error*/) override {
return grpc_core::MakeRefCounted<Config>(name_);
}
grpc_core::RefCountedPtr<grpc_tls_certificate_provider>
CreateCertificateProvider(
grpc_core::RefCountedPtr<grpc_core::CertificateProviderFactory::Config>
/*config*/) override {
if (*cert_data_map_ == nullptr) return nullptr;
return grpc_core::MakeRefCounted<FakeCertificateProvider>(**cert_data_map_);
}
private:
const char* name_;
FakeCertificateProvider::CertDataMap** cert_data_map_;
};
// Global variables for each provider.
FakeCertificateProvider::CertDataMap* g_fake1_cert_data_map = nullptr;
FakeCertificateProvider::CertDataMap* g_fake2_cert_data_map = nullptr;
int ServerAuthCheckSchedule(void* /* config_user_data */,
grpc_tls_server_authorization_check_arg* arg) {
arg->success = 1;
arg->status = GRPC_STATUS_OK;
return 0; /* synchronous check */
}
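// Builds TLS channel credentials (via the core API) that use the test CA and
// server key/cert pair, skip hostname verification, and use a server
// authorization check that always succeeds. Used as the fallback credentials
// when XdsCredentials are enabled.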
std::shared_ptr<ChannelCredentials> CreateTlsFallbackCredentials() {
// TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
grpc_tls_credentials_options* options = grpc_tls_credentials_options_create();
grpc_tls_credentials_options_set_server_verification_option(
options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
grpc_tls_credentials_options_set_certificate_provider(
options,
grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
ReadFile(kCaCertPath),
ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
.get());
grpc_tls_credentials_options_watch_root_certs(options);
grpc_tls_credentials_options_watch_identity_key_cert_pairs(options);
grpc_tls_server_authorization_check_config* check_config =
grpc_tls_server_authorization_check_config_create(
nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
grpc_tls_credentials_options_set_server_authorization_check_config(
options, check_config);
auto channel_creds = std::make_shared<SecureChannelCredentials>(
grpc_tls_credentials_create(options));
grpc_tls_server_authorization_check_config_release(check_config);
return channel_creds;
}
namespace {
void* response_generator_arg_copy(void* p) {
auto* generator = static_cast<grpc_core::FakeResolverResponseGenerator*>(p);
generator->Ref().release();
return p;
}
void response_generator_arg_destroy(void* p) {
auto* generator = static_cast<grpc_core::FakeResolverResponseGenerator*>(p);
generator->Unref();
}
int response_generator_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
const grpc_arg_pointer_vtable
kLogicalDnsClusterResolverResponseGeneratorVtable = {
response_generator_arg_copy, response_generator_arg_destroy,
response_generator_cmp};
} // namespace
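// Base fixture for the xDS end-to-end tests. SetUp() points the XdsClient at
// the bootstrap config for the selected xDS version, starts the requested
// numbers of backends and balancers (ADS + optional LRS), installs the
// default LDS/RDS/CDS resources on each balancer, and creates the client
// channel and stubs under test.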
class XdsEnd2endTest : public ::testing::TestWithParam<TestType> {
protected:
XdsEnd2endTest(size_t num_backends, size_t num_balancers,
int client_load_reporting_interval_seconds = 100,
bool use_xds_enabled_server = false,
bool bootstrap_contents_from_env_var = false)
: num_backends_(num_backends),
num_balancers_(num_balancers),
client_load_reporting_interval_seconds_(
client_load_reporting_interval_seconds),
use_xds_enabled_server_(use_xds_enabled_server),
bootstrap_contents_from_env_var_(bootstrap_contents_from_env_var) {}
void SetUp() override {
if (bootstrap_contents_from_env_var_) {
gpr_setenv("GRPC_XDS_BOOTSTRAP_CONFIG",
GetParam().use_v2() ? kBootstrapFileV2 : kBootstrapFileV3);
} else {
gpr_setenv("GRPC_XDS_BOOTSTRAP", GetParam().use_v2()
? g_bootstrap_file_v2
: g_bootstrap_file_v3);
}
g_port_saver->Reset();
bool localhost_resolves_to_ipv4 = false;
bool localhost_resolves_to_ipv6 = false;
grpc_core::LocalhostResolves(&localhost_resolves_to_ipv4,
&localhost_resolves_to_ipv6);
ipv6_only_ = !localhost_resolves_to_ipv4 && localhost_resolves_to_ipv6;
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
// Inject xDS channel response generator.
lb_channel_response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
xds_channel_args_to_add_.emplace_back(
grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
lb_channel_response_generator_.get()));
// Inject xDS logical cluster resolver response generator.
logical_dns_cluster_resolver_response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
if (xds_resource_does_not_exist_timeout_ms_ > 0) {
xds_channel_args_to_add_.emplace_back(grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS),
xds_resource_does_not_exist_timeout_ms_));
}
xds_channel_args_.num_args = xds_channel_args_to_add_.size();
xds_channel_args_.args = xds_channel_args_to_add_.data();
grpc_core::internal::SetXdsChannelArgsForTest(&xds_channel_args_);
// Make sure each test creates a new XdsClient instance rather than
// reusing the one from the previous test. This avoids spurious failures
// caused when a load reporting test runs after a non-load reporting test
// and the XdsClient is still talking to the old LRS server, which fails
// because it's not expecting the client to connect. It also
// ensures that each test can independently set the global channel
// args for the xDS channel.
grpc_core::internal::UnsetGlobalXdsClientForTest();
// Initialize default xDS resources.
// Construct LDS resource.
default_listener_.set_name(kServerName);
HttpConnectionManager http_connection_manager;
auto* filter = http_connection_manager.add_http_filters();
filter->set_name("router");
filter->mutable_typed_config()->PackFrom(
envoy::extensions::filters::http::router::v3::Router());
default_listener_.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
// Construct RDS resource.
default_route_config_.set_name(kDefaultRouteConfigurationName);
auto* virtual_host = default_route_config_.add_virtual_hosts();
virtual_host->add_domains("*");
auto* route = virtual_host->add_routes();
route->mutable_match()->set_prefix("");
route->mutable_route()->set_cluster(kDefaultClusterName);
// Construct CDS resource.
default_cluster_.set_name(kDefaultClusterName);
default_cluster_.set_type(Cluster::EDS);
auto* eds_config = default_cluster_.mutable_eds_cluster_config();
eds_config->mutable_eds_config()->mutable_ads();
eds_config->set_service_name(kDefaultEdsServiceName);
default_cluster_.set_lb_policy(Cluster::ROUND_ROBIN);
if (GetParam().enable_load_reporting()) {
default_cluster_.mutable_lrs_server()->mutable_self();
}
// Start the backends.
for (size_t i = 0; i < num_backends_; ++i) {
backends_.emplace_back(new BackendServerThread(use_xds_enabled_server_));
backends_.back()->Start();
}
// Start the load balancers.
for (size_t i = 0; i < num_balancers_; ++i) {
balancers_.emplace_back(
new BalancerServerThread(GetParam().enable_load_reporting()
? client_load_reporting_interval_seconds_
: 0));
balancers_.back()->Start();
// Initialize resources.
SetListenerAndRouteConfiguration(i, default_listener_,
default_route_config_);
balancers_.back()->ads_service()->SetCdsResource(default_cluster_);
}
ResetStub();
}
const char* DefaultEdsServiceName() const {
return GetParam().use_xds_resolver() ? kDefaultEdsServiceName : kServerName;
}
void TearDown() override {
ShutdownAllBackends();
for (auto& balancer : balancers_) balancer->Shutdown();
// Clear global xDS channel args, since they will go out of scope
// when this test object is destroyed.
grpc_core::internal::SetXdsChannelArgsForTest(nullptr);
gpr_unsetenv("GRPC_XDS_BOOTSTRAP");
gpr_unsetenv("GRPC_XDS_BOOTSTRAP_CONFIG");
}
void StartAllBackends() {
for (auto& backend : backends_) backend->Start();
}
void StartBackend(size_t index) { backends_[index]->Start(); }
void ShutdownAllBackends() {
for (auto& backend : backends_) backend->Shutdown();
}
void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }
void ResetStub(int failover_timeout = 0) {
channel_ = CreateChannel(failover_timeout);
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_);
stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_);
}
std::shared_ptr<Channel> CreateChannel(
int failover_timeout = 0, const char* server_name = kServerName,
grpc_core::FakeResolverResponseGenerator* response_generator = nullptr) {
ChannelArguments args;
if (failover_timeout > 0) {
args.SetInt(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS, failover_timeout);
}
// If the parent channel is using the fake resolver, we inject the
// response generator here.
if (!GetParam().use_xds_resolver()) {
if (response_generator == nullptr) {
response_generator = response_generator_.get();
}
args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
response_generator);
}
args.SetPointerWithVtable(
GRPC_ARG_XDS_LOGICAL_DNS_CLUSTER_FAKE_RESOLVER_RESPONSE_GENERATOR,
logical_dns_cluster_resolver_response_generator_.get(),
&kLogicalDnsClusterResolverResponseGeneratorVtable);
std::string uri = absl::StrCat(
GetParam().use_xds_resolver() ? "xds" : "fake", ":///", server_name);
std::shared_ptr<ChannelCredentials> channel_creds =
GetParam().use_xds_credentials()
? experimental::XdsCredentials(CreateTlsFallbackCredentials())
: std::make_shared<SecureChannelCredentials>(
grpc_fake_transport_security_credentials_create());
return ::grpc::CreateCustomChannel(uri, channel_creds, args);
}
enum RpcService {
SERVICE_ECHO,
SERVICE_ECHO1,
SERVICE_ECHO2,
};
enum RpcMethod {
METHOD_ECHO,
METHOD_ECHO1,
METHOD_ECHO2,
};
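// Per-RPC options used by SendRpc() and the helpers built on top of it.
// Setters return *this so options can be chained, e.g.:
//   CheckRpcSendOk(5, RpcOptions().set_timeout_ms(2000)
//                          .set_wait_for_ready(true));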
struct RpcOptions {
RpcService service = SERVICE_ECHO;
RpcMethod method = METHOD_ECHO;
int timeout_ms = 1000;
bool wait_for_ready = false;
bool server_fail = false;
std::vector<std::pair<std::string, std::string>> metadata;
RpcOptions() {}
RpcOptions& set_rpc_service(RpcService rpc_service) {
service = rpc_service;
return *this;
}
RpcOptions& set_rpc_method(RpcMethod rpc_method) {
method = rpc_method;
return *this;
}
RpcOptions& set_timeout_ms(int rpc_timeout_ms) {
timeout_ms = rpc_timeout_ms;
return *this;
}
RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) {
wait_for_ready = rpc_wait_for_ready;
return *this;
}
RpcOptions& set_server_fail(bool rpc_server_fail) {
server_fail = rpc_server_fail;
return *this;
}
RpcOptions& set_metadata(
std::vector<std::pair<std::string, std::string>> rpc_metadata) {
metadata = std::move(rpc_metadata);
return *this;
}
};
template <typename Stub>
Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options,
ClientContext* context, EchoRequest& request,
EchoResponse* response) {
switch (rpc_options.method) {
case METHOD_ECHO:
return (*stub)->Echo(context, request, response);
case METHOD_ECHO1:
return (*stub)->Echo1(context, request, response);
case METHOD_ECHO2:
return (*stub)->Echo2(context, request, response);
}
}
void ResetBackendCounters(size_t start_index = 0, size_t stop_index = 0) {
if (stop_index == 0) stop_index = backends_.size();
for (size_t i = start_index; i < stop_index; ++i) {
backends_[i]->backend_service()->ResetCounters();
backends_[i]->backend_service1()->ResetCounters();
backends_[i]->backend_service2()->ResetCounters();
}
}
bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0,
const RpcOptions& rpc_options = RpcOptions()) {
if (stop_index == 0) stop_index = backends_.size();
for (size_t i = start_index; i < stop_index; ++i) {
switch (rpc_options.service) {
case SERVICE_ECHO:
if (backends_[i]->backend_service()->request_count() == 0) {
return false;
}
break;
case SERVICE_ECHO1:
if (backends_[i]->backend_service1()->request_count() == 0) {
return false;
}
break;
case SERVICE_ECHO2:
if (backends_[i]->backend_service2()->request_count() == 0) {
return false;
}
break;
}
}
return true;
}
void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
int* num_drops,
const RpcOptions& rpc_options = RpcOptions()) {
const Status status = SendRpc(rpc_options);
if (status.ok()) {
++*num_ok;
} else {
if (status.error_message() == "Call dropped by load balancing policy") {
++*num_drops;
} else {
++*num_failure;
}
}
++*num_total;
}
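// Sends RPCs until every backend in [start_index, stop_index) has seen at
// least one request, then returns the (ok, failure, drop) counts observed
// during the warm-up.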
std::tuple<int, int, int> WaitForAllBackends(
size_t start_index = 0, size_t stop_index = 0, bool reset_counters = true,
const RpcOptions& rpc_options = RpcOptions(),
bool allow_failures = false) {
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
int num_total = 0;
while (!SeenAllBackends(start_index, stop_index, rpc_options)) {
SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops,
rpc_options);
}
if (reset_counters) ResetBackendCounters();
gpr_log(GPR_INFO,
"Performed %d warm up requests against the backends. "
"%d succeeded, %d failed, %d dropped.",
num_total, num_ok, num_failure, num_drops);
if (!allow_failures) EXPECT_EQ(num_failure, 0);
return std::make_tuple(num_ok, num_failure, num_drops);
}
void WaitForBackend(size_t backend_idx, bool reset_counters = true,
bool require_success = false) {
gpr_log(GPR_INFO, "========= WAITING FOR BACKEND %lu ==========",
static_cast<unsigned long>(backend_idx));
do {
Status status = SendRpc();
if (require_success) {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
}
} while (backends_[backend_idx]->backend_service()->request_count() == 0);
if (reset_counters) ResetBackendCounters();
gpr_log(GPR_INFO, "========= BACKEND %lu READY ==========",
static_cast<unsigned long>(backend_idx));
}
grpc_core::ServerAddressList CreateAddressListFromPortList(
const std::vector<int>& ports) {
grpc_core::ServerAddressList addresses;
for (int port : ports) {
absl::StatusOr<grpc_core::URI> lb_uri = grpc_core::URI::Parse(
absl::StrCat(ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", port));
GPR_ASSERT(lb_uri.ok());
grpc_resolved_address address;
GPR_ASSERT(grpc_parse_uri(*lb_uri, &address));
addresses.emplace_back(address.addr, address.len, nullptr);
}
return addresses;
}
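// Sets the next response returned by the fake resolver for the parent
// channel.  No-op when the xds resolver is in use, since resolution then
// comes from the xDS configuration instead.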
void SetNextResolution(
const std::vector<int>& ports,
grpc_core::FakeResolverResponseGenerator* response_generator = nullptr) {
if (GetParam().use_xds_resolver()) return; // Not used with xds resolver.
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(ports);
grpc_error* error = GRPC_ERROR_NONE;
const char* service_config_json =
GetParam().enable_load_reporting()
? kDefaultServiceConfig
: kDefaultServiceConfigWithoutLoadReporting;
result.service_config =
grpc_core::ServiceConfig::Create(nullptr, service_config_json, &error);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
ASSERT_NE(result.service_config.get(), nullptr);
if (response_generator == nullptr) {
response_generator = response_generator_.get();
}
response_generator->SetResponse(std::move(result));
}
void SetNextResolutionForLbChannelAllBalancers(
const char* service_config_json = nullptr,
const char* expected_targets = nullptr) {
std::vector<int> ports;
for (size_t i = 0; i < balancers_.size(); ++i) {
ports.emplace_back(balancers_[i]->port());
}
SetNextResolutionForLbChannel(ports, service_config_json, expected_targets);
}
void SetNextResolutionForLbChannel(const std::vector<int>& ports,
const char* service_config_json = nullptr,
const char* expected_targets = nullptr) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(ports);
if (service_config_json != nullptr) {
grpc_error* error = GRPC_ERROR_NONE;
result.service_config = grpc_core::ServiceConfig::Create(
nullptr, service_config_json, &error);
ASSERT_NE(result.service_config.get(), nullptr);
ASSERT_EQ(error, GRPC_ERROR_NONE) << grpc_error_string(error);
}
if (expected_targets != nullptr) {
grpc_arg expected_targets_arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS),
const_cast<char*>(expected_targets));
result.args =
grpc_channel_args_copy_and_add(nullptr, &expected_targets_arg, 1);
}
lb_channel_response_generator_->SetResponse(std::move(result));
}
void SetNextReresolutionResponse(const std::vector<int>& ports) {
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(ports);
response_generator_->SetReresolutionResponse(std::move(result));
}
const std::vector<int> GetBackendPorts(size_t start_index = 0,
size_t stop_index = 0) const {
if (stop_index == 0) stop_index = backends_.size();
std::vector<int> backend_ports;
for (size_t i = start_index; i < stop_index; ++i) {
backend_ports.push_back(backends_[i]->port());
}
return backend_ports;
}
Status SendRpc(const RpcOptions& rpc_options = RpcOptions(),
EchoResponse* response = nullptr) {
const bool local_response = (response == nullptr);
if (local_response) response = new EchoResponse;
EchoRequest request;
ClientContext context;
for (const auto& metadata : rpc_options.metadata) {
context.AddMetadata(metadata.first, metadata.second);
}
if (rpc_options.timeout_ms != 0) {
context.set_deadline(
grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms));
}
if (rpc_options.wait_for_ready) context.set_wait_for_ready(true);
request.set_message(kRequestMessage);
if (rpc_options.server_fail) {
request.mutable_param()->mutable_expected_error()->set_code(
GRPC_STATUS_FAILED_PRECONDITION);
}
Status status;
switch (rpc_options.service) {
case SERVICE_ECHO:
status =
SendRpcMethod(&stub_, rpc_options, &context, request, response);
break;
case SERVICE_ECHO1:
status =
SendRpcMethod(&stub1_, rpc_options, &context, request, response);
break;
case SERVICE_ECHO2:
status =
SendRpcMethod(&stub2_, rpc_options, &context, request, response);
break;
}
if (local_response) delete response;
return status;
}
void CheckRpcSendOk(const size_t times = 1,
const RpcOptions& rpc_options = RpcOptions()) {
for (size_t i = 0; i < times; ++i) {
EchoResponse response;
const Status status = SendRpc(rpc_options, &response);
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
void CheckRpcSendFailure(
const size_t times = 1, const RpcOptions& rpc_options = RpcOptions(),
const StatusCode expected_error_code = StatusCode::OK) {
for (size_t i = 0; i < times; ++i) {
const Status status = SendRpc(rpc_options);
EXPECT_FALSE(status.ok());
if (expected_error_code != StatusCode::OK) {
EXPECT_EQ(expected_error_code, status.error_code());
}
}
}
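// Builds a client-side Listener whose ApiListener wraps an
// HttpConnectionManager containing the given RouteConfiguration inline plus
// a terminal router filter.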
static Listener BuildListener(const RouteConfiguration& route_config) {
HttpConnectionManager http_connection_manager;
*(http_connection_manager.mutable_route_config()) = route_config;
auto* filter = http_connection_manager.add_http_filters();
filter->set_name("router");
filter->mutable_typed_config()->PackFrom(
envoy::extensions::filters::http::router::v3::Router());
Listener listener;
listener.set_name(kServerName);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
return listener;
}
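// Builds a ClusterLoadAssignment (EDS resource) from the given args: one
// endpoint per port in each locality, plus any configured drop categories.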
ClusterLoadAssignment BuildEdsResource(
const AdsServiceImpl::EdsResourceArgs& args,
const char* eds_service_name = kDefaultEdsServiceName) {
ClusterLoadAssignment assignment;
assignment.set_cluster_name(eds_service_name);
for (const auto& locality : args.locality_list) {
auto* endpoints = assignment.add_endpoints();
endpoints->mutable_load_balancing_weight()->set_value(locality.lb_weight);
endpoints->set_priority(locality.priority);
endpoints->mutable_locality()->set_region(kDefaultLocalityRegion);
endpoints->mutable_locality()->set_zone(kDefaultLocalityZone);
endpoints->mutable_locality()->set_sub_zone(locality.sub_zone);
for (size_t i = 0; i < locality.ports.size(); ++i) {
const int& port = locality.ports[i];
auto* lb_endpoints = endpoints->add_lb_endpoints();
if (locality.health_statuses.size() > i &&
locality.health_statuses[i] != HealthStatus::UNKNOWN) {
lb_endpoints->set_health_status(locality.health_statuses[i]);
}
auto* endpoint = lb_endpoints->mutable_endpoint();
auto* address = endpoint->mutable_address();
auto* socket_address = address->mutable_socket_address();
socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
socket_address->set_port_value(port);
}
}
if (!args.drop_categories.empty()) {
auto* policy = assignment.mutable_policy();
for (const auto& p : args.drop_categories) {
const std::string& name = p.first;
const uint32_t parts_per_million = p.second;
auto* drop_overload = policy->add_drop_overloads();
drop_overload->set_category(name);
auto* drop_percentage = drop_overload->mutable_drop_percentage();
drop_percentage->set_numerator(parts_per_million);
drop_percentage->set_denominator(args.drop_denominator);
}
}
return assignment;
}
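// Installs the given Listener and RouteConfiguration on balancer idx.  When
// RDS testing is enabled, the route config is served as a separate RDS
// resource; otherwise it is inlined into the Listener.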
void SetListenerAndRouteConfiguration(
int idx, Listener listener, const RouteConfiguration& route_config) {
auto* api_listener =
listener.mutable_api_listener()->mutable_api_listener();
HttpConnectionManager http_connection_manager;
api_listener->UnpackTo(&http_connection_manager);
if (GetParam().enable_rds_testing()) {
auto* rds = http_connection_manager.mutable_rds();
rds->set_route_config_name(kDefaultRouteConfigurationName);
rds->mutable_config_source()->mutable_ads();
balancers_[idx]->ads_service()->SetRdsResource(route_config);
} else {
*http_connection_manager.mutable_route_config() = route_config;
}
api_listener->PackFrom(http_connection_manager);
balancers_[idx]->ads_service()->SetLdsResource(listener);
}
void SetRouteConfiguration(int idx, const RouteConfiguration& route_config) {
if (GetParam().enable_rds_testing()) {
balancers_[idx]->ads_service()->SetRdsResource(route_config);
} else {
balancers_[idx]->ads_service()->SetLdsResource(
BuildListener(route_config));
}
}
AdsServiceImpl::ResponseState RouteConfigurationResponseState(int idx) const {
AdsServiceImpl* ads_service = balancers_[idx]->ads_service();
if (GetParam().enable_rds_testing()) {
return ads_service->rds_response_state();
}
return ads_service->lds_response_state();
}
public:
// This method is public (rather than protected) so that test subclasses
// can access it via std::bind with a qualified name.
void SetEdsResourceWithDelay(size_t i,
const ClusterLoadAssignment& assignment,
int delay_ms) {
GPR_ASSERT(delay_ms > 0);
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
balancers_[i]->ads_service()->SetEdsResource(assignment);
}
protected:
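// Base class for the backend and balancer server threads.  Each instance
// runs a gRPC server on its own port in a dedicated thread.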
class ServerThread {
public:
explicit ServerThread(bool use_xds_enabled_server = false)
: port_(g_port_saver->GetPort()),
use_xds_enabled_server_(use_xds_enabled_server) {}
virtual ~ServerThread() {}
void Start() {
gpr_log(GPR_INFO, "starting %s server on port %d", Type(), port_);
GPR_ASSERT(!running_);
running_ = true;
StartAllServices();
grpc_core::Mutex mu;
// We need to acquire the lock here in order to prevent the Signal()
// in ServerThread::Serve from firing before the Wait() below is hit.
grpc_core::MutexLock lock(&mu);
grpc_core::CondVar cond;
thread_ = absl::make_unique<std::thread>(
std::bind(&ServerThread::Serve, this, &mu, &cond));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", Type());
}
void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) {
// We need to acquire the lock here in order to prevent the Signal()
// below from firing before its corresponding Wait() is executed.
grpc_core::MutexLock lock(mu);
std::ostringstream server_address;
server_address << "localhost:" << port_;
if (use_xds_enabled_server_) {
experimental::XdsServerBuilder builder;
builder.AddListeningPort(server_address.str(), Credentials());
RegisterAllServices(&builder);
server_ = builder.BuildAndStart();
} else {
ServerBuilder builder;
builder.AddListeningPort(server_address.str(), Credentials());
RegisterAllServices(&builder);
server_ = builder.BuildAndStart();
}
cond->Signal();
}
void Shutdown() {
if (!running_) return;
gpr_log(GPR_INFO, "%s about to shutdown", Type());
ShutdownAllServices();
server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
thread_->join();
gpr_log(GPR_INFO, "%s shutdown completed", Type());
running_ = false;
}
virtual std::shared_ptr<ServerCredentials> Credentials() {
return std::make_shared<SecureServerCredentials>(
grpc_fake_transport_security_server_credentials_create());
}
int port() const { return port_; }
bool use_xds_enabled_server() const { return use_xds_enabled_server_; }
private:
virtual void RegisterAllServices(ServerBuilder* builder) = 0;
virtual void StartAllServices() = 0;
virtual void ShutdownAllServices() = 0;
virtual const char* Type() = 0;
const int port_;
std::unique_ptr<Server> server_;
std::unique_ptr<std::thread> thread_;
bool running_ = false;
const bool use_xds_enabled_server_;
};
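// A backend server hosting the three Echo test services.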
class BackendServerThread : public ServerThread {
public:
explicit BackendServerThread(bool use_xds_enabled_server)
: ServerThread(use_xds_enabled_server) {}
BackendServiceImpl<::grpc::testing::EchoTestService::Service>*
backend_service() {
return &backend_service_;
}
BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>*
backend_service1() {
return &backend_service1_;
}
BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>*
backend_service2() {
return &backend_service2_;
}
std::shared_ptr<ServerCredentials> Credentials() override {
if (GetParam().use_xds_credentials()) {
if (use_xds_enabled_server()) {
// We are testing the server's use of XdsServerCredentials.
return experimental::XdsServerCredentials(
InsecureServerCredentials());
} else {
// We are testing the client's use of XdsCredentials.
std::string root_cert = ReadFile(kCaCertPath);
std::string identity_cert = ReadFile(kServerCertPath);
std::string private_key = ReadFile(kServerKeyPath);
std::vector<experimental::IdentityKeyCertPair>
identity_key_cert_pairs = {{private_key, identity_cert}};
auto certificate_provider = std::make_shared<
grpc::experimental::StaticDataCertificateProvider>(
root_cert, identity_key_cert_pairs);
grpc::experimental::TlsServerCredentialsOptions options(
certificate_provider);
options.watch_root_certs();
options.watch_identity_key_cert_pairs();
options.set_cert_request_type(
GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY);
return grpc::experimental::TlsServerCredentials(options);
}
}
return ServerThread::Credentials();
}
private:
void RegisterAllServices(ServerBuilder* builder) override {
builder->RegisterService(&backend_service_);
builder->RegisterService(&backend_service1_);
builder->RegisterService(&backend_service2_);
}
void StartAllServices() override {
backend_service_.Start();
backend_service1_.Start();
backend_service2_.Start();
}
void ShutdownAllServices() override {
backend_service_.Shutdown();
backend_service1_.Shutdown();
backend_service2_.Shutdown();
}
const char* Type() override { return "Backend"; }
BackendServiceImpl<::grpc::testing::EchoTestService::Service>
backend_service_;
BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>
backend_service1_;
BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>
backend_service2_;
};
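// A balancer server hosting the fake ADS and LRS services (v2 and v3).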
class BalancerServerThread : public ServerThread {
public:
explicit BalancerServerThread(int client_load_reporting_interval = 0)
: ads_service_(new AdsServiceImpl()),
lrs_service_(new LrsServiceImpl(client_load_reporting_interval)) {}
AdsServiceImpl* ads_service() { return ads_service_.get(); }
LrsServiceImpl* lrs_service() { return lrs_service_.get(); }
private:
void RegisterAllServices(ServerBuilder* builder) override {
builder->RegisterService(ads_service_->v2_rpc_service());
builder->RegisterService(ads_service_->v3_rpc_service());
builder->RegisterService(lrs_service_->v2_rpc_service());
builder->RegisterService(lrs_service_->v3_rpc_service());
}
void StartAllServices() override {
ads_service_->Start();
lrs_service_->Start();
}
void ShutdownAllServices() override {
ads_service_->Shutdown();
lrs_service_->Shutdown();
}
const char* Type() override { return "Balancer"; }
std::shared_ptr<AdsServiceImpl> ads_service_;
std::shared_ptr<LrsServiceImpl> lrs_service_;
};
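// Helper for starting an RPC on a separate thread and cancelling it later,
// used by the circuit-breaking tests to keep calls in flight.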
class LongRunningRpc {
public:
void StartRpc(grpc::testing::EchoTestService::Stub* stub) {
sender_thread_ = std::thread([this, stub]() {
EchoResponse response;
EchoRequest request;
request.mutable_param()->set_client_cancel_after_us(1 * 1000 * 1000);
request.set_message(kRequestMessage);
(void)stub->Echo(&context_, request, &response);
});
}
void CancelRpc() {
context_.TryCancel();
sender_thread_.join();
}
private:
std::thread sender_thread_;
ClientContext context_;
};
const size_t num_backends_;
const size_t num_balancers_;
const int client_load_reporting_interval_seconds_;
bool ipv6_only_ = false;
std::shared_ptr<Channel> channel_;
std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
std::unique_ptr<grpc::testing::EchoTest1Service::Stub> stub1_;
std::unique_ptr<grpc::testing::EchoTest2Service::Stub> stub2_;
std::vector<std::unique_ptr<BackendServerThread>> backends_;
std::vector<std::unique_ptr<BalancerServerThread>> balancers_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
response_generator_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
lb_channel_response_generator_;
grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
logical_dns_cluster_resolver_response_generator_;
int xds_resource_does_not_exist_timeout_ms_ = 0;
absl::InlinedVector<grpc_arg, 2> xds_channel_args_to_add_;
grpc_channel_args xds_channel_args_;
Listener default_listener_;
RouteConfiguration default_route_config_;
Cluster default_cluster_;
bool use_xds_enabled_server_;
bool bootstrap_contents_from_env_var_;
};
class BasicTest : public XdsEnd2endTest {
public:
BasicTest() : XdsEnd2endTest(4, 1) {}
};
// Tests that the balancer sends the correct response to the client, and the
// client sends RPCs to the backends using the default child policy.
TEST_P(BasicTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcsPerAddress = 100;
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
// Check LB policy name for the channel.
EXPECT_EQ(
(GetParam().use_xds_resolver() ? "xds_cluster_manager_experimental"
: "xds_cluster_resolver_experimental"),
channel_->GetLoadBalancingPolicyName());
}
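// Tests that an endpoint reported with health status DRAINING is ignored,
// so no RPCs are sent to backend 0.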
TEST_P(BasicTest, IgnoresUnhealthyEndpoints) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcsPerAddress = 100;
AdsServiceImpl::EdsResourceArgs args({
{"locality0",
GetBackendPorts(),
kDefaultLocalityWeight,
kDefaultLocalityPriority,
{HealthStatus::DRAINING}},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that trying to connect works without a call.
channel_->GetState(true /* try_to_connect */);
// We need to wait for all backends to come online.
WaitForAllBackends(/*start_index=*/1);
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * (num_backends_ - 1));
// Each backend should have gotten 100 requests.
for (size_t i = 1; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
}
// Tests that subchannel sharing works when the same backend is listed multiple
// times.
TEST_P(BasicTest, SameBackendListedMultipleTimes) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Same backend listed twice.
std::vector<int> ports(2, backends_[0]->port());
AdsServiceImpl::EdsResourceArgs args({
{"locality0", ports},
});
const size_t kNumRpcsPerAddress = 10;
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// We need to wait for the backend to come online.
WaitForBackend(0);
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * ports.size());
// Backend should have gotten 20 requests.
EXPECT_EQ(kNumRpcsPerAddress * ports.size(),
backends_[0]->backend_service()->request_count());
// And they should have come from a single client port, because of
// subchannel sharing.
EXPECT_EQ(1UL, backends_[0]->backend_service()->clients().size());
}
// Tests that RPCs will be blocked until a non-empty serverlist is received.
TEST_P(BasicTest, InitiallyEmptyServerlist) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
const int kCallDeadlineMs = kServerlistDelayMs * 2;
// First response is an empty serverlist, sent right away.
AdsServiceImpl::EdsResourceArgs::Locality empty_locality("locality0", {});
AdsServiceImpl::EdsResourceArgs args({
empty_locality,
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Send non-empty serverlist only after kServerlistDelayMs.
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts()},
});
std::thread delayed_resource_setter(std::bind(
&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), kServerlistDelayMs));
const auto t0 = system_clock::now();
// Client will block: LB will initially send empty serverlist.
CheckRpcSendOk(
1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true));
const auto elapsed_ms =
    std::chrono::duration_cast<std::chrono::milliseconds>(
        system_clock::now() - t0);
// but eventually the LB sends a serverlist update that allows the call to
// proceed. The observed delay must be larger than the delay in sending the
// populated serverlist, but still within the call's deadline (which is
// enforced by the deadline set on the RPC above).
EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs);
delayed_resource_setter.join();
}
// Tests that RPCs will fail with UNAVAILABLE instead of DEADLINE_EXCEEDED if
// all the servers are unreachable.
TEST_P(BasicTest, AllServersUnreachableFailFast) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumUnreachableServers = 5;
std::vector<int> ports;
for (size_t i = 0; i < kNumUnreachableServers; ++i) {
ports.push_back(g_port_saver->GetPort());
}
AdsServiceImpl::EdsResourceArgs args({
{"locality0", ports},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
const Status status = SendRpc();
// The error shouldn't be DEADLINE_EXCEEDED.
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
}
// Tests that RPCs fail when the backends are down, and succeed again after
// the backends are restarted.
TEST_P(BasicTest, BackendsRestart) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Stop backends. RPCs should fail.
ShutdownAllBackends();
// Send multiple failed requests, not just one, to ensure that the client
// notices that all backends are down before we restart them. With a single
// RPC, a failure here could be due to the race between the LB pick and the
// GOAWAY from the chosen backend being shut down, which would not prove
// that the client noticed that all of the backends are down. The request we
// send below (which we expect to succeed) could then hit the same race if
// the callbacks happen in the wrong order, because the client might not yet
// have noticed that all of the backends were down.
CheckRpcSendFailure(num_backends_);
// Restart all backends. RPCs should start succeeding again.
StartAllBackends();
CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true));
}
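// Tests that identical EDS updates are ignored, so the round_robin child
// policy does not reset its position in the address list.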
TEST_P(BasicTest, IgnoresDuplicateUpdates) {
const size_t kNumRpcsPerAddress = 100;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for all backends to come online.
WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server, but send an EDS update in
// between. If the update is not ignored, this will cause the
// round_robin policy to see an update, which will randomly reset its
// position in the address list.
for (size_t i = 0; i < kNumRpcsPerAddress; ++i) {
CheckRpcSendOk(2);
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
CheckRpcSendOk(2);
}
// Each backend should have gotten the right number of requests.
for (size_t i = 1; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
}
using XdsResolverOnlyTest = BasicTest;
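// Tests that resource type versions persist across ADS stream restarts: the
// restarted balancer requires version >= 1, which the client only reports if
// it retained state from the previous stream.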
TEST_P(XdsResolverOnlyTest, ResourceTypeVersionPersistsAcrossStreamRestarts) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Wait for backends to come online.
WaitForAllBackends(0, 1);
// Stop balancer.
balancers_[0]->Shutdown();
// Tell balancer to require minimum version 1 for all resource types.
balancers_[0]->ads_service()->SetResourceMinVersion(kLdsTypeUrl, 1);
balancers_[0]->ads_service()->SetResourceMinVersion(kRdsTypeUrl, 1);
balancers_[0]->ads_service()->SetResourceMinVersion(kCdsTypeUrl, 1);
balancers_[0]->ads_service()->SetResourceMinVersion(kEdsTypeUrl, 1);
// Update backend, just so we can be sure that the client has
// reconnected to the balancer.
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args2));
// Restart balancer.
balancers_[0]->Start();
// Make sure client has reconnected.
WaitForAllBackends(1, 2);
}
// Tests switching over from one cluster to another.
TEST_P(XdsResolverOnlyTest, ChangeClusters) {
const char* kNewClusterName = "new_cluster_name";
const char* kNewEdsServiceName = "new_eds_service_name";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends(0, 2);
// Populate new EDS resource.
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsServiceName));
// Populate new CDS resource.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Change RDS resource to point to new cluster.
RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(kNewClusterName);
SetListenerAndRouteConfiguration(0, default_listener_, new_route_config);
// Wait for all new backends to be used.
std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
// Make sure no RPCs failed in the transition.
EXPECT_EQ(0, std::get<1>(counts));
}
// Tests that we go into TRANSIENT_FAILURE if the Cluster disappears.
TEST_P(XdsResolverOnlyTest, ClusterRemoved) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset CDS resource.
balancers_[0]->ads_service()->UnsetResource(kCdsTypeUrl, kDefaultClusterName);
// Wait for RPCs to start failing.
do {
} while (SendRpc(RpcOptions(), nullptr).ok());
// Make sure RPCs are still failing.
CheckRpcSendFailure(1000);
// Make sure we ACK'ed the update.
EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that we restart all xDS requests when we reestablish the ADS call.
TEST_P(XdsResolverOnlyTest, RestartsRequestsUponReconnection) {
// Manually configure use of RDS.
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* rds = http_connection_manager.mutable_rds();
rds->set_route_config_name(kDefaultRouteConfigurationName);
rds->mutable_config_source()->mutable_ads();
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
balancers_[0]->ads_service()->SetLdsResource(listener);
balancers_[0]->ads_service()->SetRdsResource(default_route_config_);
const char* kNewClusterName = "new_cluster_name";
const char* kNewEdsServiceName = "new_eds_service_name";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends(0, 2);
// Now shut down and restart the balancer. When the client
// reconnects, it should automatically restart the requests for all
// resource types.
balancers_[0]->Shutdown();
balancers_[0]->Start();
// Make sure things are still working.
CheckRpcSendOk(100);
// Populate new EDS resource.
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsServiceName));
// Populate new CDS resource.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Change RDS resource to point to new cluster.
RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(kNewClusterName);
balancers_[0]->ads_service()->SetRdsResource(new_route_config);
// Wait for all new backends to be used.
std::tuple<int, int, int> counts = WaitForAllBackends(2, 4);
// Make sure no RPCs failed in the transition.
EXPECT_EQ(0, std::get<1>(counts));
}
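// Tests that a default route whose match prefix is "/" (rather than the
// empty string) is accepted and used.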
TEST_P(XdsResolverOnlyTest, DefaultRouteSpecifiesSlashPrefix) {
RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_match()
->set_prefix("/");
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
}
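// Tests that circuit breaking caps concurrent requests at the cluster's
// configured max_requests threshold and drops the excess.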
TEST_P(XdsResolverOnlyTest, CircuitBreaking) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true");
constexpr size_t kMaxConcurrentRequests = 10;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Update CDS resource to set max concurrent request.
CircuitBreakers circuit_breaks;
Cluster cluster = default_cluster_;
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Send exactly max_concurrent_requests long RPCs.
LongRunningRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].StartRpc(stub_.get());
}
// Wait for all RPCs to be in flight.
while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
kMaxConcurrentRequests) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
}
// Sending an RPC now should fail; the error message should tell us that
// we hit the max concurrent requests limit and got dropped.
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
// Cancel one RPC to allow another one through.
rpcs[0].CancelRpc();
status = SendRpc();
EXPECT_TRUE(status.ok());
for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
rpcs[i].CancelRpc();
}
// Make sure RPCs go to the correct backend:
EXPECT_EQ(kMaxConcurrentRequests + 1,
backends_[0]->backend_service()->request_count());
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING");
}
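// Tests that two channels using the same cluster share a single
// circuit-breaking call counter, so the limit applies across both channels.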
TEST_P(XdsResolverOnlyTest, CircuitBreakingMultipleChannelsShareCallCounter) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING", "true");
constexpr size_t kMaxConcurrentRequests = 10;
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Update CDS resource to set max concurrent request.
CircuitBreakers circuit_breaks;
Cluster cluster = default_cluster_;
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Create second channel.
auto response_generator2 =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
auto channel2 = CreateChannel(
/*failover_timeout=*/0, /*server_name=*/kServerName,
response_generator2.get());
auto stub2 = grpc::testing::EchoTestService::NewStub(channel2);
// Set resolution results for both channels and for the xDS channel.
SetNextResolution({});
SetNextResolution({}, response_generator2.get());
SetNextResolutionForLbChannelAllBalancers();
// Send exactly max_concurrent_requests long RPCs, alternating between
// the two channels.
LongRunningRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].StartRpc(i % 2 == 0 ? stub_.get() : stub2.get());
}
// Wait for all RPCs to be in flight.
while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
kMaxConcurrentRequests) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
}
// Sending an RPC now should fail; the error message should tell us that
// we hit the max concurrent requests limit and got dropped.
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
// Cancel one RPC to allow another one through.
rpcs[0].CancelRpc();
status = SendRpc();
EXPECT_TRUE(status.ok());
for (size_t i = 1; i < kMaxConcurrentRequests; ++i) {
rpcs[i].CancelRpc();
}
// Make sure RPCs go to the correct backend:
EXPECT_EQ(kMaxConcurrentRequests + 1,
backends_[0]->backend_service()->request_count());
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING");
}
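// Tests that the max_requests threshold is not enforced when the
// GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING env var is not set.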
TEST_P(XdsResolverOnlyTest, CircuitBreakingDisabled) {
constexpr size_t kMaxConcurrentRequests = 10;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// Update CDS resource to set max concurrent request.
CircuitBreakers circuit_breaks;
Cluster cluster = default_cluster_;
auto* threshold = cluster.mutable_circuit_breakers()->add_thresholds();
threshold->set_priority(RoutingPriority::DEFAULT);
threshold->mutable_max_requests()->set_value(kMaxConcurrentRequests);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Send exactly max_concurrent_requests long RPCs.
LongRunningRpc rpcs[kMaxConcurrentRequests];
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].StartRpc(stub_.get());
}
// Wait for all RPCs to be in flight.
while (backends_[0]->backend_service()->RpcsWaitingForClientCancel() <
kMaxConcurrentRequests) {
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_micros(1 * 1000, GPR_TIMESPAN)));
}
// Sending an RPC now should not fail, since circuit breaking is disabled.
Status status = SendRpc();
EXPECT_TRUE(status.ok());
for (size_t i = 0; i < kMaxConcurrentRequests; ++i) {
rpcs[i].CancelRpc();
}
// Make sure RPCs go to the correct backend:
EXPECT_EQ(kMaxConcurrentRequests + 1,
backends_[0]->backend_service()->request_count());
}
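// Tests that channels created for different server names share a single
// XdsClient, so the balancer sees only one connected client.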
TEST_P(XdsResolverOnlyTest, MultipleChannelsShareXdsClient) {
const char* kNewServerName = "new-server.example.com";
Listener listener = default_listener_;
listener.set_name(kNewServerName);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
WaitForAllBackends();
// Create second channel and tell it to connect to kNewServerName.
auto channel2 = CreateChannel(/*failover_timeout=*/0, kNewServerName);
channel2->GetState(/*try_to_connect=*/true);
ASSERT_TRUE(
channel2->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100)));
// Make sure there's only one client connected.
EXPECT_EQ(1UL, balancers_[0]->ads_service()->clients().size());
}
class XdsResolverLoadReportingOnlyTest : public XdsEnd2endTest {
public:
XdsResolverLoadReportingOnlyTest() : XdsEnd2endTest(4, 1, 3) {}
};
// Tests load reporting when switching over from one cluster to another.
TEST_P(XdsResolverLoadReportingOnlyTest, ChangeClusters) {
const char* kNewClusterName = "new_cluster_name";
const char* kNewEdsServiceName = "new_eds_service_name";
balancers_[0]->lrs_service()->set_cluster_names(
{kDefaultClusterName, kNewClusterName});
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// cluster kDefaultClusterName -> locality0 -> backends 0 and 1
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// cluster kNewClusterName -> locality1 -> backends 2 and 3
AdsServiceImpl::EdsResourceArgs args2({
{"locality1", GetBackendPorts(2, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsServiceName));
// CDS resource for kNewClusterName.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Wait for all backends to come online.
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(0, 2);
// The load report received at the balancer should be correct.
std::vector<ClientStats> load_report =
balancers_[0]->lrs_service()->WaitForLoadReport();
EXPECT_THAT(
load_report,
::testing::ElementsAre(::testing::AllOf(
::testing::Property(&ClientStats::cluster_name, kDefaultClusterName),
::testing::Property(
&ClientStats::locality_stats,
::testing::ElementsAre(::testing::Pair(
"locality0",
::testing::AllOf(
::testing::Field(&ClientStats::LocalityStats::
total_successful_requests,
num_ok),
::testing::Field(&ClientStats::LocalityStats::
total_requests_in_progress,
0UL),
::testing::Field(
&ClientStats::LocalityStats::total_error_requests,
num_failure),
::testing::Field(
&ClientStats::LocalityStats::total_issued_requests,
num_failure + num_ok))))),
::testing::Property(&ClientStats::total_dropped_requests,
num_drops))));
// Change RDS resource to point to new cluster.
RouteConfiguration new_route_config = default_route_config_;
new_route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->set_cluster(kNewClusterName);
SetListenerAndRouteConfiguration(0, default_listener_, new_route_config);
// Wait for all new backends to be used.
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(2, 4);
// The load report received at the balancer should be correct.
load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
EXPECT_THAT(
load_report,
::testing::ElementsAre(
::testing::AllOf(
::testing::Property(&ClientStats::cluster_name,
kDefaultClusterName),
::testing::Property(
&ClientStats::locality_stats,
::testing::ElementsAre(::testing::Pair(
"locality0",
::testing::AllOf(
::testing::Field(&ClientStats::LocalityStats::
total_successful_requests,
::testing::Lt(num_ok)),
::testing::Field(&ClientStats::LocalityStats::
total_requests_in_progress,
0UL),
::testing::Field(
&ClientStats::LocalityStats::total_error_requests,
::testing::Le(num_failure)),
::testing::Field(
&ClientStats::LocalityStats::
total_issued_requests,
::testing::Le(num_failure + num_ok)))))),
::testing::Property(&ClientStats::total_dropped_requests,
num_drops)),
::testing::AllOf(
::testing::Property(&ClientStats::cluster_name, kNewClusterName),
::testing::Property(
&ClientStats::locality_stats,
::testing::ElementsAre(::testing::Pair(
"locality1",
::testing::AllOf(
::testing::Field(&ClientStats::LocalityStats::
total_successful_requests,
::testing::Le(num_ok)),
::testing::Field(&ClientStats::LocalityStats::
total_requests_in_progress,
0UL),
::testing::Field(
&ClientStats::LocalityStats::total_error_requests,
::testing::Le(num_failure)),
::testing::Field(
&ClientStats::LocalityStats::
total_issued_requests,
::testing::Le(num_failure + num_ok)))))),
::testing::Property(&ClientStats::total_dropped_requests,
num_drops))));
int total_ok = 0;
int total_failure = 0;
for (const ClientStats& client_stats : load_report) {
total_ok += client_stats.total_successful_requests();
total_failure += client_stats.total_error_requests();
}
EXPECT_EQ(total_ok, num_ok);
EXPECT_EQ(total_failure, num_failure);
// The LRS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
}
using SecureNamingTest = BasicTest;
// Tests that secure naming check passes if target name is expected.
TEST_P(SecureNamingTest, TargetNameIsExpected) {
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr, "xds_server");
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
CheckRpcSendOk();
}
// Tests that secure naming check fails if target name is unexpected.
TEST_P(SecureNamingTest, TargetNameIsUnexpected) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()}, nullptr,
"incorrect_server_name");
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Make sure that we blow up (via abort() from the security connector) when
// the name from the balancer doesn't match expectations.
ASSERT_DEATH_IF_SUPPORTED({ CheckRpcSendOk(); }, "");
}
using LdsTest = BasicTest;
// Tests that the LDS client sends a NACK if there is no API listener in the
// Listener in the LDS response.
TEST_P(LdsTest, NoApiListener) {
auto listener = default_listener_;
listener.clear_api_listener();
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("Listener has neither address nor ApiListener"));
}
// Tests that the LDS client sends a NACK if the route_specifier in the
// http_connection_manager is neither an inlined route_config nor RDS.
TEST_P(LdsTest, WrongRouteSpecifier) {
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
http_connection_manager.mutable_scoped_routes();
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"HttpConnectionManager neither has inlined route_config nor RDS."));
}
// Tests that the LDS client sends a NACK if the rds message in the
// http_connection_manager is missing the config_source field.
TEST_P(LdsTest, RdsMissingConfigSource) {
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
http_connection_manager.mutable_rds()->set_route_config_name(
kDefaultRouteConfigurationName);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"HttpConnectionManager missing config_source for RDS."));
}
// Tests that the LDS client sends a NACK if the rds message in the
// http_connection_manager has a config_source field that does not specify ADS.
TEST_P(LdsTest, RdsConfigSourceDoesNotSpecifyAds) {
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* rds = http_connection_manager.mutable_rds();
rds->set_route_config_name(kDefaultRouteConfigurationName);
rds->mutable_config_source()->mutable_self();
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"HttpConnectionManager ConfigSource for RDS does not specify ADS."));
}
// Tests that the NACK for multiple bad LDS resources includes both errors.
TEST_P(LdsTest, MultipleBadResources) {
constexpr char kServerName2[] = "server.other.com";
auto listener = default_listener_;
listener.clear_api_listener();
balancers_[0]->ads_service()->SetLdsResource(listener);
listener.set_name(kServerName2);
balancers_[0]->ads_service()->SetLdsResource(listener);
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
// Need to create a second channel to subscribe to a second LDS resource.
auto channel2 = CreateChannel(0, kServerName2);
auto stub2 = grpc::testing::EchoTestService::NewStub(channel2);
ClientContext context;
EchoRequest request;
request.set_message(kRequestMessage);
EchoResponse response;
grpc::Status status = stub2->Echo(&context, request, &response);
EXPECT_FALSE(status.ok());
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::AllOf(
::testing::HasSubstr(absl::StrCat(
kServerName, ": Listener has neither address nor ApiListener")),
::testing::HasSubstr(
absl::StrCat(kServerName2,
": Listener has neither address nor ApiListener"))));
}
// TODO(roth): Remove this test when we remove the
// GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION environment variable guard.
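// Tests that RPCs succeed with the default filter chain when HTTP filter
// support is enabled via the fault-injection env var.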
TEST_P(LdsTest, HttpFiltersEnabled) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we fail RPCs if there is no router filter.
TEST_P(LdsTest, FailRpcsIfNoHttpRouterFilter) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
SetNextResolutionForLbChannelAllBalancers();
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
http_connection_manager.clear_http_filters();
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
Status status = SendRpc();
EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(status.error_message(), "no xDS HTTP router filter configured");
// Wait until xDS server sees ACK.
while (balancers_[0]->ads_service()->lds_response_state().state ==
AdsServiceImpl::ResponseState::SENT) {
CheckRpcSendFailure();
}
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// TODO(lidiz): As part of adding the fault injection filter, add a test
// that we ignore filters after the router filter.
// Test that we NACK empty filter names.
TEST_P(LdsTest, RejectsEmptyHttpFilterName) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* filter = http_connection_manager.add_http_filters();
filter->mutable_typed_config()->PackFrom(Listener());
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (balancers_[0]->ads_service()->lds_response_state().state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("empty filter name at index 1"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK duplicate HTTP filter names.
TEST_P(LdsTest, RejectsDuplicateHttpFilterName) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
*http_connection_manager.add_http_filters() =
http_connection_manager.http_filters(0);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (balancers_[0]->ads_service()->lds_response_state().state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("duplicate HTTP filter name: router"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unknown filter types.
TEST_P(LdsTest, RejectsUnknownHttpFilterType) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* filter = http_connection_manager.add_http_filters();
filter->set_name("unknown");
filter->mutable_typed_config()->PackFrom(Listener());
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (balancers_[0]->ads_service()->lds_response_state().state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("no filter registered for config type "
"envoy.config.listener.v3.Listener"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we ignore optional unknown filter types.
TEST_P(LdsTest, IgnoresOptionalUnknownHttpFilterType) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* filter = http_connection_manager.add_http_filters();
filter->set_name("unknown");
filter->mutable_typed_config()->PackFrom(Listener());
filter->set_is_optional(true);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolutionForLbChannelAllBalancers();
WaitForAllBackends();
EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unparseable filter configs.
TEST_P(LdsTest, RejectsUnparseableHttpFilterType) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
auto* filter = http_connection_manager.add_http_filters();
filter->set_name("unknown");
filter->mutable_typed_config()->PackFrom(listener);
filter->mutable_typed_config()->set_type_url(
"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router");
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, listener, default_route_config_);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (balancers_[0]->ads_service()->lds_response_state().state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"filter config for type "
"envoy.extensions.filters.http.router.v3.Router failed to parse"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
using LdsRdsTest = BasicTest;
// Tests that LDS client should send an ACK upon correct LDS response (with
// inlined RDS result).
TEST_P(LdsRdsTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
// Make sure we actually used the RPC service for the right version of xDS.
EXPECT_EQ(balancers_[0]->ads_service()->seen_v2_client(),
GetParam().use_v2());
EXPECT_NE(balancers_[0]->ads_service()->seen_v3_client(),
GetParam().use_v2());
}
// Tests that we go into TRANSIENT_FAILURE if the Listener is removed.
TEST_P(LdsRdsTest, ListenerRemoved) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
// We need to wait for all backends to come online.
WaitForAllBackends();
// Unset LDS resource.
balancers_[0]->ads_service()->UnsetResource(kLdsTypeUrl, kServerName);
// Wait for RPCs to start failing.
do {
} while (SendRpc(RpcOptions(), nullptr).ok());
// Make sure RPCs are still failing.
CheckRpcSendFailure(1000);
// Make sure we ACK'ed the update.
EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client ACKs the response but RPCs fail if a matching domain
// can't be found in the LDS response.
TEST_P(LdsRdsTest, NoMatchedDomain) {
RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)->clear_domains();
route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
// Do a bit of polling, to allow the ACK to get to the ADS server.
channel_->WaitForConnected(grpc_timeout_milliseconds_to_deadline(100));
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should choose the virtual host with matching domain if
// multiple virtual hosts exist in the LDS response.
TEST_P(LdsRdsTest, ChooseMatchedDomain) {
RouteConfiguration route_config = default_route_config_;
*(route_config.add_virtual_hosts()) = route_config.virtual_hosts(0);
route_config.mutable_virtual_hosts(0)->clear_domains();
route_config.mutable_virtual_hosts(0)->add_domains("unmatched_domain");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should choose the last route in the virtual host if
// multiple routes exist in the LDS response.
TEST_P(LdsRdsTest, ChooseLastRoute) {
RouteConfiguration route_config = default_route_config_;
*(route_config.mutable_virtual_hosts(0)->add_routes()) =
route_config.virtual_hosts(0).routes(0);
route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->mutable_cluster_header();
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should ignore a route which has query_parameters.
TEST_P(LdsRdsTest, RouteMatchHasQueryParameters) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_match()->add_query_parameters();
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should send an ACK if route match has a prefix
// that is either empty or a single slash.
TEST_P(LdsRdsTest, RouteMatchHasValidPrefixEmptyOrSingleSlash) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("/");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
// Tests that LDS client should ignore a route whose path prefix string
// does not start with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose prefix string contains
// more than 2 slashes.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixExtraContent) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose prefix string is "//".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPrefixDoubleSlash) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("//");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path is set but empty.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathEmptyPath) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path string does not
// start with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathNoLeadingSlash) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path string has too many
// slashes; for example, one that ends with "/".
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathTooManySlashes) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path string has only one
// slash, i.e. is missing the "/" between service and method.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathOnlyOneSlash) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path string is missing
// the service name.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingService) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("//Echo1");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should ignore a route whose path string is missing
// the method name.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathMissingMethod) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/");
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No valid routes specified."));
}
// Tests that LDS client should reject a route which has an invalid path regex.
TEST_P(LdsRdsTest, RouteMatchHasInvalidPathRegex) {
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->mutable_safe_regex()->set_regex("a[z-a]");
route1->mutable_route()->set_cluster(kNewCluster1Name);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"path matcher: Invalid regex string specified in matcher."));
}
// Tests that LDS client should send a NACK if route has an action other than
// RouteAction in the LDS response.
TEST_P(LdsRdsTest, RouteHasNoRouteAction) {
RouteConfiguration route_config = default_route_config_;
route_config.mutable_virtual_hosts(0)->mutable_routes(0)->mutable_redirect();
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("No RouteAction found in route."));
}
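// Tests that LDS client should send a NACK if a RouteAction has an empty
// cluster name.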
TEST_P(LdsRdsTest, RouteActionClusterHasEmptyClusterName) {
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster("");
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("RouteAction cluster contains empty cluster name."));
}
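// Tests that LDS client should send a NACK if the total_weight of a
// weighted_cluster does not match the sum of the individual cluster weights.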
TEST_P(LdsRdsTest, RouteActionWeightedTargetHasIncorrectTotalWeightSet) {
const size_t kWeight75 = 75;
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(kWeight75);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75 + 1);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"RouteAction weighted_cluster has incorrect total weight"));
}
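// Tests that LDS client should send a NACK if a weighted_cluster has a total
// weight of zero.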
TEST_P(LdsRdsTest, RouteActionWeightedClusterHasZeroTotalWeight) {
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(0);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(0);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"RouteAction weighted_cluster has no valid clusters specified."));
}
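// Tests that LDS client should send a NACK if a weighted_cluster entry has an
// empty cluster name.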
TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasEmptyClusterName) {
const size_t kWeight75 = 75;
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name("");
weighted_cluster1->mutable_weight()->set_value(kWeight75);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"RouteAction weighted_cluster cluster contains empty cluster name."));
}
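// Tests that LDS client should send a NACK if a weighted_cluster entry is
// missing its weight.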
TEST_P(LdsRdsTest, RouteActionWeightedTargetClusterHasNoWeight) {
const size_t kWeight75 = 75;
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"RouteAction weighted_cluster cluster missing weight"));
}
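// Tests that LDS client should send a NACK if a header matcher has an invalid
// regex.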
TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRegex) {
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("header1");
header_matcher1->mutable_safe_regex_match()->set_regex("a[z-a]");
route1->mutable_route()->set_cluster(kNewCluster1Name);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"header matcher: Invalid regex string specified in matcher."));
}
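// Tests that LDS client should send a NACK if a header range matcher has an
// end that is smaller than its start.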
TEST_P(LdsRdsTest, RouteHeaderMatchInvalidRange) {
const char* kNewCluster1Name = "new_cluster_1";
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("header1");
header_matcher1->mutable_range_match()->set_start(1001);
header_matcher1->mutable_range_match()->set_end(1000);
route1->mutable_route()->set_cluster(kNewCluster1Name);
SetRouteConfiguration(0, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr(
"header matcher: Invalid range specifier specified: end cannot be "
"smaller than start."));
}
// Tests that LDS client should choose the default route (the one with no
// specific match) when it is unable to find a match with the previous routes.
TEST_P(LdsRdsTest, XdsRoutingPathMatching) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEcho2Rpcs = 20;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route3->mutable_match()->set_path("/grpc.testing.EchoTest3Service/Echo3");
route3->mutable_route()->set_cluster(kDefaultClusterName);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 2);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true));
CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO2)
.set_rpc_method(METHOD_ECHO2)
.set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
for (size_t i = 0; i < 2; ++i) {
EXPECT_EQ(kNumEchoRpcs / 2,
backends_[i]->backend_service()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
}
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
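// Tests that path matching is case-sensitive by default and can be made
// case-insensitive via the case_sensitive field.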
TEST_P(LdsRdsTest, XdsRoutingPathMatchingCaseInsensitive) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
// First route will not match, since matching is case-sensitive by default.
// Second route will match the same path because case_sensitive is false.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/GrPc.TeStInG.EcHoTeSt1SErViCe/EcHo1");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_path("/GrPc.TeStInG.EcHoTeSt1SErViCe/EcHo1");
route2->mutable_match()->mutable_case_sensitive()->set_value(false);
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
}
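// Tests that RPCs are routed to the correct clusters based on prefix matching.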
TEST_P(LdsRdsTest, XdsRoutingPrefixMatching) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEcho2Rpcs = 20;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 2);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(
kNumEcho1Rpcs,
RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
CheckRpcSendOk(
kNumEcho2Rpcs,
RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
for (size_t i = 0; i < 2; ++i) {
EXPECT_EQ(kNumEchoRpcs / 2,
backends_[i]->backend_service()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
}
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
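// Tests that prefix matching is case-sensitive by default and can be made
// case-insensitive via the case_sensitive field.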
TEST_P(LdsRdsTest, XdsRoutingPrefixMatchingCaseInsensitive) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
// First route will not match, since matching is case-sensitive by default.
// Second route will match the same prefix because case_sensitive is false.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/GrPc.TeStInG.EcHoTeSt1SErViCe");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_prefix("/GrPc.TeStInG.EcHoTeSt1SErViCe");
route2->mutable_match()->mutable_case_sensitive()->set_value(false);
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
}
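// Tests that RPCs are routed to the correct clusters based on path regex
// matching.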
TEST_P(LdsRdsTest, XdsRoutingPathRegexMatching) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEcho2Rpcs = 20;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 2)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
// Will match "/grpc.testing.EchoTest1Service/"
route1->mutable_match()->mutable_safe_regex()->set_regex(".*1.*");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
// Will match "/grpc.testing.EchoTest2Service/"
route2->mutable_match()->mutable_safe_regex()->set_regex(".*2.*");
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 2);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(
kNumEcho1Rpcs,
RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true));
CheckRpcSendOk(
kNumEcho2Rpcs,
RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
for (size_t i = 0; i < 2; ++i) {
EXPECT_EQ(kNumEchoRpcs / 2,
backends_[i]->backend_service()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
}
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service2()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count());
}
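// Tests that path regex matching is case-sensitive by default and can be made
// case-insensitive via the case_sensitive field.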
TEST_P(LdsRdsTest, XdsRoutingPathRegexMatchingCaseInsensitive) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEcho1Rpcs = 10;
const size_t kNumEchoRpcs = 30;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
// First route will not match, since matching is case-sensitive by default.
// Second route uses the same regex but will match because case_sensitive is
// false.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->mutable_safe_regex()->set_regex(
".*EcHoTeSt1SErViCe.*");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->mutable_safe_regex()->set_regex(
".*EcHoTeSt1SErViCe.*");
route2->mutable_match()->mutable_case_sensitive()->set_value(false);
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true));
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count());
}
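// Tests that RPCs are split between weighted clusters roughly in proportion to
// their configured weights, and that a cluster with weight 0 gets no traffic.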
TEST_P(LdsRdsTest, XdsRoutingWeightedCluster) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kNotUsedClusterName = "not_used_cluster";
const size_t kNumEcho1Rpcs = 1000;
const size_t kNumEchoRpcs = 10;
const size_t kWeight75 = 75;
const size_t kWeight25 = 25;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(kWeight75);
auto* weighted_cluster2 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster2->set_name(kNewCluster2Name);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
// Cluster with weight 0 will not be used.
auto* weighted_cluster3 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster3->set_name(kNotUsedClusterName);
weighted_cluster3->mutable_weight()->set_value(0);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75 + kWeight25);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 1);
WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
const int weight_75_request_count =
backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
const int weight_25_request_count =
backends_[2]->backend_service1()->request_count();
const double kErrorTolerance = 0.2;
EXPECT_THAT(
weight_75_request_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 + kErrorTolerance))));
// TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
::testing::AllOf(
::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 - kErrorToleranceSmallLoad)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 + kErrorToleranceSmallLoad))));
}
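// Tests that a weighted_clusters action on the default route splits traffic
// between the clusters in proportion to their weights.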
TEST_P(LdsRdsTest, RouteActionWeightedTargetDefaultRoute) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEchoRpcs = 1000;
const size_t kWeight75 = 75;
const size_t kWeight25 = 25;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(kWeight75);
auto* weighted_cluster2 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster2->set_name(kNewCluster2Name);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75 + kWeight25);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(1, 3);
CheckRpcSendOk(kNumEchoRpcs);
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
const int weight_75_request_count =
backends_[1]->backend_service()->request_count();
const int weight_25_request_count =
backends_[2]->backend_service()->request_count();
const double kErrorTolerance = 0.2;
EXPECT_THAT(
weight_75_request_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEchoRpcs) *
kWeight75 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEchoRpcs) *
kWeight75 / 100 * (1 + kErrorTolerance))));
// TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
::testing::AllOf(
::testing::Ge(static_cast<double>(kNumEchoRpcs) * kWeight25 /
100 * (1 - kErrorToleranceSmallLoad)),
::testing::Le(static_cast<double>(kNumEchoRpcs) * kWeight25 /
100 * (1 + kErrorToleranceSmallLoad))));
}
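// Tests that the client picks up an updated route configuration in which the
// same weighted clusters are given different weights.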
TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateWeights) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kNewCluster3Name = "new_cluster_3";
const char* kNewEdsService3Name = "new_eds_service_name_3";
const size_t kNumEcho1Rpcs = 1000;
const size_t kNumEchoRpcs = 10;
const size_t kWeight75 = 75;
const size_t kWeight25 = 25;
const size_t kWeight50 = 50;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(kWeight75);
auto* weighted_cluster2 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster2->set_name(kNewCluster2Name);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75 + kWeight25);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 1);
WaitForAllBackends(1, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
const int weight_75_request_count =
backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
const int weight_25_request_count =
backends_[2]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
const double kErrorTolerance = 0.2;
EXPECT_THAT(
weight_75_request_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 + kErrorTolerance))));
// TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
// test from flaking while debugging potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
::testing::AllOf(
::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 - kErrorToleranceSmallLoad)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 + kErrorToleranceSmallLoad))));
// Change Route Configurations: same clusters different weights.
weighted_cluster1->mutable_weight()->set_value(kWeight50);
weighted_cluster2->mutable_weight()->set_value(kWeight50);
// Change the default route to a new cluster to help identify when the new
// policies are seen by the client.
default_route->mutable_route()->set_cluster(kNewCluster3Name);
SetRouteConfiguration(0, new_route_config);
ResetBackendCounters();
WaitForAllBackends(3, 4);
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(0, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
const int weight_50_request_count_1 =
backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
const int weight_50_request_count_2 =
backends_[2]->backend_service1()->request_count();
EXPECT_EQ(kNumEchoRpcs, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
EXPECT_THAT(
weight_50_request_count_1,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 + kErrorTolerance))));
EXPECT_THAT(
weight_50_request_count_2,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 + kErrorTolerance))));
}
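// Tests that the client picks up an updated route configuration in which the
// set of weighted clusters itself changes.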
TEST_P(LdsRdsTest, XdsRoutingWeightedClusterUpdateClusters) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kNewCluster3Name = "new_cluster_3";
const char* kNewEdsService3Name = "new_eds_service_name_3";
const size_t kNumEcho1Rpcs = 1000;
const size_t kNumEchoRpcs = 10;
const size_t kWeight75 = 75;
const size_t kWeight25 = 25;
const size_t kWeight50 = 50;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations.
RouteConfiguration new_route_config = default_route_config_;
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* weighted_cluster1 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster1->set_name(kNewCluster1Name);
weighted_cluster1->mutable_weight()->set_value(kWeight75);
auto* weighted_cluster2 =
route1->mutable_route()->mutable_weighted_clusters()->add_clusters();
weighted_cluster2->set_name(kDefaultClusterName);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
route1->mutable_route()
->mutable_weighted_clusters()
->mutable_total_weight()
->set_value(kWeight75 + kWeight25);
auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 1);
WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
int weight_25_request_count =
backends_[0]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
int weight_75_request_count =
backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
const double kErrorTolerance = 0.2;
EXPECT_THAT(
weight_75_request_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 + kErrorTolerance))));
  // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
  // test from flaking while debugging the potential root cause.
const double kErrorToleranceSmallLoad = 0.3;
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
::testing::AllOf(
::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 - kErrorToleranceSmallLoad)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 + kErrorToleranceSmallLoad))));
// Change Route Configurations: new set of clusters with different weights.
weighted_cluster1->mutable_weight()->set_value(kWeight50);
weighted_cluster2->set_name(kNewCluster2Name);
weighted_cluster2->mutable_weight()->set_value(kWeight50);
SetRouteConfiguration(0, new_route_config);
ResetBackendCounters();
WaitForAllBackends(2, 3, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
const int weight_50_request_count_1 =
backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
const int weight_50_request_count_2 =
backends_[2]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service1()->request_count());
EXPECT_THAT(
weight_50_request_count_1,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 + kErrorTolerance))));
EXPECT_THAT(
weight_50_request_count_2,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight50 / 100 * (1 + kErrorTolerance))));
// Change Route Configurations.
weighted_cluster1->mutable_weight()->set_value(kWeight75);
weighted_cluster2->set_name(kNewCluster3Name);
weighted_cluster2->mutable_weight()->set_value(kWeight25);
SetRouteConfiguration(0, new_route_config);
ResetBackendCounters();
WaitForAllBackends(3, 4, true, RpcOptions().set_rpc_service(SERVICE_ECHO1));
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions().set_rpc_service(SERVICE_ECHO1));
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
weight_75_request_count = backends_[1]->backend_service1()->request_count();
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[3]->backend_service()->request_count());
weight_25_request_count = backends_[3]->backend_service1()->request_count();
EXPECT_THAT(
weight_75_request_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) *
kWeight75 / 100 * (1 + kErrorTolerance))));
  // TODO(@donnadionne): Reduce tolerance: increased the tolerance to keep the
  // test from flaking while debugging the potential root cause.
gpr_log(GPR_INFO, "target_75 received %d rpcs and target_25 received %d rpcs",
weight_75_request_count, weight_25_request_count);
EXPECT_THAT(weight_25_request_count,
::testing::AllOf(
::testing::Ge(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 - kErrorToleranceSmallLoad)),
::testing::Le(static_cast<double>(kNumEcho1Rpcs) * kWeight25 /
100 * (1 + kErrorToleranceSmallLoad))));
}
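// Test that the client picks up a route config update that points the default
// route at a new cluster.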
TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClusters) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
const size_t kNumEchoRpcs = 5;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Send Route Configuration.
RouteConfiguration new_route_config = default_route_config_;
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(0, 1);
CheckRpcSendOk(kNumEchoRpcs);
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
// Change Route Configurations: new default cluster.
auto* default_route =
new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
default_route->mutable_route()->set_cluster(kNewClusterName);
SetRouteConfiguration(0, new_route_config);
WaitForAllBackends(1, 2);
CheckRpcSendOk(kNumEchoRpcs);
// Make sure RPCs all go to the correct backend.
EXPECT_EQ(kNumEchoRpcs, backends_[1]->backend_service()->request_count());
}
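// Test that an RPC whose routing pick is delayed (its backend is down and
// wait_for_ready is set) still completes on the cluster chosen by the original
// route config, even though the route config is updated while it is pending.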
TEST_P(LdsRdsTest, XdsRoutingClusterUpdateClustersWithPickingDelays) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
  // Bring down the current backend (0). This will delay the routing pick,
  // resulting in uncommitted RPCs.
ShutdownBackend(0);
// Send a RouteConfiguration with a default route that points to
// backend 0.
RouteConfiguration new_route_config = default_route_config_;
SetRouteConfiguration(0, new_route_config);
// Send exactly one RPC with no deadline and with wait_for_ready=true.
// This RPC will not complete until after backend 0 is started.
std::thread sending_rpc([this]() {
CheckRpcSendOk(1, RpcOptions().set_wait_for_ready(true).set_timeout_ms(0));
});
  // Send a non-wait_for_ready RPC, which should fail. This tells us that the
  // client has received the update and attempted to connect.
const Status status = SendRpc(RpcOptions().set_timeout_ms(0));
EXPECT_FALSE(status.ok());
  // Send an updated RouteConfiguration to use backend 1.
auto* default_route =
new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
default_route->mutable_route()->set_cluster(kNewClusterName);
SetRouteConfiguration(0, new_route_config);
  // Wait for RPCs to go to the new backend (1); this ensures that the client
  // has processed the update.
WaitForAllBackends(1, 2, false, RpcOptions(), true);
  // Bring up the previous backend (0); this will allow the delayed RPC to
  // finally call on_call_committed upon completion.
StartBackend(0);
sending_rpc.join();
// Make sure RPCs go to the correct backend:
EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
EXPECT_EQ(1, backends_[1]->backend_service()->request_count());
}
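// Test that xDS-configured timeouts are applied with the expected precedence:
// the route's grpc_timeout_header_max, then the route's max_stream_duration,
// then the HttpConnectionManager's common max_stream_duration, and finally the
// application deadline.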
TEST_P(LdsRdsTest, XdsRoutingApplyXdsTimeout) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT", "true");
const int64_t kTimeoutMillis = 500;
const int64_t kTimeoutNano = kTimeoutMillis * 1000000;
const int64_t kTimeoutGrpcTimeoutHeaderMaxSecond = 1;
const int64_t kTimeoutMaxStreamDurationSecond = 2;
const int64_t kTimeoutHttpMaxStreamDurationSecond = 3;
const int64_t kTimeoutApplicationSecond = 4;
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kNewCluster3Name = "new_cluster_3";
const char* kNewEdsService3Name = "new_eds_service_name_3";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", {g_port_saver->GetPort()}},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", {g_port_saver->GetPort()}},
});
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Construct listener.
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
// Set up HTTP max_stream_duration of 3.5 seconds
auto* duration =
http_connection_manager.mutable_common_http_protocol_options()
->mutable_max_stream_duration();
duration->set_seconds(kTimeoutHttpMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
// Construct route config.
RouteConfiguration new_route_config = default_route_config_;
  // route 1: Set max_stream_duration of 2.5 seconds and
  // grpc_timeout_header_max of 1.5 seconds.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* max_stream_duration =
route1->mutable_route()->mutable_max_stream_duration();
duration = max_stream_duration->mutable_max_stream_duration();
duration->set_seconds(kTimeoutMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
duration = max_stream_duration->mutable_grpc_timeout_header_max();
duration->set_seconds(kTimeoutGrpcTimeoutHeaderMaxSecond);
duration->set_nanos(kTimeoutNano);
// route 2: Set max_stream_duration of 2.5 seconds
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
route2->mutable_route()->set_cluster(kNewCluster2Name);
max_stream_duration = route2->mutable_route()->mutable_max_stream_duration();
duration = max_stream_duration->mutable_max_stream_duration();
duration->set_seconds(kTimeoutMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
// route 3: No timeout values in route configuration
auto* route3 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route3->mutable_match()->set_path("/grpc.testing.EchoTestService/Echo");
route3->mutable_route()->set_cluster(kNewCluster3Name);
// Set listener and route config.
SetListenerAndRouteConfiguration(0, std::move(listener), new_route_config);
  // Test that the grpc_timeout_header_max of 1.5 seconds is applied.
gpr_cycle_counter now = gpr_get_cycle_counter();
grpc_millis t0 = grpc_cycle_counter_to_millis_round_up(now);
grpc_millis t1 =
t0 + kTimeoutGrpcTimeoutHeaderMaxSecond * 1000 + kTimeoutMillis;
grpc_millis t2 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis;
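  // The RPC below is expected to fail with DEADLINE_EXCEEDED once the
  // 1.5-second grpc_timeout_header_max expires: completion should happen no
  // earlier than t1 (1.5s after t0) and before t2 (2.5s after t0, when the
  // route's max_stream_duration would have fired).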
CheckRpcSendFailure(1,
RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true)
.set_timeout_ms(kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
now = gpr_get_cycle_counter();
t0 = grpc_cycle_counter_to_millis_round_up(now);
EXPECT_GE(t0, t1);
EXPECT_LT(t0, t2);
  // Test that the route max_stream_duration of 2.5 seconds is applied.
now = gpr_get_cycle_counter();
t0 = grpc_cycle_counter_to_millis_round_up(now);
t1 = t0 + kTimeoutMaxStreamDurationSecond * 1000 + kTimeoutMillis;
t2 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis;
CheckRpcSendFailure(1,
RpcOptions()
.set_rpc_service(SERVICE_ECHO2)
.set_rpc_method(METHOD_ECHO2)
.set_wait_for_ready(true)
.set_timeout_ms(kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
now = gpr_get_cycle_counter();
t0 = grpc_cycle_counter_to_millis_round_up(now);
EXPECT_GE(t0, t1);
EXPECT_LT(t0, t2);
  // Test that the HTTP max_stream_duration of 3.5 seconds is applied.
now = gpr_get_cycle_counter();
t0 = grpc_cycle_counter_to_millis_round_up(now);
t1 = t0 + kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis;
t2 = t0 + kTimeoutApplicationSecond * 1000 + kTimeoutMillis;
CheckRpcSendFailure(1,
RpcOptions().set_wait_for_ready(true).set_timeout_ms(
kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
now = gpr_get_cycle_counter();
t0 = grpc_cycle_counter_to_millis_round_up(now);
EXPECT_GE(t0, t1);
EXPECT_LT(t0, t2);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT");
}
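// Test that the route's grpc_timeout_header_max is ignored when the
// GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT env var is not set, so the RPC runs
// until the application deadline.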
TEST_P(LdsRdsTest, XdsRoutingXdsTimeoutDisabled) {
const int64_t kTimeoutMillis = 500;
const int64_t kTimeoutNano = kTimeoutMillis * 1000000;
const int64_t kTimeoutGrpcTimeoutHeaderMaxSecond = 1;
const int64_t kTimeoutApplicationSecond = 4;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
RouteConfiguration new_route_config = default_route_config_;
  // route 1: Set grpc_timeout_header_max of 1.5 seconds.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
auto* max_stream_duration =
route1->mutable_route()->mutable_max_stream_duration();
auto* duration = max_stream_duration->mutable_grpc_timeout_header_max();
duration->set_seconds(kTimeoutGrpcTimeoutHeaderMaxSecond);
duration->set_nanos(kTimeoutNano);
SetRouteConfiguration(0, new_route_config);
  // Test that the grpc_timeout_header_max of 1.5 seconds is not applied.
gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec est_timeout_time = gpr_time_add(
t0, gpr_time_from_millis(
kTimeoutGrpcTimeoutHeaderMaxSecond * 1000 + kTimeoutMillis,
GPR_TIMESPAN));
CheckRpcSendFailure(1,
RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true)
.set_timeout_ms(kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
gpr_timespec timeout_time = gpr_now(GPR_CLOCK_MONOTONIC);
EXPECT_GT(gpr_time_cmp(timeout_time, est_timeout_time), 0);
}
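// Test that the HttpConnectionManager's max_stream_duration is ignored when
// the GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT env var is not set.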
TEST_P(LdsRdsTest, XdsRoutingHttpTimeoutDisabled) {
const int64_t kTimeoutMillis = 500;
const int64_t kTimeoutNano = kTimeoutMillis * 1000000;
const int64_t kTimeoutHttpMaxStreamDurationSecond = 3;
const int64_t kTimeoutApplicationSecond = 4;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
// Construct listener.
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
// Set up HTTP max_stream_duration of 3.5 seconds
auto* duration =
http_connection_manager.mutable_common_http_protocol_options()
->mutable_max_stream_duration();
duration->set_seconds(kTimeoutHttpMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
SetListenerAndRouteConfiguration(0, std::move(listener),
default_route_config_);
  // Test that the HTTP max_stream_duration of 3.5 seconds is not applied.
auto t0 = gpr_now(GPR_CLOCK_MONOTONIC);
auto est_timeout_time = gpr_time_add(
t0, gpr_time_from_millis(
kTimeoutHttpMaxStreamDurationSecond * 1000 + kTimeoutMillis,
GPR_TIMESPAN));
CheckRpcSendFailure(1,
RpcOptions().set_wait_for_ready(true).set_timeout_ms(
kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
auto timeout_time = gpr_now(GPR_CLOCK_MONOTONIC);
EXPECT_GT(gpr_time_cmp(timeout_time, est_timeout_time), 0);
}
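// Test that an explicit zero grpc_timeout_header_max or max_stream_duration in
// the route config disables that xDS timeout, so only the application deadline
// applies.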
TEST_P(LdsRdsTest, XdsRoutingApplyApplicationTimeoutWhenXdsTimeoutExplicit0) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT", "true");
const int64_t kTimeoutNano = 500000000;
const int64_t kTimeoutMaxStreamDurationSecond = 2;
const int64_t kTimeoutHttpMaxStreamDurationSecond = 3;
const int64_t kTimeoutApplicationSecond = 4;
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", {g_port_saver->GetPort()}},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Construct listener.
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
// Set up HTTP max_stream_duration of 3.5 seconds
auto* duration =
http_connection_manager.mutable_common_http_protocol_options()
->mutable_max_stream_duration();
duration->set_seconds(kTimeoutHttpMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
// Construct route config.
RouteConfiguration new_route_config = default_route_config_;
  // route 1: Set max_stream_duration of 2.5 seconds and
  // grpc_timeout_header_max of 0.
auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto* max_stream_duration =
route1->mutable_route()->mutable_max_stream_duration();
duration = max_stream_duration->mutable_max_stream_duration();
duration->set_seconds(kTimeoutMaxStreamDurationSecond);
duration->set_nanos(kTimeoutNano);
duration = max_stream_duration->mutable_grpc_timeout_header_max();
duration->set_seconds(0);
duration->set_nanos(0);
// route 2: Set max_stream_duration to 0
auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2");
route2->mutable_route()->set_cluster(kNewCluster2Name);
max_stream_duration = route2->mutable_route()->mutable_max_stream_duration();
duration = max_stream_duration->mutable_max_stream_duration();
duration->set_seconds(0);
duration->set_nanos(0);
// Set listener and route config.
SetListenerAndRouteConfiguration(0, std::move(listener), new_route_config);
// Test application timeout is applied for route 1
auto t0 = system_clock::now();
CheckRpcSendFailure(1,
RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_wait_for_ready(true)
.set_timeout_ms(kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
  auto elapsed_nano_seconds =
      std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
                                                           t0);
  EXPECT_GT(elapsed_nano_seconds.count(),
            kTimeoutApplicationSecond * 1000000000);
// Test application timeout is applied for route 2
t0 = system_clock::now();
CheckRpcSendFailure(1,
RpcOptions()
.set_rpc_service(SERVICE_ECHO2)
.set_rpc_method(METHOD_ECHO2)
.set_wait_for_ready(true)
.set_timeout_ms(kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
  elapsed_nano_seconds = std::chrono::duration_cast<std::chrono::nanoseconds>(
      system_clock::now() - t0);
  EXPECT_GT(elapsed_nano_seconds.count(),
            kTimeoutApplicationSecond * 1000000000);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT");
}
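// Test that an explicit zero max_stream_duration in the HttpConnectionManager
// disables that timeout, so only the application deadline applies.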
TEST_P(LdsRdsTest, XdsRoutingApplyApplicationTimeoutWhenHttpTimeoutExplicit0) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT", "true");
const int64_t kTimeoutApplicationSecond = 4;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
auto listener = default_listener_;
HttpConnectionManager http_connection_manager;
listener.mutable_api_listener()->mutable_api_listener()->UnpackTo(
&http_connection_manager);
  // Set up HTTP max_stream_duration to be explicitly 0.
auto* duration =
http_connection_manager.mutable_common_http_protocol_options()
->mutable_max_stream_duration();
duration->set_seconds(0);
duration->set_nanos(0);
listener.mutable_api_listener()->mutable_api_listener()->PackFrom(
http_connection_manager);
// Set listener and route config.
SetListenerAndRouteConfiguration(0, std::move(listener),
default_route_config_);
  // Test that the application timeout is applied.
auto t0 = system_clock::now();
CheckRpcSendFailure(1,
RpcOptions().set_wait_for_ready(true).set_timeout_ms(
kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
  auto elapsed_nano_seconds =
      std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
                                                           t0);
  EXPECT_GT(elapsed_nano_seconds.count(),
            kTimeoutApplicationSecond * 1000000000);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT");
}
// Test to ensure application-specified deadline won't be affected when
// the xDS config does not specify a timeout.
TEST_P(LdsRdsTest, XdsRoutingWithOnlyApplicationTimeout) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT", "true");
const int64_t kTimeoutApplicationSecond = 4;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {g_port_saver->GetPort()}},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
auto t0 = system_clock::now();
CheckRpcSendFailure(1,
RpcOptions().set_wait_for_ready(true).set_timeout_ms(
kTimeoutApplicationSecond * 1000),
StatusCode::DEADLINE_EXCEEDED);
  auto elapsed_nano_seconds =
      std::chrono::duration_cast<std::chrono::nanoseconds>(system_clock::now() -
                                                           t0);
  EXPECT_GT(elapsed_nano_seconds.count(),
            kTimeoutApplicationSecond * 1000000000);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT");
}
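// Test that a route whose header matchers (exact, regex, range, present,
// prefix, and inverted suffix) all match sends traffic to its cluster, while
// non-matching traffic uses the default route.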
TEST_P(LdsRdsTest, XdsRoutingHeadersMatching) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
const size_t kNumEcho1Rpcs = 100;
const size_t kNumEchoRpcs = 5;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("header1");
header_matcher1->set_exact_match("POST,PUT,GET");
auto* header_matcher2 = route1->mutable_match()->add_headers();
header_matcher2->set_name("header2");
header_matcher2->mutable_safe_regex_match()->set_regex("[a-z]*");
auto* header_matcher3 = route1->mutable_match()->add_headers();
header_matcher3->set_name("header3");
header_matcher3->mutable_range_match()->set_start(1);
header_matcher3->mutable_range_match()->set_end(1000);
auto* header_matcher4 = route1->mutable_match()->add_headers();
header_matcher4->set_name("header4");
header_matcher4->set_present_match(false);
auto* header_matcher5 = route1->mutable_match()->add_headers();
header_matcher5->set_name("header5");
header_matcher5->set_present_match(true);
auto* header_matcher6 = route1->mutable_match()->add_headers();
header_matcher6->set_name("header6");
header_matcher6->set_prefix_match("/grpc");
auto* header_matcher7 = route1->mutable_match()->add_headers();
header_matcher7->set_name("header7");
header_matcher7->set_suffix_match(".cc");
header_matcher7->set_invert_match(true);
route1->mutable_route()->set_cluster(kNewClusterName);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
std::vector<std::pair<std::string, std::string>> metadata = {
{"header1", "POST"},
{"header2", "blah"},
{"header3", "1"},
{"header5", "anything"},
{"header6", "/grpc.testing.EchoTest1Service/"},
{"header1", "PUT"},
{"header7", "grpc.java"},
{"header1", "GET"},
};
const auto header_match_rpc_options = RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_metadata(std::move(metadata));
// Make sure all backends are up.
WaitForAllBackends(0, 1);
WaitForAllBackends(1, 2, true, header_match_rpc_options);
// Send RPCs.
CheckRpcSendOk(kNumEchoRpcs);
CheckRpcSendOk(kNumEcho1Rpcs, header_match_rpc_options);
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
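// Test that the content-type header is matchable: the route requiring
// "application/grpc" gets all of the traffic, while the route requiring a
// different value gets none.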
TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialHeaderContentType) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
const size_t kNumEchoRpcs = 100;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("content-type");
header_matcher1->set_exact_match("notapplication/grpc");
route1->mutable_route()->set_cluster(kNewClusterName);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
auto* header_matcher2 = default_route->mutable_match()->add_headers();
header_matcher2->set_name("content-type");
header_matcher2->set_exact_match("application/grpc");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
// Make sure the backend is up.
WaitForAllBackends(0, 1);
// Send RPCs.
CheckRpcSendOk(kNumEchoRpcs);
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
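// Test that header matchers on headers that gRPC ignores for routing purposes
// (binary "-bin" headers and "grpc-previous-rpc-attempts") never match, so
// traffic falls through to the default route.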
TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingSpecialCasesToIgnore) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const size_t kNumEchoRpcs = 100;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("grpc-foo-bin");
header_matcher1->set_present_match(true);
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_prefix("");
auto* header_matcher2 = route2->mutable_match()->add_headers();
header_matcher2->set_name("grpc-previous-rpc-attempts");
header_matcher2->set_present_match(true);
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
  // Send the special headers; they are ignored for route matching purposes,
  // so neither of the routes above should match.
std::vector<std::pair<std::string, std::string>> metadata = {
{"grpc-foo-bin", "grpc-foo-bin"},
{"grpc-previous-rpc-attempts", "grpc-previous-rpc-attempts"},
};
WaitForAllBackends(0, 1);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
  // Verify that only the default backend got RPCs, since neither of the other
  // routes matched.
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(0, backends_[2]->backend_service()->request_count());
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
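// Test runtime_fraction matching: roughly the configured fraction (25%) of
// RPCs should match the route, with the rest falling through to the default
// route.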
TEST_P(LdsRdsTest, XdsRoutingRuntimeFractionMatching) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
const size_t kNumRpcs = 1000;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()
->mutable_runtime_fraction()
->mutable_default_value()
->set_numerator(25);
route1->mutable_route()->set_cluster(kNewClusterName);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
WaitForAllBackends(0, 2);
CheckRpcSendOk(kNumRpcs);
const int default_backend_count =
backends_[0]->backend_service()->request_count();
const int matched_backend_count =
backends_[1]->backend_service()->request_count();
const double kErrorTolerance = 0.2;
EXPECT_THAT(
default_backend_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumRpcs) * 75 / 100 *
(1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumRpcs) * 75 / 100 *
(1 + kErrorTolerance))));
EXPECT_THAT(
matched_backend_count,
::testing::AllOf(::testing::Ge(static_cast<double>(kNumRpcs) * 25 / 100 *
(1 - kErrorTolerance)),
::testing::Le(static_cast<double>(kNumRpcs) * 25 / 100 *
(1 + kErrorTolerance))));
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
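// Test header-matching cases that should not match: a multi-valued header
// checked against an exact match, a range matcher whose end is exclusive, and
// a regex the header value does not satisfy. All traffic should use the
// default route.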
TEST_P(LdsRdsTest, XdsRoutingHeadersMatchingUnmatchCases) {
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kNewCluster3Name = "new_cluster_3";
const char* kNewEdsService3Name = "new_eds_service_name_3";
const size_t kNumEcho1Rpcs = 100;
const size_t kNumEchoRpcs = 5;
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
AdsServiceImpl::EdsResourceArgs args3({
{"locality0", GetBackendPorts(3, 4)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args3, kNewEdsService3Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
Cluster new_cluster3 = default_cluster_;
new_cluster3.set_name(kNewCluster3Name);
new_cluster3.mutable_eds_cluster_config()->set_service_name(
kNewEdsService3Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster3);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher1 = route1->mutable_match()->add_headers();
header_matcher1->set_name("header1");
header_matcher1->set_exact_match("POST");
route1->mutable_route()->set_cluster(kNewCluster1Name);
auto route2 = route_config.mutable_virtual_hosts(0)->add_routes();
route2->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher2 = route2->mutable_match()->add_headers();
header_matcher2->set_name("header2");
header_matcher2->mutable_range_match()->set_start(1);
header_matcher2->mutable_range_match()->set_end(1000);
route2->mutable_route()->set_cluster(kNewCluster2Name);
auto route3 = route_config.mutable_virtual_hosts(0)->add_routes();
route3->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
auto* header_matcher3 = route3->mutable_match()->add_headers();
header_matcher3->set_name("header3");
header_matcher3->mutable_safe_regex_match()->set_regex("[a-z]*");
route3->mutable_route()->set_cluster(kNewCluster3Name);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
  // Send headers that fail to match any of the routes above.
std::vector<std::pair<std::string, std::string>> metadata = {
{"header1", "POST"},
{"header2", "1000"},
{"header3", "123"},
{"header1", "GET"},
};
WaitForAllBackends(0, 1);
CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_metadata(metadata));
CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions()
.set_rpc_service(SERVICE_ECHO1)
.set_rpc_method(METHOD_ECHO1)
.set_metadata(metadata));
  // Verify that only the default backend got RPCs, since none of the other
  // routes matched.
for (size_t i = 1; i < 4; ++i) {
EXPECT_EQ(0, backends_[i]->backend_service()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[i]->backend_service2()->request_count());
}
EXPECT_EQ(kNumEchoRpcs, backends_[0]->backend_service()->request_count());
EXPECT_EQ(kNumEcho1Rpcs, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::ACKED);
}
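// Test that a route config update that changes the route matchers without
// changing the clusters is picked up by the client.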
TEST_P(LdsRdsTest, XdsRoutingChangeRoutesWithoutChangingClusters) {
const char* kNewClusterName = "new_cluster";
const char* kNewEdsServiceName = "new_eds_service_name";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsServiceName));
// Populate new CDS resources.
Cluster new_cluster = default_cluster_;
new_cluster.set_name(kNewClusterName);
new_cluster.mutable_eds_cluster_config()->set_service_name(
kNewEdsServiceName);
balancers_[0]->ads_service()->SetCdsResource(new_cluster);
// Populating Route Configurations for LDS.
RouteConfiguration route_config = default_route_config_;
auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0);
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/");
route1->mutable_route()->set_cluster(kNewClusterName);
auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes();
default_route->mutable_match()->set_prefix("");
default_route->mutable_route()->set_cluster(kDefaultClusterName);
SetRouteConfiguration(0, route_config);
// Make sure all backends are up and that requests for each RPC
// service go to the right backends.
WaitForAllBackends(0, 1, false);
WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
// Requests for services Echo and Echo2 should have gone to backend 0.
EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(1, backends_[0]->backend_service2()->request_count());
// Requests for service Echo1 should have gone to backend 1.
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(1, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service2()->request_count());
// Now send an update that changes the first route to match a
// different RPC service, and wait for the client to make the change.
route1->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/");
SetRouteConfiguration(0, route_config);
WaitForAllBackends(1, 2, true, RpcOptions().set_rpc_service(SERVICE_ECHO2));
// Now repeat the earlier test, making sure all traffic goes to the
// right place.
WaitForAllBackends(0, 1, false);
WaitForAllBackends(0, 1, false, RpcOptions().set_rpc_service(SERVICE_ECHO1));
WaitForAllBackends(1, 2, false, RpcOptions().set_rpc_service(SERVICE_ECHO2));
// Requests for services Echo and Echo1 should have gone to backend 0.
EXPECT_EQ(1, backends_[0]->backend_service()->request_count());
EXPECT_EQ(1, backends_[0]->backend_service1()->request_count());
EXPECT_EQ(0, backends_[0]->backend_service2()->request_count());
// Requests for service Echo2 should have gone to backend 1.
EXPECT_EQ(0, backends_[1]->backend_service()->request_count());
EXPECT_EQ(0, backends_[1]->backend_service1()->request_count());
EXPECT_EQ(1, backends_[1]->backend_service2()->request_count());
}
// Test that we NACK unknown filter types in VirtualHost.
TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInVirtualHost) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config =
route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(Listener());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("no filter registered for config type "
"envoy.config.listener.v3.Listener"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we ignore optional unknown filter types in VirtualHost.
TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInVirtualHost) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config =
route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
::envoy::config::route::v3::FilterConfig filter_config;
filter_config.mutable_config()->PackFrom(Listener());
filter_config.set_is_optional(true);
(*per_filter_config)["unknown"].PackFrom(filter_config);
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
WaitForAllBackends();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unparseable filter types in VirtualHost.
TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInVirtualHost) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config =
route_config.mutable_virtual_hosts(0)->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(
envoy::extensions::filters::http::router::v3::Router());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("router filter does not support config override"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unknown filter types in Route.
TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInRoute) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(Listener());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("no filter registered for config type "
"envoy.config.listener.v3.Listener"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we ignore optional unknown filter types in Route.
TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInRoute) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_typed_per_filter_config();
::envoy::config::route::v3::FilterConfig filter_config;
filter_config.mutable_config()->PackFrom(Listener());
filter_config.set_is_optional(true);
(*per_filter_config)["unknown"].PackFrom(filter_config);
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
WaitForAllBackends();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unparseable filter types in Route.
TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInRoute) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* per_filter_config = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(
envoy::extensions::filters::http::router::v3::Router());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("router filter does not support config override"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unknown filter types in ClusterWeight.
TEST_P(LdsRdsTest, RejectsUnknownHttpFilterTypeInClusterWeight) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* cluster_weight = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->mutable_weighted_clusters()
->add_clusters();
cluster_weight->set_name(kDefaultClusterName);
cluster_weight->mutable_weight()->set_value(100);
auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(Listener());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("no filter registered for config type "
"envoy.config.listener.v3.Listener"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we ignore optional unknown filter types in ClusterWeight.
TEST_P(LdsRdsTest, IgnoresOptionalUnknownHttpFilterTypeInClusterWeight) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* cluster_weight = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->mutable_weighted_clusters()
->add_clusters();
cluster_weight->set_name(kDefaultClusterName);
cluster_weight->mutable_weight()->set_value(100);
auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
::envoy::config::route::v3::FilterConfig filter_config;
filter_config.mutable_config()->PackFrom(Listener());
filter_config.set_is_optional(true);
(*per_filter_config)["unknown"].PackFrom(filter_config);
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
WaitForAllBackends();
EXPECT_EQ(RouteConfigurationResponseState(0).state,
AdsServiceImpl::ResponseState::ACKED);
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// Test that we NACK unparseable filter types in ClusterWeight.
TEST_P(LdsRdsTest, RejectsUnparseableHttpFilterTypeInClusterWeight) {
if (GetParam().use_v2()) return; // Filters supported in v3 only.
gpr_setenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "true");
RouteConfiguration route_config = default_route_config_;
auto* cluster_weight = route_config.mutable_virtual_hosts(0)
->mutable_routes(0)
->mutable_route()
->mutable_weighted_clusters()
->add_clusters();
cluster_weight->set_name(kDefaultClusterName);
cluster_weight->mutable_weight()->set_value(100);
auto* per_filter_config = cluster_weight->mutable_typed_per_filter_config();
(*per_filter_config)["unknown"].PackFrom(
envoy::extensions::filters::http::router::v3::Router());
SetListenerAndRouteConfiguration(0, default_listener_, route_config);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Wait until xDS server sees NACK.
do {
CheckRpcSendFailure();
} while (RouteConfigurationResponseState(0).state ==
AdsServiceImpl::ResponseState::SENT);
const auto response_state = RouteConfigurationResponseState(0);
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("router filter does not support config override"));
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION");
}
// TODO(lidiz): As part of adding the fault injection filter, add tests
// for overriding filter configs in the typed_per_filter_config fields in
// each of VirtualHost, Route, and ClusterWeight.
using CdsTest = BasicTest;
// Tests that CDS client should send an ACK upon correct CDS response.
TEST_P(CdsTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
(void)SendRpc();
EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
}
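// Tests that a LOGICAL_DNS cluster is accepted when
// GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER is set and
// that traffic reaches the backend returned by the logical DNS resolver.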
TEST_P(CdsTest, LogicalDNSClusterType) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
"true");
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Create Logical DNS Cluster
auto cluster = default_cluster_;
cluster.set_type(Cluster::LOGICAL_DNS);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Set Logical DNS result
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(GetBackendPorts(1, 2));
logical_dns_cluster_resolver_response_generator_->SetResponse(
std::move(result));
}
// Wait for traffic to go to backend 1.
WaitForBackend(1);
gpr_unsetenv(
"GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
}
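// Tests that an aggregate cluster sends traffic to the highest-priority
// underlying cluster with reachable endpoints, fails over to the next cluster
// when that backend goes down, and switches back once it returns.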
TEST_P(CdsTest, AggregateClusterType) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
"true");
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Create Aggregate Cluster
auto cluster = default_cluster_;
CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
custom_cluster->set_name("envoy.clusters.aggregate");
ClusterConfig cluster_config;
cluster_config.add_clusters(kNewCluster1Name);
cluster_config.add_clusters(kNewCluster2Name);
custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Wait for traffic to go to backend 1.
WaitForBackend(1);
// Shutdown backend 1 and wait for all traffic to go to backend 2.
ShutdownBackend(1);
WaitForBackend(2);
EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
// Bring backend 1 back and ensure all traffic goes back to it.
StartBackend(1);
WaitForBackend(1);
gpr_unsetenv(
"GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
}
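// Tests aggregate cluster failover from an EDS cluster to a LOGICAL_DNS
// cluster and back.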
TEST_P(CdsTest, AggregateClusterEdsToLogicalDns) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
"true");
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const char* kNewCluster1Name = "new_cluster_1";
const char* kNewEdsService1Name = "new_eds_service_name_1";
const char* kLogicalDNSClusterName = "logical_dns_cluster";
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args1({
{"locality0", GetBackendPorts(1, 2)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args1, kNewEdsService1Name));
// Populate new CDS resources.
Cluster new_cluster1 = default_cluster_;
new_cluster1.set_name(kNewCluster1Name);
new_cluster1.mutable_eds_cluster_config()->set_service_name(
kNewEdsService1Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster1);
// Create Logical DNS Cluster
auto logical_dns_cluster = default_cluster_;
logical_dns_cluster.set_name(kLogicalDNSClusterName);
logical_dns_cluster.set_type(Cluster::LOGICAL_DNS);
balancers_[0]->ads_service()->SetCdsResource(logical_dns_cluster);
// Create Aggregate Cluster
auto cluster = default_cluster_;
CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
custom_cluster->set_name("envoy.clusters.aggregate");
ClusterConfig cluster_config;
cluster_config.add_clusters(kNewCluster1Name);
cluster_config.add_clusters(kLogicalDNSClusterName);
custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Set Logical DNS result
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(GetBackendPorts(2, 3));
logical_dns_cluster_resolver_response_generator_->SetResponse(
std::move(result));
}
// Wait for traffic to go to backend 1.
WaitForBackend(1);
// Shutdown backend 1 and wait for all traffic to go to backend 2.
ShutdownBackend(1);
WaitForBackend(2);
EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
// Bring backend 1 back and ensure all traffic goes back to it.
StartBackend(1);
WaitForBackend(1);
gpr_unsetenv(
"GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
}
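// Tests aggregate cluster failover from a LOGICAL_DNS cluster to an EDS
// cluster and back.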
TEST_P(CdsTest, AggregateClusterLogicalDnsToEds) {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER",
"true");
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const char* kNewCluster2Name = "new_cluster_2";
const char* kNewEdsService2Name = "new_eds_service_name_2";
const char* kLogicalDNSClusterName = "logical_dns_cluster";
// Populate new EDS resources.
AdsServiceImpl::EdsResourceArgs args2({
{"locality0", GetBackendPorts(2, 3)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args2, kNewEdsService2Name));
// Populate new CDS resources.
Cluster new_cluster2 = default_cluster_;
new_cluster2.set_name(kNewCluster2Name);
new_cluster2.mutable_eds_cluster_config()->set_service_name(
kNewEdsService2Name);
balancers_[0]->ads_service()->SetCdsResource(new_cluster2);
// Create Logical DNS Cluster
auto logical_dns_cluster = default_cluster_;
logical_dns_cluster.set_name(kLogicalDNSClusterName);
logical_dns_cluster.set_type(Cluster::LOGICAL_DNS);
balancers_[0]->ads_service()->SetCdsResource(logical_dns_cluster);
// Create Aggregate Cluster
auto cluster = default_cluster_;
CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
custom_cluster->set_name("envoy.clusters.aggregate");
ClusterConfig cluster_config;
cluster_config.add_clusters(kLogicalDNSClusterName);
cluster_config.add_clusters(kNewCluster2Name);
custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Set Logical DNS result
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Resolver::Result result;
result.addresses = CreateAddressListFromPortList(GetBackendPorts(1, 2));
logical_dns_cluster_resolver_response_generator_->SetResponse(
std::move(result));
}
// Wait for traffic to go to backend 1.
WaitForBackend(1);
// Shutdown backend 1 and wait for all traffic to go to backend 2.
ShutdownBackend(1);
WaitForBackend(2);
EXPECT_EQ(balancers_[0]->ads_service()->cds_response_state().state,
AdsServiceImpl::ResponseState::ACKED);
// Bring backend 1 back and ensure all traffic goes back to it.
StartBackend(1);
WaitForBackend(1);
gpr_unsetenv(
"GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER");
}
// Tests that CDS client should send a NACK if cluster type is LOGICAL_DNS but
// the feature is not enabled.
TEST_P(CdsTest, LogicalDNSClusterTypeDisabled) {
auto cluster = default_cluster_;
cluster.set_type(Cluster::LOGICAL_DNS);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("DiscoveryType is not valid."));
}
// Tests that CDS client should send a NACK if cluster type is AGGREGATE but
// the feature is not enabled.
TEST_P(CdsTest, AggregateClusterTypeDisabled) {
auto cluster = default_cluster_;
CustomClusterType* custom_cluster = cluster.mutable_cluster_type();
custom_cluster->set_name("envoy.clusters.aggregate");
ClusterConfig cluster_config;
cluster_config.add_clusters("cluster1");
cluster_config.add_clusters("cluster2");
custom_cluster->mutable_typed_config()->PackFrom(cluster_config);
cluster.set_type(Cluster::LOGICAL_DNS);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("DiscoveryType is not valid."));
}
// Tests that CDS client should send a NACK if the cluster type in CDS response
// is unsupported.
TEST_P(CdsTest, UnsupportedClusterType) {
auto cluster = default_cluster_;
cluster.set_type(Cluster::STATIC);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("DiscoveryType is not valid."));
}
// Tests that the NACK for multiple bad resources includes both errors.
TEST_P(CdsTest, MultipleBadResources) {
constexpr char kClusterName2[] = "cluster_name_2";
// Use unsupported type for default cluster.
auto cluster = default_cluster_;
cluster.set_type(Cluster::STATIC);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Add second cluster with the same error.
cluster.set_name(kClusterName2);
balancers_[0]->ads_service()->SetCdsResource(cluster);
// Change RouteConfig to point to both clusters.
RouteConfiguration route_config = default_route_config_;
auto* route = route_config.mutable_virtual_hosts(0)->add_routes();
route->mutable_match()->set_prefix("");
route->mutable_route()->set_cluster(kClusterName2);
SetRouteConfiguration(0, route_config);
// Send RPC.
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::AllOf(
::testing::HasSubstr(absl::StrCat(
kDefaultClusterName, ": DiscoveryType is not valid.")),
::testing::HasSubstr(absl::StrCat(
kClusterName2, ": DiscoveryType is not valid."))));
}
// Tests that CDS client should send a NACK if the eds_config in CDS response is
// other than ADS.
TEST_P(CdsTest, WrongEdsConfig) {
auto cluster = default_cluster_;
cluster.mutable_eds_cluster_config()->mutable_eds_config()->mutable_self();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("EDS ConfigSource is not ADS."));
}
// Tests that CDS client should send a NACK if the lb_policy in CDS response is
// other than ROUND_ROBIN.
TEST_P(CdsTest, WrongLbPolicy) {
auto cluster = default_cluster_;
cluster.set_lb_policy(Cluster::LEAST_REQUEST);
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("LB policy is not supported."));
}
// Tests that CDS client should send a NACK if the lrs_server in CDS response is
// other than SELF.
TEST_P(CdsTest, WrongLrsServer) {
auto cluster = default_cluster_;
cluster.mutable_lrs_server()->mutable_ads();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("LRS ConfigSource is not self."));
}
class XdsSecurityTest : public BasicTest {
protected:
static void SetUpTestCase() {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", "true");
BasicTest::SetUpTestCase();
}
static void TearDownTestCase() {
BasicTest::TearDownTestCase();
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT");
}
void SetUp() override {
BasicTest::SetUp();
root_cert_ = ReadFile(kCaCertPath);
bad_root_cert_ = ReadFile(kBadClientCertPath);
identity_pair_ = ReadTlsIdentityPair(kClientKeyPath, kClientCertPath);
// TODO(yashykt): Use different client certs here instead of reusing server
// certs after https://github.com/grpc/grpc/pull/24876 is merged
fallback_identity_pair_ =
ReadTlsIdentityPair(kServerKeyPath, kServerCertPath);
bad_identity_pair_ =
ReadTlsIdentityPair(kBadClientKeyPath, kBadClientCertPath);
server_san_exact_.set_exact("*.test.google.fr");
server_san_prefix_.set_prefix("waterzooi.test.google");
server_san_suffix_.set_suffix("google.fr");
server_san_contains_.set_contains("google");
server_san_regex_.mutable_safe_regex()->mutable_google_re2();
server_san_regex_.mutable_safe_regex()->set_regex(
"(foo|waterzooi).test.google.(fr|be)");
bad_san_1_.set_exact("192.168.1.4");
bad_san_2_.set_exact("foo.test.google.in");
authenticated_identity_ = {"testclient"};
fallback_authenticated_identity_ = {"*.test.google.fr",
"waterzooi.test.google.be",
"*.test.youtube.com", "192.168.1.3"};
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolutionForLbChannelAllBalancers();
}
void TearDown() override {
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
BasicTest::TearDown();
}
// Sends CDS updates with the new security configuration and verifies that
// after propagation, this new configuration is used for connections. If \a
// identity_instance_name and \a root_instance_name are both empty,
// connections are expected to use fallback credentials.
void UpdateAndVerifyXdsSecurityConfiguration(
absl::string_view root_instance_name,
absl::string_view root_certificate_name,
absl::string_view identity_instance_name,
absl::string_view identity_certificate_name,
const std::vector<StringMatcher>& san_matchers,
const std::vector<std::string>& expected_authenticated_identity,
bool test_expects_failure = false) {
auto cluster = default_cluster_;
if (!identity_instance_name.empty() || !root_instance_name.empty()) {
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
if (!identity_instance_name.empty()) {
upstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_instance_name(std::string(identity_instance_name));
upstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_certificate_name(std::string(identity_certificate_name));
}
if (!root_instance_name.empty()) {
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_instance_name(std::string(root_instance_name));
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_certificate_name(std::string(root_certificate_name));
}
if (!san_matchers.empty()) {
auto* validation_context =
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_default_validation_context();
for (const auto& san_matcher : san_matchers) {
*validation_context->add_match_subject_alt_names() = san_matcher;
}
}
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
}
balancers_[0]->ads_service()->SetCdsResource(cluster);
// The updates might take time to have an effect, so use a retry loop.
constexpr int kRetryCount = 100;
int num_tries = 0;
for (; num_tries < kRetryCount; num_tries++) {
// Give some time for the updates to propagate.
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
if (test_expects_failure) {
// Restart the servers to force a reconnection so that previously
// connected subchannels are not used for the RPC.
ShutdownBackend(0);
StartBackend(0);
if (SendRpc().ok()) {
gpr_log(GPR_ERROR, "RPC succeeded. Failure expected. Trying again.");
continue;
}
} else {
WaitForBackend(0);
Status status = SendRpc();
if (!status.ok()) {
gpr_log(GPR_ERROR, "RPC failed. code=%d message=%s Trying again.",
status.error_code(), status.error_message().c_str());
continue;
}
if (backends_[0]->backend_service()->last_peer_identity() !=
expected_authenticated_identity) {
gpr_log(
GPR_ERROR,
"Expected client identity does not match. (actual) %s vs "
"(expected) %s Trying again.",
absl::StrJoin(
backends_[0]->backend_service()->last_peer_identity(), ",")
.c_str(),
absl::StrJoin(expected_authenticated_identity, ",").c_str());
continue;
}
}
break;
}
EXPECT_LT(num_tries, kRetryCount);
}
std::string root_cert_;
std::string bad_root_cert_;
grpc_core::PemKeyCertPairList identity_pair_;
grpc_core::PemKeyCertPairList fallback_identity_pair_;
grpc_core::PemKeyCertPairList bad_identity_pair_;
StringMatcher server_san_exact_;
StringMatcher server_san_prefix_;
StringMatcher server_san_suffix_;
StringMatcher server_san_contains_;
StringMatcher server_san_regex_;
StringMatcher bad_san_1_;
StringMatcher bad_san_2_;
std::vector<std::string> authenticated_identity_;
std::vector<std::string> fallback_authenticated_identity_;
};
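// Tests that a CDS update that turns on TLS without specifying a
// validation_context_certificate_provider_instance is NACKed.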
TEST_P(XdsSecurityTest,
TLSConfigurationWithoutValidationContextCertificateProviderInstance) {
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"TLS configuration provided but no "
"validation_context_certificate_provider_instance found."));
}
TEST_P(
XdsSecurityTest,
MatchSubjectAltNamesProvidedWithoutValidationContextCertificateProviderInstance) {
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
auto* validation_context = upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_default_validation_context();
*validation_context->add_match_subject_alt_names() = server_san_exact_;
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"TLS configuration provided but no "
"validation_context_certificate_provider_instance found."));
}
TEST_P(
XdsSecurityTest,
TlsCertificateCertificateProviderInstanceWithoutValidationContextCertificateProviderInstance) {
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
upstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_instance_name(std::string("instance_name"));
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"TLS configuration provided but no "
"validation_context_certificate_provider_instance found."));
}
TEST_P(XdsSecurityTest, RegexSanMatcherDoesNotAllowIgnoreCase) {
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_instance_name(std::string("fake_plugin1"));
auto* validation_context = upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_default_validation_context();
StringMatcher matcher;
matcher.mutable_safe_regex()->mutable_google_re2();
matcher.mutable_safe_regex()->set_regex(
"(foo|waterzooi).test.google.(fr|be)");
matcher.set_ignore_case(true);
*validation_context->add_match_subject_alt_names() = matcher;
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->cds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"StringMatcher: ignore_case has no effect for SAFE_REGEX."));
}
TEST_P(XdsSecurityTest, UnknownRootCertificateProvider) {
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_instance_name("unknown");
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure(1, RpcOptions(), StatusCode::UNAVAILABLE);
}
TEST_P(XdsSecurityTest, UnknownIdentityCertificateProvider) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
auto cluster = default_cluster_;
auto* transport_socket = cluster.mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
UpstreamTlsContext upstream_tls_context;
upstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_instance_name("unknown");
upstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_instance_name("fake_plugin1");
transport_socket->mutable_typed_config()->PackFrom(upstream_tls_context);
balancers_[0]->ads_service()->SetCdsResource(cluster);
CheckRpcSendFailure(1, RpcOptions(), StatusCode::UNAVAILABLE);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithNoSanMatchers) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {}, authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithExactSanMatcher) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithPrefixSanMatcher) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_prefix_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithSuffixSanMatcher) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_suffix_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithContainsSanMatcher) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_contains_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRegexSanMatcher) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_regex_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithSanMatchersUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "fake_plugin1", "",
{server_san_exact_, server_san_prefix_}, authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {bad_san_1_, bad_san_2_}, {},
true /* failure */);
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "fake_plugin1", "",
{server_san_prefix_, server_san_regex_}, authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRootPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {bad_root_cert_, bad_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin2" /* bad root */, "",
"fake_plugin1", "", {}, {},
true /* failure */);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithIdentityPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {root_cert_, fallback_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin2",
"", {server_san_exact_},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithBothPluginsUpdated) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {bad_root_cert_, bad_identity_pair_}},
{"good", {root_cert_, fallback_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin2", "", "fake_plugin2",
"", {}, {}, true /* failure */);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_prefix_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin2", "good", "fake_plugin2", "good", {server_san_prefix_},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithRootCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"bad", {bad_root_cert_, bad_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_regex_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "fake_plugin1",
"", {server_san_regex_}, {},
true /* failure */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest,
TestMtlsConfigurationWithIdentityCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"bad", {bad_root_cert_, bad_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"bad", {server_san_exact_}, {},
true /* failure */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest,
TestMtlsConfigurationWithIdentityCertificateNameUpdateGoodCerts) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"good", {root_cert_, fallback_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"good", {server_san_exact_},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsConfigurationWithBothCertificateNamesUpdated) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"bad", {bad_root_cert_, bad_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "fake_plugin1",
"bad", {server_san_prefix_}, {},
true /* failure */);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_prefix_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsConfigurationWithNoSanMatchers) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "", {},
{} /* unauthenticated */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsConfigurationWithSanMatchers) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "", "",
{server_san_exact_, server_san_prefix_, server_san_regex_},
{} /* unauthenticated */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsConfigurationWithSanMatchersUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "", "", {server_san_exact_, server_san_prefix_},
{} /* unauthenticated */);
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "", "", {bad_san_1_, bad_san_2_},
{} /* unauthenticated */, true /* failure */);
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin1", "", "", "", {server_san_prefix_, server_san_regex_},
{} /* unauthenticated */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsConfigurationWithRootCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"bad", {bad_root_cert_, bad_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "bad", "", "",
{server_san_exact_}, {},
true /* failure */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsConfigurationWithRootPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {bad_root_cert_, bad_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
UpdateAndVerifyXdsSecurityConfiguration(
"fake_plugin2", "", "", "", {server_san_exact_}, {}, true /* failure */);
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestFallbackConfiguration) {
UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsToTls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestMtlsToFallback) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsToMtls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestTlsToFallback) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
fallback_authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestFallbackToMtls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
fallback_authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "fake_plugin1",
"", {server_san_exact_},
authenticated_identity_);
g_fake1_cert_data_map = nullptr;
}
TEST_P(XdsSecurityTest, TestFallbackToTls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
UpdateAndVerifyXdsSecurityConfiguration("", "", "", "", {},
fallback_authenticated_identity_);
UpdateAndVerifyXdsSecurityConfiguration("fake_plugin1", "", "", "",
{server_san_exact_},
{} /* unauthenticated */);
g_fake1_cert_data_map = nullptr;
}
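// Tests mTLS using the "file_plugin" certificate provider instance, which
// exercises the file_watcher provider rather than the fake test plugins.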
TEST_P(XdsSecurityTest, TestFileWatcherCertificateProvider) {
UpdateAndVerifyXdsSecurityConfiguration("file_plugin", "", "file_plugin", "",
{server_san_exact_},
authenticated_identity_);
}
class XdsEnabledServerTest : public XdsEnd2endTest {
protected:
XdsEnabledServerTest()
: XdsEnd2endTest(1, 1, 100, true /* use_xds_enabled_server */) {}
void SetUp() override {
XdsEnd2endTest::SetUp();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
}
};
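// Tests that an xDS-enabled server accepts a server Listener matching its
// listening address and successfully serves RPCs.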
TEST_P(XdsEnabledServerTest, Basic) {
Listener listener;
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=",
ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
listener.mutable_address()->mutable_socket_address()->set_address(
ipv6_only_ ? "::1" : "127.0.0.1");
listener.mutable_address()->mutable_socket_address()->set_port_value(
backends_[0]->port());
listener.add_filter_chains();
balancers_[0]->ads_service()->SetLdsResource(listener);
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
backends_[0]->port()));
balancers_[0]->ads_service()->SetLdsResource(listener);
WaitForBackend(0);
CheckRpcSendOk();
}
TEST_P(XdsEnabledServerTest, BadLdsUpdateNoApiListenerNorAddress) {
Listener listener;
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=",
ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
listener.add_filter_chains();
balancers_[0]->ads_service()->SetLdsResource(listener);
CheckRpcSendFailure(1, RpcOptions().set_wait_for_ready(true));
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("Listener has neither address nor ApiListener"));
}
TEST_P(XdsEnabledServerTest, BadLdsUpdateBothApiListenerAndAddress) {
Listener listener;
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=",
ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
balancers_[0]->ads_service()->SetLdsResource(listener);
listener.mutable_address()->mutable_socket_address()->set_address(
ipv6_only_ ? "::1" : "127.0.0.1");
listener.mutable_address()->mutable_socket_address()->set_port_value(
backends_[0]->port());
auto* filter_chain = listener.add_filter_chains();
auto* transport_socket = filter_chain->mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
listener.mutable_api_listener();
balancers_[0]->ads_service()->SetLdsResource(listener);
CheckRpcSendFailure(1, RpcOptions().set_wait_for_ready(true));
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(
response_state.error_message,
::testing::HasSubstr("Listener has both address and ApiListener"));
}
class XdsServerSecurityTest : public XdsEnd2endTest {
protected:
XdsServerSecurityTest()
: XdsEnd2endTest(1, 1, 100, true /* use_xds_enabled_server */) {}
static void SetUpTestCase() {
gpr_setenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", "true");
XdsEnd2endTest::SetUpTestCase();
}
static void TearDownTestCase() {
XdsEnd2endTest::TearDownTestCase();
gpr_unsetenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT");
}
void SetUp() override {
XdsEnd2endTest::SetUp();
root_cert_ = ReadFile(kCaCertPath);
bad_root_cert_ = ReadFile(kBadClientCertPath);
identity_pair_ = ReadTlsIdentityPair(kServerKeyPath, kServerCertPath);
bad_identity_pair_ =
ReadTlsIdentityPair(kBadClientKeyPath, kBadClientCertPath);
identity_pair_2_ = ReadTlsIdentityPair(kClientKeyPath, kClientCertPath);
server_authenticated_identity_ = {"*.test.google.fr",
"waterzooi.test.google.be",
"*.test.youtube.com", "192.168.1.3"};
server_authenticated_identity_2_ = {"testclient"};
client_authenticated_identity_ = {"*.test.google.fr",
"waterzooi.test.google.be",
"*.test.youtube.com", "192.168.1.3"};
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
}
void TearDown() override {
g_fake1_cert_data_map = nullptr;
g_fake2_cert_data_map = nullptr;
XdsEnd2endTest::TearDown();
}
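// Sends an LDS update for the backend's server Listener. An empty
// identity_instance_name yields a plaintext listener; a non-empty
// root_instance_name additionally configures client certificate validation,
// with mTLS enforced when require_client_certificates is true.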
void SetLdsUpdate(absl::string_view root_instance_name,
absl::string_view root_certificate_name,
absl::string_view identity_instance_name,
absl::string_view identity_certificate_name,
bool require_client_certificates) {
Listener listener;
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=127.0.0.1:",
backends_[0]->port()));
listener.mutable_address()->mutable_socket_address()->set_address(
"127.0.0.1");
listener.mutable_address()->mutable_socket_address()->set_port_value(
backends_[0]->port());
auto* filter_chain = listener.add_filter_chains();
if (!identity_instance_name.empty()) {
auto* transport_socket = filter_chain->mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
DownstreamTlsContext downstream_tls_context;
downstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_instance_name(std::string(identity_instance_name));
downstream_tls_context.mutable_common_tls_context()
->mutable_tls_certificate_certificate_provider_instance()
->set_certificate_name(std::string(identity_certificate_name));
if (!root_instance_name.empty()) {
downstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_instance_name(std::string(root_instance_name));
downstream_tls_context.mutable_common_tls_context()
->mutable_combined_validation_context()
->mutable_validation_context_certificate_provider_instance()
->set_certificate_name(std::string(root_certificate_name));
downstream_tls_context.mutable_require_client_certificate()->set_value(
require_client_certificates);
}
transport_socket->mutable_typed_config()->PackFrom(
downstream_tls_context);
}
balancers_[0]->ads_service()->SetLdsResource(listener);
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=[::1]:",
backends_[0]->port()));
listener.mutable_address()->mutable_socket_address()->set_address("[::1]");
balancers_[0]->ads_service()->SetLdsResource(listener);
}
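// Creates a channel to the backend using TLS credentials that present a
// client certificate, with hostname verification skipped.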
std::shared_ptr<grpc::Channel> CreateMtlsChannel() {
ChannelArguments args;
// Override target name for host name check
args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
ipv6_only_ ? "::1" : "127.0.0.1");
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
std::string uri = absl::StrCat(
ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
// TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
grpc_tls_credentials_options* options =
grpc_tls_credentials_options_create();
grpc_tls_credentials_options_set_server_verification_option(
options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
grpc_tls_credentials_options_set_certificate_provider(
options,
grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
ReadFile(kCaCertPath),
ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
.get());
grpc_tls_credentials_options_watch_root_certs(options);
grpc_tls_credentials_options_watch_identity_key_cert_pairs(options);
grpc_tls_server_authorization_check_config* check_config =
grpc_tls_server_authorization_check_config_create(
nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
grpc_tls_credentials_options_set_server_authorization_check_config(
options, check_config);
auto channel_creds = std::make_shared<SecureChannelCredentials>(
grpc_tls_credentials_create(options));
grpc_tls_server_authorization_check_config_release(check_config);
return CreateCustomChannel(uri, channel_creds, args);
}
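// Creates a channel to the backend using TLS credentials that only validate
// the server certificate (no client certificate), with hostname verification
// skipped.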
std::shared_ptr<grpc::Channel> CreateTlsChannel() {
ChannelArguments args;
// Override target name for host name check
args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
ipv6_only_ ? "::1" : "127.0.0.1");
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
std::string uri = absl::StrCat(
ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
// TODO(yashykt): Switch to using C++ API once b/173823806 is fixed.
grpc_tls_credentials_options* options =
grpc_tls_credentials_options_create();
grpc_tls_credentials_options_set_server_verification_option(
options, GRPC_TLS_SKIP_HOSTNAME_VERIFICATION);
grpc_tls_credentials_options_set_certificate_provider(
options,
grpc_core::MakeRefCounted<grpc_core::StaticDataCertificateProvider>(
ReadFile(kCaCertPath),
ReadTlsIdentityPair(kServerKeyPath, kServerCertPath))
.get());
grpc_tls_credentials_options_watch_root_certs(options);
grpc_tls_server_authorization_check_config* check_config =
grpc_tls_server_authorization_check_config_create(
nullptr, ServerAuthCheckSchedule, nullptr, nullptr);
grpc_tls_credentials_options_set_server_authorization_check_config(
options, check_config);
auto channel_creds = std::make_shared<SecureChannelCredentials>(
grpc_tls_credentials_create(options));
grpc_tls_server_authorization_check_config_release(check_config);
return CreateCustomChannel(uri, channel_creds, args);
}
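// Creates a plaintext channel directly to the backend.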
std::shared_ptr<grpc::Channel> CreateInsecureChannel() {
ChannelArguments args;
// Override target name for host name check
args.SetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG,
ipv6_only_ ? "::1" : "127.0.0.1");
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, 1);
std::string uri = absl::StrCat(
ipv6_only_ ? "ipv6:[::1]:" : "ipv4:127.0.0.1:", backends_[0]->port());
return CreateCustomChannel(uri, InsecureChannelCredentials(), args);
}
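// Sends an Echo RPC on a channel obtained from \a channel_creator and
// verifies the authenticated identity seen by each peer, retrying to let xDS
// updates propagate. If \a test_expects_failure is true, the RPC must fail.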
void SendRpc(std::function<std::shared_ptr<grpc::Channel>()> channel_creator,
std::vector<std::string> expected_server_identity,
std::vector<std::string> expected_client_identity,
bool test_expects_failure = false) {
gpr_log(GPR_INFO, "Sending RPC");
int num_tries = 0;
constexpr int kRetryCount = 10;
for (; num_tries < kRetryCount; num_tries++) {
auto channel = channel_creator();
auto stub = grpc::testing::EchoTestService::NewStub(channel);
ClientContext context;
context.set_wait_for_ready(true);
context.set_deadline(grpc_timeout_milliseconds_to_deadline(2000));
EchoRequest request;
request.set_message(kRequestMessage);
EchoResponse response;
Status status = stub->Echo(&context, request, &response);
if (test_expects_failure) {
if (status.ok()) {
gpr_log(GPR_ERROR, "RPC succeeded. Failure expected. Trying again.");
continue;
}
} else {
if (!status.ok()) {
gpr_log(GPR_ERROR, "RPC failed. code=%d message=%s Trying again.",
status.error_code(), status.error_message().c_str());
continue;
}
EXPECT_EQ(response.message(), kRequestMessage);
std::vector<std::string> peer_identity;
for (const auto& entry : context.auth_context()->GetPeerIdentity()) {
peer_identity.emplace_back(
std::string(entry.data(), entry.size()).c_str());
}
if (peer_identity != expected_server_identity) {
gpr_log(GPR_ERROR,
"Expected server identity does not match. (actual) %s vs "
"(expected) %s Trying again.",
absl::StrJoin(peer_identity, ",").c_str(),
absl::StrJoin(expected_server_identity, ",").c_str());
continue;
}
if (backends_[0]->backend_service()->last_peer_identity() !=
expected_client_identity) {
gpr_log(
GPR_ERROR,
"Expected client identity does not match. (actual) %s vs "
"(expected) %s Trying again.",
absl::StrJoin(
backends_[0]->backend_service()->last_peer_identity(), ",")
.c_str(),
absl::StrJoin(expected_client_identity, ",").c_str());
continue;
}
}
break;
}
EXPECT_LT(num_tries, kRetryCount);
}
std::string root_cert_;
std::string bad_root_cert_;
grpc_core::PemKeyCertPairList identity_pair_;
grpc_core::PemKeyCertPairList bad_identity_pair_;
grpc_core::PemKeyCertPairList identity_pair_2_;
std::vector<std::string> server_authenticated_identity_;
std::vector<std::string> server_authenticated_identity_2_;
std::vector<std::string> client_authenticated_identity_;
};
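// Tests that a server-side TLS configuration without a
// tls_certificate_certificate_provider_instance is NACKed.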
TEST_P(XdsServerSecurityTest, TlsConfigurationWithoutRootProviderInstance) {
Listener listener;
listener.set_name(
absl::StrCat("grpc/server?xds.resource.listening_address=",
ipv6_only_ ? "[::1]:" : "127.0.0.1:", backends_[0]->port()));
balancers_[0]->ads_service()->SetLdsResource(listener);
auto* socket_address = listener.mutable_address()->mutable_socket_address();
socket_address->set_address(ipv6_only_ ? "::1" : "127.0.0.1");
socket_address->set_port_value(backends_[0]->port());
auto* filter_chain = listener.add_filter_chains();
auto* transport_socket = filter_chain->mutable_transport_socket();
transport_socket->set_name("envoy.transport_sockets.tls");
DownstreamTlsContext downstream_tls_context;
transport_socket->mutable_typed_config()->PackFrom(downstream_tls_context);
balancers_[0]->ads_service()->SetLdsResource(listener);
CheckRpcSendFailure(1, RpcOptions().set_wait_for_ready(true));
const auto response_state =
balancers_[0]->ads_service()->lds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr(
"TLS configuration provided but no "
"tls_certificate_certificate_provider_instance found."));
}
TEST_P(XdsServerSecurityTest, UnknownIdentityCertificateProvider) {
SetLdsUpdate("", "", "unknown", "", false);
SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
true /* test_expects_failure */);
}
TEST_P(XdsServerSecurityTest, UnknownRootCertificateProvider) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("unknown", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
true /* test_expects_failure */);
}
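// Tests mTLS on the xDS-enabled server with client certificates required.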
TEST_P(XdsServerSecurityTest, TestMtls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
}
TEST_P(XdsServerSecurityTest, TestMtlsWithRootPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {bad_root_cert_, bad_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin2", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
true /* test_expects_failure */);
}
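// Tests that switching the identity certificate provider plugin changes the
// server identity seen by the client.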
TEST_P(XdsServerSecurityTest, TestMtlsWithIdentityPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {root_cert_, identity_pair_2_}}};
g_fake2_cert_data_map = &fake2_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin1", "", "fake_plugin2", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_2_, client_authenticated_identity_);
}
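// Tests updating both the root and identity certificate provider plugins,
// including their certificate names.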
TEST_P(XdsServerSecurityTest, TestMtlsWithBothPluginsUpdated) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"good", {root_cert_, identity_pair_2_}},
{"", {bad_root_cert_, bad_identity_pair_}}};
g_fake2_cert_data_map = &fake2_cert_map;
SetLdsUpdate("fake_plugin2", "", "fake_plugin2", "", true);
SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
true /* test_expects_failure */);
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin2", "good", "fake_plugin2", "good", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_2_, client_authenticated_identity_);
}
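// Tests that updating the root certificate name to one serving a bad root
// certificate causes mTLS handshakes to fail.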
TEST_P(XdsServerSecurityTest, TestMtlsWithRootCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"bad", {bad_root_cert_, bad_identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin1", "bad", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); }, {}, {},
true /* test_expects_failure */);
}
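// Tests that updating the identity certificate name changes the server
// identity seen by the client.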
TEST_P(XdsServerSecurityTest, TestMtlsWithIdentityCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"good", {root_cert_, identity_pair_2_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "good", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_2_, client_authenticated_identity_);
}
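// Tests updating both the root and identity certificate names in a single
// LDS update.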
TEST_P(XdsServerSecurityTest, TestMtlsWithBothCertificateNamesUpdated) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"good", {root_cert_, identity_pair_2_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("fake_plugin1", "good", "fake_plugin1", "good", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_2_, client_authenticated_identity_);
}
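// Tests that a client certificate is still accepted and its identity surfaced
// when provided, even though the server does not require one.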
TEST_P(XdsServerSecurityTest, TestMtlsNotRequiringButProvidingClientCerts) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
}
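// Tests that connections succeed without a client certificate when the server
// does not require one.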
TEST_P(XdsServerSecurityTest, TestMtlsNotRequiringAndNotProvidingClientCerts) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
}
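// Tests a TLS-only configuration: the server presents its identity, and the
// client is not authenticated.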
TEST_P(XdsServerSecurityTest, TestTls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
}
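// Tests that, in a TLS-only configuration, switching the identity plugin
// changes the server identity seen by the client.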
TEST_P(XdsServerSecurityTest, TestTlsWithIdentityPluginUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
FakeCertificateProvider::CertDataMap fake2_cert_map = {
{"", {root_cert_, identity_pair_2_}}};
g_fake2_cert_data_map = &fake2_cert_map;
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
SetLdsUpdate("", "", "fake_plugin2", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_2_, {});
}
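// Tests that, in a TLS-only configuration, updating the identity certificate
// name changes the server identity seen by the client.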
TEST_P(XdsServerSecurityTest, TestTlsWithIdentityCertificateNameUpdate) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}},
{"good", {root_cert_, identity_pair_2_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
SetLdsUpdate("", "", "fake_plugin1", "good", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_2_, {});
}
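// Tests that the server accepts plaintext connections when the LDS resource
// carries no security configuration (fallback to insecure).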
TEST_P(XdsServerSecurityTest, TestFallback) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "", "", false);
SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
}
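// Tests transitioning the server from mTLS to TLS-only.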
TEST_P(XdsServerSecurityTest, TestMtlsToTls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
true /* test_expects_failure */);
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
}
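// Tests transitioning the server from TLS-only to mTLS; a client without
// certificates is then rejected.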
TEST_P(XdsServerSecurityTest, TestTlsToMtls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateTlsChannel(); }, {}, {},
true /* test_expects_failure */);
}
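// Tests transitioning the server from mTLS to the insecure fallback.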
TEST_P(XdsServerSecurityTest, TestMtlsToFallback) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
SetLdsUpdate("", "", "", "", false);
SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
}
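// Tests transitioning the server from the insecure fallback to mTLS.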
TEST_P(XdsServerSecurityTest, TestFallbackToMtls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "", "", false);
SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
SetLdsUpdate("fake_plugin1", "", "fake_plugin1", "", true);
SendRpc([this]() { return CreateMtlsChannel(); },
server_authenticated_identity_, client_authenticated_identity_);
}
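// Tests transitioning the server from TLS-only to the insecure fallback.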
TEST_P(XdsServerSecurityTest, TestTlsToFallback) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
SetLdsUpdate("", "", "", "", false);
SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
}
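// Tests transitioning the server from the insecure fallback to TLS-only.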
TEST_P(XdsServerSecurityTest, TestFallbackToTls) {
FakeCertificateProvider::CertDataMap fake1_cert_map = {
{"", {root_cert_, identity_pair_}}};
g_fake1_cert_data_map = &fake1_cert_map;
SetLdsUpdate("", "", "", "", false);
SendRpc([this]() { return CreateInsecureChannel(); }, {}, {});
SetLdsUpdate("", "", "fake_plugin1", "", false);
SendRpc([this]() { return CreateTlsChannel(); },
server_authenticated_identity_, {});
}
using EdsTest = BasicTest;
// Tests that the EDS client sends a NACK if the EDS update contains
// sparse priorities.
TEST_P(EdsTest, NacksSparsePriorityList) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(), kDefaultLocalityWeight, 1},
});
balancers_[0]->ads_service()->SetEdsResource(BuildEdsResource(args));
CheckRpcSendFailure();
const auto response_state =
balancers_[0]->ads_service()->eds_response_state();
EXPECT_EQ(response_state.state, AdsServiceImpl::ResponseState::NACKED);
EXPECT_THAT(response_state.error_message,
::testing::HasSubstr("sparse priority list"));
}
// In most of our tests, we use different names for different resource
// types, to make sure that there are no cut-and-paste errors in the code
// that cause us to look at data for the wrong resource type. So we add
// this test to make sure that the EDS resource name defaults to the
// cluster name if not specified in the CDS resource.
TEST_P(EdsTest, EdsServiceNameDefaultsToClusterName) {
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, kDefaultClusterName));
Cluster cluster = default_cluster_;
cluster.mutable_eds_cluster_config()->clear_service_name();
balancers_[0]->ads_service()->SetCdsResource(cluster);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendOk();
}
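// Tests of the resource-does-not-exist timeout (set to 500 ms here): RPCs
// should fail if a requested xDS resource is never received.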
class TimeoutTest : public BasicTest {
protected:
void SetUp() override {
xds_resource_does_not_exist_timeout_ms_ = 500;
BasicTest::SetUp();
}
};
// Tests that the LDS client times out when no response is received.
TEST_P(TimeoutTest, Lds) {
balancers_[0]->ads_service()->SetResourceIgnore(kLdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
}
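// Tests that the RDS client times out when no response is received.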
TEST_P(TimeoutTest, Rds) {
balancers_[0]->ads_service()->SetResourceIgnore(kRdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
}
// Tests that the CDS client times out when no response is received.
TEST_P(TimeoutTest, Cds) {
balancers_[0]->ads_service()->SetResourceIgnore(kCdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
}
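// Tests that the EDS client times out when no response is received.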
TEST_P(TimeoutTest, Eds) {
balancers_[0]->ads_service()->SetResourceIgnore(kEdsTypeUrl);
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
CheckRpcSendFailure();
}
using LocalityMapTest = BasicTest;
// Tests that the localities in a locality map are picked according to their
// weights.
TEST_P(LocalityMapTest, WeightedRoundRobin) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
const int kLocalityWeight0 = 2;
const int kLocalityWeight1 = 8;
const int kTotalLocalityWeight = kLocalityWeight0 + kLocalityWeight1;
const double kLocalityWeightRate0 =
static_cast<double>(kLocalityWeight0) / kTotalLocalityWeight;
const double kLocalityWeightRate1 =
static_cast<double>(kLocalityWeight1) / kTotalLocalityWeight;
// ADS response contains 2 localities, each of which contains 1 backend.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kLocalityWeight0},
{"locality1", GetBackendPorts(1, 2), kLocalityWeight1},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for both backends to be ready.
WaitForAllBackends(0, 2);
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
// The locality picking rates should be roughly equal to the expectation.
const double locality_picked_rate_0 =
static_cast<double>(backends_[0]->backend_service()->request_count()) /
kNumRpcs;
const double locality_picked_rate_1 =
static_cast<double>(backends_[1]->backend_service()->request_count()) /
kNumRpcs;
const double kErrorTolerance = 0.2;
EXPECT_THAT(locality_picked_rate_0,
::testing::AllOf(
::testing::Ge(kLocalityWeightRate0 * (1 - kErrorTolerance)),
::testing::Le(kLocalityWeightRate0 * (1 + kErrorTolerance))));
EXPECT_THAT(locality_picked_rate_1,
::testing::AllOf(
::testing::Ge(kLocalityWeightRate1 * (1 - kErrorTolerance)),
::testing::Le(kLocalityWeightRate1 * (1 + kErrorTolerance))));
}
// Tests that we correctly handle a locality containing no endpoints.
TEST_P(LocalityMapTest, LocalityContainingNoEndpoints) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
// EDS response contains 2 localities, one with no endpoints.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
{"locality1", {}},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for all backends to be ready.
WaitForAllBackends();
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
// All traffic should go to the reachable locality.
EXPECT_EQ(backends_[0]->backend_service()->request_count(),
kNumRpcs / backends_.size());
EXPECT_EQ(backends_[1]->backend_service()->request_count(),
kNumRpcs / backends_.size());
EXPECT_EQ(backends_[2]->backend_service()->request_count(),
kNumRpcs / backends_.size());
EXPECT_EQ(backends_[3]->backend_service()->request_count(),
kNumRpcs / backends_.size());
}
// EDS update with no localities.
TEST_P(LocalityMapTest, NoLocalities) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource({}, DefaultEdsServiceName()));
Status status = SendRpc();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
}
// Tests that the locality map can work properly even when it contains a large
// number of localities.
TEST_P(LocalityMapTest, StressTest) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumLocalities = 100;
// The first ADS response contains kNumLocalities localities, each of which
// contains backend 0.
AdsServiceImpl::EdsResourceArgs args;
for (size_t i = 0; i < kNumLocalities; ++i) {
std::string name = absl::StrCat("locality", i);
AdsServiceImpl::EdsResourceArgs::Locality locality(name,
{backends_[0]->port()});
args.locality_list.emplace_back(std::move(locality));
}
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// The second ADS response contains 1 locality, which contains backend 1.
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(1, 2)},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), 60 * 1000));
// Wait until backend 0 is ready; by then, the kNumLocalities localities from
// the first response have been received and handled by the xds policy.
WaitForBackend(0, /*reset_counters=*/false);
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
// Wait until backend 1 is ready; by then, the kNumLocalities localities from
// the first response have been removed by the xds policy.
WaitForBackend(1);
delayed_resource_setter.join();
}
// Tests that the localities in a locality map are picked correctly after update
// (addition, modification, deletion).
TEST_P(LocalityMapTest, UpdateMap) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 3000;
// The locality weight for the first 3 localities.
const std::vector<int> kLocalityWeights0 = {2, 3, 4};
const double kTotalLocalityWeight0 =
std::accumulate(kLocalityWeights0.begin(), kLocalityWeights0.end(), 0);
std::vector<double> locality_weight_rate_0;
locality_weight_rate_0.reserve(kLocalityWeights0.size());
for (int weight : kLocalityWeights0) {
locality_weight_rate_0.push_back(weight / kTotalLocalityWeight0);
}
// Delete the first locality, keep the second locality, change the third
// locality's weight from 4 to 2, and add a new locality with weight 6.
const std::vector<int> kLocalityWeights1 = {3, 2, 6};
const double kTotalLocalityWeight1 =
std::accumulate(kLocalityWeights1.begin(), kLocalityWeights1.end(), 0);
std::vector<double> locality_weight_rate_1 = {
0 /* placeholder for locality 0 */};
for (int weight : kLocalityWeights1) {
locality_weight_rate_1.push_back(weight / kTotalLocalityWeight1);
}
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), 2},
{"locality1", GetBackendPorts(1, 2), 3},
{"locality2", GetBackendPorts(2, 3), 4},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for the first 3 backends to be ready.
WaitForAllBackends(0, 3);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// The picking rates of the first 3 backends should be roughly equal to the
// expectation.
std::vector<double> locality_picked_rates;
for (size_t i = 0; i < 3; ++i) {
locality_picked_rates.push_back(
static_cast<double>(backends_[i]->backend_service()->request_count()) /
kNumRpcs);
}
const double kErrorTolerance = 0.2;
for (size_t i = 0; i < 3; ++i) {
gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
locality_picked_rates[i]);
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
::testing::Ge(locality_weight_rate_0[i] * (1 - kErrorTolerance)),
::testing::Le(locality_weight_rate_0[i] * (1 + kErrorTolerance))));
}
args = AdsServiceImpl::EdsResourceArgs({
{"locality1", GetBackendPorts(1, 2), 3},
{"locality2", GetBackendPorts(2, 3), 2},
{"locality3", GetBackendPorts(3, 4), 6},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Backend 3 hasn't received any request.
EXPECT_EQ(0U, backends_[3]->backend_service()->request_count());
// Wait until the locality update has been processed, as signaled by backend 3
// receiving a request.
WaitForAllBackends(3, 4);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// Backend 0 no longer receives any request.
EXPECT_EQ(0U, backends_[0]->backend_service()->request_count());
// The picking rates of the last 3 backends should be roughly equal to the
// expectation.
locality_picked_rates = {0 /* placeholder for backend 0 */};
for (size_t i = 1; i < 4; ++i) {
locality_picked_rates.push_back(
static_cast<double>(backends_[i]->backend_service()->request_count()) /
kNumRpcs);
}
for (size_t i = 1; i < 4; ++i) {
gpr_log(GPR_INFO, "Locality %" PRIuPTR " rate %f", i,
locality_picked_rates[i]);
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
::testing::Ge(locality_weight_rate_1[i] * (1 - kErrorTolerance)),
::testing::Le(locality_weight_rate_1[i] * (1 + kErrorTolerance))));
}
}
// Tests that we don't fail RPCs when replacing all of the localities in
// a given priority.
TEST_P(LocalityMapTest, ReplaceAllLocalitiesInPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality1", GetBackendPorts(1, 2)},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), 5000));
// Wait for the first backend to be ready.
WaitForBackend(0);
// Keep sending RPCs until we switch over to backend 1, which tells us
// that we received the update. No RPCs should fail during this
// transition.
WaitForBackend(1, /*reset_counters=*/true, /*require_success=*/true);
delayed_resource_setter.join();
}
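// Tests of priority-based failover across localities.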
class FailoverTest : public BasicTest {
public:
void SetUp() override {
BasicTest::SetUp();
ResetStub(500);
}
};
// Localities with the highest priority are used when multiple priorities exist.
TEST_P(FailoverTest, ChooseHighestPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
}
// Does not choose priority with no endpoints.
TEST_P(FailoverTest, DoesNotUsePriorityWithNoEndpoints) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", {}, kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(0, false);
for (size_t i = 1; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
}
// Does not choose locality with no endpoints.
TEST_P(FailoverTest, DoesNotUseLocalityWithNoEndpoints) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {}, kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for all backends to be used.
std::tuple<int, int, int> counts = WaitForAllBackends();
// Make sure no RPCs failed in the transition.
EXPECT_EQ(0, std::get<1>(counts));
}
// If the higher priority localities are not reachable, failover to the highest
// priority among the rest.
TEST_P(FailoverTest, Failover) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
ShutdownBackend(3);
ShutdownBackend(0);
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
}
// If a locality with higher priority than the current one becomes ready,
// switch to it.
TEST_P(FailoverTest, SwitchBackToHigherPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 100;
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForBackend(3);
ShutdownBackend(3);
ShutdownBackend(0);
WaitForBackend(1, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 1) continue;
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
StartBackend(0);
WaitForBackend(0);
CheckRpcSendOk(kNumRpcs);
EXPECT_EQ(kNumRpcs, backends_[0]->backend_service()->request_count());
}
// The first update only contains unavailable priorities. The second update
// contains available priorities.
TEST_P(FailoverTest, UpdateInitialUnavailable) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 1},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 2},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 3},
});
ShutdownBackend(0);
ShutdownBackend(1);
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), 1000));
gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(500, GPR_TIMESPAN));
// Send 0.5 seconds' worth of RPCs.
do {
CheckRpcSendFailure();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
WaitForBackend(2, false);
for (size_t i = 0; i < 4; ++i) {
if (i == 2) continue;
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
delayed_resource_setter.join();
}
// Tests that after the localities' priorities are updated, we still choose the
// highest READY priority with the updated localities.
TEST_P(FailoverTest, UpdatePriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 100;
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 1},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 2},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 3},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 0},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 2},
{"locality1", GetBackendPorts(1, 2), kDefaultLocalityWeight, 0},
{"locality2", GetBackendPorts(2, 3), kDefaultLocalityWeight, 1},
{"locality3", GetBackendPorts(3, 4), kDefaultLocalityWeight, 3},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), 1000));
WaitForBackend(3, false);
for (size_t i = 0; i < 3; ++i) {
EXPECT_EQ(0U, backends_[i]->backend_service()->request_count());
}
WaitForBackend(1);
CheckRpcSendOk(kNumRpcs);
EXPECT_EQ(kNumRpcs, backends_[1]->backend_service()->request_count());
delayed_resource_setter.join();
}
// Moves all localities in the current priority to a higher priority.
TEST_P(FailoverTest, MoveAllLocalitiesInCurrentPriorityToHigherPriority) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
// First update:
// - Priority 0 is locality 0, containing backend 0, which is down.
// - Priority 1 is locality 1, containing backends 1 and 2, which are up.
ShutdownBackend(0);
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 3), kDefaultLocalityWeight, 1},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Second update:
// - Priority 0 contains both localities 0 and 1.
// - Priority 1 is not present.
// - We add backend 3 to locality 1, just so we have a way to know
// when the update has been seen by the client.
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(0, 1), kDefaultLocalityWeight, 0},
{"locality1", GetBackendPorts(1, 4), kDefaultLocalityWeight, 0},
});
std::thread delayed_resource_setter(
std::bind(&BasicTest::SetEdsResourceWithDelay, this, 0,
BuildEdsResource(args, DefaultEdsServiceName()), 1000));
// When we get the first update, all backends in priority 0 are down,
// so we will create priority 1. Backends 1 and 2 should have traffic,
// but backend 3 should not.
WaitForAllBackends(1, 3, false);
EXPECT_EQ(0UL, backends_[3]->backend_service()->request_count());
// When backend 3 gets traffic, we know the second update has been seen.
WaitForBackend(3);
// The ADS service of balancer 0 sent at least 1 response.
EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
delayed_resource_setter.join();
}
using DropTest = BasicTest;
// Tests that RPCs are dropped according to the drop config.
TEST_P(DropTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 200000;
const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
const double KDropRateForLbAndThrottle =
kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
// The ADS response contains two drop categories.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
const double kErrorTolerance = 0.2;
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(
::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
}
// Tests that drop config is converted correctly from per hundred.
TEST_P(DropTest, DropPerHundred) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
const uint32_t kDropPerHundredForLb = 10;
const double kDropRateForLb = kDropPerHundredForLb / 100.0;
// The ADS response contains one drop category.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerHundredForLb}};
args.drop_denominator = FractionalPercent::HUNDRED;
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
const double kErrorTolerance = 0.2;
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
}
// Tests that drop config is converted correctly from per ten thousand.
TEST_P(DropTest, DropPerTenThousand) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 5000;
const uint32_t kDropPerTenThousandForLb = 1000;
const double kDropRateForLb = kDropPerTenThousandForLb / 10000.0;
// The ADS response contains one drop category.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerTenThousandForLb}};
args.drop_denominator = FractionalPercent::TEN_THOUSAND;
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
const double kErrorTolerance = 0.2;
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
}
// Tests that drops are applied correctly after a config update.
TEST_P(DropTest, Update) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 3000;
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 200000;
const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
const double KDropRateForLbAndThrottle =
kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
// The first ADS response contains one drop category.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb}};
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
// Send kNumRpcs RPCs and count the drops.
size_t num_drops = 0;
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// The drop rate should be roughly equal to the expectation.
double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
gpr_log(GPR_INFO, "First batch drop rate %f", seen_drop_rate);
const double kErrorTolerance = 0.3;
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
// The second ADS response contains two drop categories; send an updated EDS
// response.
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the drop rate increases to the midpoint of the two configs,
// which implies that the update has taken effect.
const double kDropRateThreshold =
(kDropRateForLb + KDropRateForLbAndThrottle) / 2;
size_t num_rpcs = kNumRpcs;
while (seen_drop_rate < kDropRateThreshold) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
++num_rpcs;
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
seen_drop_rate = static_cast<double>(num_drops) / num_rpcs;
}
// Send kNumRpcs RPCs and count the drops.
num_drops = 0;
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// The new drop rate should be roughly equal to the expectation.
seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
gpr_log(GPR_INFO, "Second batch drop rate %f", seen_drop_rate);
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(
::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
}
// Tests that all the RPCs are dropped if any drop category drops 100%.
TEST_P(DropTest, DropAll) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 1000;
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 1000000;
// The ADS response contains two drop categories.
AdsServiceImpl::EdsResourceArgs args;
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Send kNumRpcs RPCs and all of them are dropped.
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE);
EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}
}
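// Tests of updates to the balancer (xDS server) addresses used by the client.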
class BalancerUpdateTest : public XdsEnd2endTest {
public:
BalancerUpdateTest() : XdsEnd2endTest(4, 3) {}
};
// Tests that the old LB call is still used after the balancer address update as
// long as that call is still alive.
TEST_P(BalancerUpdateTest, UpdateBalancersButKeepUsingOriginalBalancer) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The ADS service of balancer 0 sent at least 1 response.
EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[1]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// The current LB call to the first balancer is still working, so xds keeps
// using it, and that balancer doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
// The ADS service of balancer 0 sent at least 1 response.
EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[1]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
}
// Tests that the old LB call is still used after multiple balancer address
// updates as long as that call is still alive. Send an update with the same set
// of LBs as the one in SetUp() in order to verify that the LB channel inside
// xds keeps the initial connection (which by definition is also present in the
// update).
TEST_P(BalancerUpdateTest, Repeated) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The ADS service of balancer 0 sent at least 1 response.
EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[1]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
std::vector<int> ports;
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
ports.emplace_back(balancers_[2]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
gpr_timespec deadline = gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
ports.clear();
ports.emplace_back(balancers_[0]->port());
ports.emplace_back(balancers_[1]->port());
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
SetNextResolutionForLbChannel(ports);
gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(10000, GPR_TIMESPAN));
// Send 10 seconds worth of RPCs
do {
CheckRpcSendOk();
} while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
// xds continued using the original LB call to the first balancer, which
// doesn't assign the second backend.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
}
// Tests that if the balancer is down, the RPCs will still be sent to the
// backends according to the last balancer response, until a new balancer is
// reachable.
TEST_P(BalancerUpdateTest, DeadUpdate) {
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
AdsServiceImpl::EdsResourceArgs args({
{"locality0", {backends_[0]->port()}},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", {backends_[1]->port()}},
});
balancers_[1]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->backend_service()->request_count());
// The ADS service of balancer 0 sent at least 1 response.
EXPECT_GT(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[1]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
// Kill balancer 0
gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
balancers_[0]->Shutdown();
gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
// This is serviced by the existing child policy.
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// All 10 requests should again have gone to the first backend.
EXPECT_EQ(20U, backends_[0]->backend_service()->request_count());
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
// None of the balancers' ADS services has sent a response.
EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[0]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[1]->ads_service()->eds_response_state().error_message;
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
SetNextResolutionForLbChannel({balancers_[1]->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
// Wait until update has been processed, as signaled by the second backend
// receiving a request. In the meantime, the client continues to be serviced
// (by the first backend) without interruption.
EXPECT_EQ(0U, backends_[1]->backend_service()->request_count());
WaitForBackend(1);
// This is serviced by the updated RR policy.
backends_[1]->backend_service()->ResetCounters();
gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->backend_service()->request_count());
// The ADS service of balancer 1 sent at least 1 response.
EXPECT_EQ(balancers_[0]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[0]->ads_service()->eds_response_state().error_message;
EXPECT_GT(balancers_[1]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT);
EXPECT_EQ(balancers_[2]->ads_service()->eds_response_state().state,
AdsServiceImpl::ResponseState::NOT_SENT)
<< "Error Message:"
<< balancers_[2]->ads_service()->eds_response_state().error_message;
}
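// Tests of client load reporting to the LRS server.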
class ClientLoadReportingTest : public XdsEnd2endTest {
public:
ClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {}
};
// Tests that the load report received at the balancer is correct.
TEST_P(ClientLoadReportingTest, Vanilla) {
if (!GetParam().use_xds_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 10;
const size_t kNumFailuresPerAddress = 3;
// TODO(juanlishen): Partition the backends after multiple localities is
// tested.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
RpcOptions().set_server_fail(true));
// Check that each backend got the right number of requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
backends_[i]->backend_service()->request_count());
}
// The load report received at the balancer should be correct.
std::vector<ClientStats> load_report =
balancers_[0]->lrs_service()->WaitForLoadReport();
ASSERT_EQ(load_report.size(), 1UL);
ClientStats& client_stats = load_report.front();
EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
client_stats.total_successful_requests());
EXPECT_EQ(0U, client_stats.total_requests_in_progress());
EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
num_ok + num_failure,
client_stats.total_issued_requests());
EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
client_stats.total_error_requests());
EXPECT_EQ(0U, client_stats.total_dropped_requests());
// The LRS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
}
// Tests send_all_clusters.
TEST_P(ClientLoadReportingTest, SendAllClusters) {
balancers_[0]->lrs_service()->set_send_all_clusters(true);
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 10;
const size_t kNumFailuresPerAddress = 3;
// TODO(juanlishen): Partition the backends after multiple localities is
// tested.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
CheckRpcSendFailure(kNumFailuresPerAddress * num_backends_,
RpcOptions().set_server_fail(true));
// Check that each backend got the right number of requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress + kNumFailuresPerAddress,
backends_[i]->backend_service()->request_count());
}
// The load report received at the balancer should be correct.
std::vector<ClientStats> load_report =
balancers_[0]->lrs_service()->WaitForLoadReport();
ASSERT_EQ(load_report.size(), 1UL);
ClientStats& client_stats = load_report.front();
EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
client_stats.total_successful_requests());
EXPECT_EQ(0U, client_stats.total_requests_in_progress());
EXPECT_EQ((kNumRpcsPerAddress + kNumFailuresPerAddress) * num_backends_ +
num_ok + num_failure,
client_stats.total_issued_requests());
EXPECT_EQ(kNumFailuresPerAddress * num_backends_ + num_failure,
client_stats.total_error_requests());
EXPECT_EQ(0U, client_stats.total_dropped_requests());
// The LRS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
}
// Tests that we don't include stats for clusters that are not requested
// by the LRS server.
TEST_P(ClientLoadReportingTest, HonorsClustersRequestedByLrsServer) {
balancers_[0]->lrs_service()->set_cluster_names({"bogus"});
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumRpcsPerAddress = 100;
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends are ready.
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
// Send kNumRpcsPerAddress RPCs per server.
CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
// Each backend should have gotten 100 requests.
for (size_t i = 0; i < backends_.size(); ++i) {
EXPECT_EQ(kNumRpcsPerAddress,
backends_[i]->backend_service()->request_count());
}
// The LRS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count());
// The load report received at the balancer should be correct.
std::vector<ClientStats> load_report =
balancers_[0]->lrs_service()->WaitForLoadReport();
ASSERT_EQ(load_report.size(), 0UL);
}
// Tests that if the balancer restarts, the client load report contains the
// stats before and after the restart correctly.
TEST_P(ClientLoadReportingTest, BalancerRestart) {
if (!GetParam().use_xds_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
SetNextResolutionForLbChannel({balancers_[0]->port()});
const size_t kNumBackendsFirstPass = backends_.size() / 2;
const size_t kNumBackendsSecondPass =
backends_.size() - kNumBackendsFirstPass;
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts(0, kNumBackendsFirstPass)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait until all backends returned by the balancer are ready.
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) =
WaitForAllBackends(/* start_index */ 0,
/* stop_index */ kNumBackendsFirstPass);
std::vector<ClientStats> load_report =
balancers_[0]->lrs_service()->WaitForLoadReport();
ASSERT_EQ(load_report.size(), 1UL);
ClientStats client_stats = std::move(load_report.front());
EXPECT_EQ(static_cast<size_t>(num_ok),
client_stats.total_successful_requests());
EXPECT_EQ(0U, client_stats.total_requests_in_progress());
EXPECT_EQ(0U, client_stats.total_error_requests());
EXPECT_EQ(0U, client_stats.total_dropped_requests());
// Shut down the balancer.
balancers_[0]->Shutdown();
// We should continue using the last EDS response we received from the
// balancer before it was shut down.
// Note: We need to use WaitForAllBackends() here instead of just
// CheckRpcSendOk(kNumBackendsFirstPass), because when the balancer
// shuts down, the XdsClient will generate an error to the
// ServiceConfigWatcher, which will cause the xds resolver to send a
// no-op update to the LB policy. When this update gets down to the
// round_robin child policy for the locality, it will generate a new
// subchannel list, which resets the start index randomly. So we need
// to be a little more permissive here to avoid spurious failures.
ResetBackendCounters();
int num_started = std::get<0>(WaitForAllBackends(
/* start_index */ 0, /* stop_index */ kNumBackendsFirstPass));
// Now restart the balancer, this time pointing to the new backends.
balancers_[0]->Start();
args = AdsServiceImpl::EdsResourceArgs({
{"locality0", GetBackendPorts(kNumBackendsFirstPass)},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
// Wait for queries to start going to one of the new backends.
// This tells us that we're now using the new serverlist.
std::tie(num_ok, num_failure, num_drops) =
WaitForAllBackends(/* start_index */ kNumBackendsFirstPass);
num_started += num_ok + num_failure + num_drops;
// Send one RPC per backend.
CheckRpcSendOk(kNumBackendsSecondPass);
num_started += kNumBackendsSecondPass;
// Check client stats.
load_report = balancers_[0]->lrs_service()->WaitForLoadReport();
ASSERT_EQ(load_report.size(), 1UL);
client_stats = std::move(load_report.front());
EXPECT_EQ(num_started, client_stats.total_successful_requests());
EXPECT_EQ(0U, client_stats.total_requests_in_progress());
EXPECT_EQ(0U, client_stats.total_error_requests());
EXPECT_EQ(0U, client_stats.total_dropped_requests());
}
class ClientLoadReportingWithDropTest : public XdsEnd2endTest {
public:
ClientLoadReportingWithDropTest() : XdsEnd2endTest(4, 1, 20) {}
};
// Tests that the drop stats are correctly reported by client load reporting.
TEST_P(ClientLoadReportingWithDropTest, Vanilla) {
if (!GetParam().use_xds_resolver()) {
balancers_[0]->lrs_service()->set_cluster_names({kServerName});
}
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 3000;
const uint32_t kDropPerMillionForLb = 100000;
const uint32_t kDropPerMillionForThrottle = 200000;
const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
  const double kDropRateForLbAndThrottle =
      kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
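  // With the values above, kDropRateForLb = 0.1 and kDropRateForThrottle =
  // 0.2, so the expected combined drop rate is 0.1 + 0.9 * 0.2 = 0.28.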
// The ADS response contains two drop categories.
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
args.drop_categories = {{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}};
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
int num_ok = 0;
int num_failure = 0;
int num_drops = 0;
std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
const size_t num_warmup = num_ok + num_failure + num_drops;
// Send kNumRpcs RPCs and count the drops.
for (size_t i = 0; i < kNumRpcs; ++i) {
EchoResponse response;
const Status status = SendRpc(RpcOptions(), &response);
if (!status.ok() &&
status.error_message() == "Call dropped by load balancing policy") {
++num_drops;
} else {
EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
<< " message=" << status.error_message();
EXPECT_EQ(response.message(), kRequestMessage);
}
}
// The drop rate should be roughly equal to the expectation.
const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
const double kErrorTolerance = 0.2;
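  // The observed drop rate should be within kErrorTolerance (20%) of the
  // expected combined drop rate.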
EXPECT_THAT(
seen_drop_rate,
::testing::AllOf(
          ::testing::Ge(kDropRateForLbAndThrottle * (1 - kErrorTolerance)),
          ::testing::Le(kDropRateForLbAndThrottle * (1 + kErrorTolerance))));
// Check client stats.
const size_t total_rpc = num_warmup + kNumRpcs;
ClientStats client_stats;
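  // Aggregate load reports until they account for all RPCs sent above (the
  // warmup RPCs plus kNumRpcs); the stats may be spread across several
  // reporting intervals.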
do {
std::vector<ClientStats> load_reports =
balancers_[0]->lrs_service()->WaitForLoadReport();
for (const auto& load_report : load_reports) {
client_stats += load_report;
}
} while (client_stats.total_issued_requests() +
client_stats.total_dropped_requests() <
total_rpc);
EXPECT_EQ(num_drops, client_stats.total_dropped_requests());
EXPECT_THAT(
client_stats.dropped_requests(kLbDropType),
::testing::AllOf(
::testing::Ge(total_rpc * kDropRateForLb * (1 - kErrorTolerance)),
::testing::Le(total_rpc * kDropRateForLb * (1 + kErrorTolerance))));
EXPECT_THAT(client_stats.dropped_requests(kThrottleDropType),
::testing::AllOf(
::testing::Ge(total_rpc * (1 - kDropRateForLb) *
kDropRateForThrottle * (1 - kErrorTolerance)),
::testing::Le(total_rpc * (1 - kDropRateForLb) *
kDropRateForThrottle * (1 + kErrorTolerance))));
}
class BootstrapContentsFromEnvVarTest : public XdsEnd2endTest {
public:
BootstrapContentsFromEnvVarTest() : XdsEnd2endTest(4, 1, 100, false, true) {}
};
TEST_P(BootstrapContentsFromEnvVarTest, Vanilla) {
SetNextResolution({});
SetNextResolutionForLbChannelAllBalancers();
AdsServiceImpl::EdsResourceArgs args({
{"locality0", GetBackendPorts()},
});
balancers_[0]->ads_service()->SetEdsResource(
BuildEdsResource(args, DefaultEdsServiceName()));
WaitForAllBackends();
}
std::string TestTypeName(const ::testing::TestParamInfo<TestType>& info) {
return info.param.AsString();
}
// TestType params:
// - use_xds_resolver
// - enable_load_reporting
// - enable_rds_testing = false
// - use_v2 = false
// - use_xds_credentials = false
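// For example, TestType(true, false, true) uses the xds resolver with load
// reporting disabled and RDS testing enabled.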
INSTANTIATE_TEST_SUITE_P(XdsTest, BasicTest,
::testing::Values(TestType(false, true),
TestType(false, false),
TestType(true, false),
TestType(true, true)),
&TestTypeName);
// Run with both fake resolver and xds resolver.
// Don't run with load reporting or v2 or RDS, since they are irrelevant to
// the tests.
INSTANTIATE_TEST_SUITE_P(XdsTest, SecureNamingTest,
::testing::Values(TestType(false, false),
TestType(true, false)),
&TestTypeName);
// LDS depends on XdsResolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, LdsTest,
::testing::Values(TestType(true, false),
TestType(true, true)),
&TestTypeName);
// LDS/RDS common tests depend on XdsResolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, LdsRdsTest,
::testing::Values(TestType(true, false),
TestType(true, true),
TestType(true, false, true),
TestType(true, true, true),
// Also test with xDS v2.
TestType(true, true, true, true)),
&TestTypeName);
// CDS depends on XdsResolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, CdsTest,
::testing::Values(TestType(true, false),
TestType(true, true)),
&TestTypeName);
// CDS depends on XdsResolver.
// Security depends on v3.
// Not enabling load reporting or RDS, since those are irrelevant to these
// tests.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsSecurityTest,
::testing::Values(TestType(true, false, false, false,
true)),
&TestTypeName);
// We are only testing the server here.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsEnabledServerTest,
::testing::Values(TestType(true, false, false, false,
false)),
&TestTypeName);
// We are only testing the server here.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsServerSecurityTest,
::testing::Values(TestType(false, false, false, false,
true)),
&TestTypeName);
// EDS could be tested with or without XdsResolver, but the tests would
// be the same either way, so we test it only with XdsResolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, EdsTest,
::testing::Values(TestType(true, false),
TestType(true, true)),
&TestTypeName);
// Test initial resource timeouts for each resource type.
// Do this only for XdsResolver with RDS enabled, so that we can test
// all resource types.
// Run with V3 only, since the functionality is no different in V2.
INSTANTIATE_TEST_SUITE_P(XdsTest, TimeoutTest,
::testing::Values(TestType(true, false, true)),
&TestTypeName);
// XdsResolverOnlyTest depends on XdsResolver.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverOnlyTest,
::testing::Values(TestType(true, false),
TestType(true, true)),
&TestTypeName);
// XdsResolverLoadReportingOnlyTest depends on XdsResolver and load reporting.
INSTANTIATE_TEST_SUITE_P(XdsTest, XdsResolverLoadReportingOnlyTest,
::testing::Values(TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, LocalityMapTest,
::testing::Values(TestType(false, true),
TestType(false, false),
TestType(true, false),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, FailoverTest,
::testing::Values(TestType(false, true),
TestType(false, false),
TestType(true, false),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, DropTest,
::testing::Values(TestType(false, true),
TestType(false, false),
TestType(true, false),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, BalancerUpdateTest,
::testing::Values(TestType(false, true),
TestType(false, false),
TestType(true, true)),
&TestTypeName);
// Load reporting tests are not run with load reporting disabled.
INSTANTIATE_TEST_SUITE_P(XdsTest, ClientLoadReportingTest,
::testing::Values(TestType(false, true),
TestType(true, true)),
&TestTypeName);
// Load reporting tests are not run with load reporting disabled.
INSTANTIATE_TEST_SUITE_P(XdsTest, ClientLoadReportingWithDropTest,
::testing::Values(TestType(false, true),
TestType(true, true)),
&TestTypeName);
INSTANTIATE_TEST_SUITE_P(XdsTest, BootstrapContentsFromEnvVarTest,
::testing::Values(TestType(true, false)),
&TestTypeName);
} // namespace
} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv);
grpc::testing::WriteBootstrapFiles();
grpc::testing::g_port_saver = new grpc::testing::PortSaver();
// Make the backup poller poll very frequently in order to pick up
// updates from all the subchannels' FDs.
GPR_GLOBAL_CONFIG_SET(grpc_client_channel_backup_poll_interval_ms, 1);
#if TARGET_OS_IPHONE
// Workaround Apple CFStream bug
gpr_setenv("grpc_cfstream", "0");
#endif
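  // Register the fake certificate provider factories ("fake1" and "fake2")
  // referenced by the xDS security tests.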
grpc_core::CertificateProviderRegistry::RegisterCertificateProviderFactory(
absl::make_unique<grpc::testing::FakeCertificateProviderFactory>(
"fake1", &grpc::testing::g_fake1_cert_data_map));
grpc_core::CertificateProviderRegistry::RegisterCertificateProviderFactory(
absl::make_unique<grpc::testing::FakeCertificateProviderFactory>(
"fake2", &grpc::testing::g_fake2_cert_data_map));
grpc_init();
const auto result = RUN_ALL_TESTS();
grpc_shutdown();
return result;
}