@@ -84,6 +84,7 @@ using ::envoy::api::v2::ClusterLoadAssignment;
 using ::envoy::api::v2::DiscoveryRequest;
 using ::envoy::api::v2::DiscoveryResponse;
 using ::envoy::api::v2::EndpointDiscoveryService;
+using ::envoy::api::v2::FractionalPercent;
 using ::envoy::service::load_stats::v2::ClusterStats;
 using ::envoy::service::load_stats::v2::LoadReportingService;
 using ::envoy::service::load_stats::v2::LoadStatsRequest;
@@ -95,6 +96,8 @@ constexpr char kEdsTypeUrl[] =
 constexpr char kDefaultLocalityRegion[] = "xds_default_locality_region";
 constexpr char kDefaultLocalityZone[] = "xds_default_locality_zone";
 constexpr char kDefaultLocalitySubzone[] = "xds_default_locality_subzone";
+constexpr char kLbDropType[] = "lb";
+constexpr char kThrottleDropType[] = "throttle";

 template <typename ServiceType>
 class CountedService : public ServiceType {
@@ -205,6 +208,11 @@ class ClientStats {
       locality_stats_.emplace(input_locality_stats.locality().sub_zone(),
                               LocalityStats(input_locality_stats));
     }
+    for (const auto& input_dropped_requests :
+         cluster_stats.dropped_requests()) {
+      dropped_requests_.emplace(input_dropped_requests.category(),
+                                input_dropped_requests.dropped_count());
+    }
   }

   uint64_t total_successful_requests() const {
@@ -236,10 +244,16 @@ class ClientStats {
     return sum;
   }
   uint64_t total_dropped_requests() const { return total_dropped_requests_; }
+  uint64_t dropped_requests(const grpc::string& category) const {
+    auto iter = dropped_requests_.find(category);
+    GPR_ASSERT(iter != dropped_requests_.end());
+    return iter->second;
+  }

  private:
   std::map<grpc::string, LocalityStats> locality_stats_;
   uint64_t total_dropped_requests_;
+  std::map<grpc::string, uint64_t> dropped_requests_;
 };

 class EdsServiceImpl : public EdsService {
@@ -301,8 +315,11 @@ class EdsServiceImpl : public EdsService {
     gpr_log(GPR_INFO, "LB[%p]: shut down", this);
   }

-  static DiscoveryResponse BuildResponseForBackends(
-      const std::vector<std::vector<int>>& backend_ports) {
+  static DiscoveryResponse BuildResponse(
+      const std::vector<std::vector<int>>& backend_ports,
+      const std::map<grpc::string, uint32_t>& drop_categories = {},
+      const FractionalPercent::DenominatorType denominator =
+          FractionalPercent::MILLION) {
     ClusterLoadAssignment assignment;
     assignment.set_cluster_name("service name");
     for (size_t i = 0; i < backend_ports.size(); ++i) {
@@ -323,6 +340,18 @@ class EdsServiceImpl : public EdsService {
         socket_address->set_port_value(backend_port);
       }
     }
+    if (!drop_categories.empty()) {
+      auto* policy = assignment.mutable_policy();
+      for (const auto& p : drop_categories) {
+        const grpc::string& name = p.first;
+        const uint32_t parts_per_million = p.second;
+        auto* drop_overload = policy->add_drop_overloads();
+        drop_overload->set_category(name);
+        auto* drop_percentage = drop_overload->mutable_drop_percentage();
+        drop_percentage->set_numerator(parts_per_million);
+        drop_percentage->set_denominator(denominator);
+      }
+    }
     DiscoveryResponse response;
     response.set_type_url(kEdsTypeUrl);
     response.add_resources()->PackFrom(assignment);
@@ -883,8 +912,7 @@ TEST_F(SingleBalancerTest, Vanilla) {
   SetNextResolutionForLbChannelAllBalancers();
   const size_t kNumRpcsPerAddress = 100;
   ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()),
-      0);
+      0, EdsServiceImpl::BuildResponse(GetBackendPortsInGroups()), 0);
   // Make sure that trying to connect works without a call.
   channel_->GetState(true /* try_to_connect */);
   // We need to wait for all backends to come online.
@@ -912,8 +940,7 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
   ports.push_back(backends_[0]->port());
   ports.push_back(backends_[0]->port());
   const size_t kNumRpcsPerAddress = 10;
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends({ports}), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse({ports}), 0);
   // We need to wait for the backend to come online.
   WaitForBackend(0);
   // Send kNumRpcsPerAddress RPCs per server.
@@ -934,8 +961,7 @@ TEST_F(SingleBalancerTest, SecureNaming) {
   SetNextResolutionForLbChannel({balancers_[0]->port()});
   const size_t kNumRpcsPerAddress = 100;
   ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()),
-      0);
+      0, EdsServiceImpl::BuildResponse(GetBackendPortsInGroups()), 0);
   // Make sure that trying to connect works without a call.
   channel_->GetState(true /* try_to_connect */);
   // We need to wait for all backends to come online.
@@ -980,11 +1006,10 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
   const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
   const int kCallDeadlineMs = kServerlistDelayMs * 2;
   // First response is an empty serverlist, sent right away.
-  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponseForBackends({{}}),
-                              0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse({{}}), 0);
   // Send non-empty serverlist only after kServerlistDelayMs
   ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()),
+      0, EdsServiceImpl::BuildResponse(GetBackendPortsInGroups()),
       kServerlistDelayMs);
   const auto t0 = system_clock::now();
   // Client will block: LB will initially send empty serverlist.
@@ -1012,8 +1037,7 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
   for (size_t i = 0; i < kNumUnreachableServers; ++i) {
     ports.push_back(grpc_pick_unused_port_or_die());
   }
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends({ports}), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse({ports}), 0);
   const Status status = SendRpc();
   // The error shouldn't be DEADLINE_EXCEEDED.
   EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
@@ -1023,6 +1047,254 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
   EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
 }

+TEST_F(SingleBalancerTest, Drop) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 5000;
+  const uint32_t kDropPerMillionForLb = 100000;
+  const uint32_t kDropPerMillionForThrottle = 200000;
+  const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
+  const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
+  const double KDropRateForLbAndThrottle =
+      kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
+  // The EDS response contains two drop categories.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(
+          GetBackendPortsInGroups(),
+          {{kLbDropType, kDropPerMillionForLb},
+           {kThrottleDropType, kDropPerMillionForThrottle}}),
+      0);
+  WaitForAllBackends();
+  // Send kNumRpcs RPCs and count the drops.
+  size_t num_drops = 0;
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  // The drop rate should be roughly equal to the expectation.
+  const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  const double kErrorTolerance = 0.2;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(
+          ::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
+          ::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
+  // The EDS service got a single request, and sent a single response.
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
+}
+
+TEST_F(SingleBalancerTest, DropPerHundred) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 5000;
+  const uint32_t kDropPerHundredForLb = 10;
+  const double kDropRateForLb = kDropPerHundredForLb / 100.0;
+  // The EDS response contains one drop category.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(),
+                                    {{kLbDropType, kDropPerHundredForLb}},
+                                    FractionalPercent::HUNDRED),
+      0);
+  WaitForAllBackends();
+  // Send kNumRpcs RPCs and count the drops.
+  size_t num_drops = 0;
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  // The drop rate should be roughly equal to the expectation.
+  const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  const double kErrorTolerance = 0.2;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
+                       ::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
+  // The EDS service got a single request, and sent a single response.
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
+}
+
+TEST_F(SingleBalancerTest, DropPerTenThousand) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 5000;
+  const uint32_t kDropPerTenThousandForLb = 1000;
+  const double kDropRateForLb = kDropPerTenThousandForLb / 10000.0;
+  // The EDS response contains one drop category.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(),
+                                    {{kLbDropType, kDropPerTenThousandForLb}},
+                                    FractionalPercent::TEN_THOUSAND),
+      0);
+  WaitForAllBackends();
+  // Send kNumRpcs RPCs and count the drops.
+  size_t num_drops = 0;
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  // The drop rate should be roughly equal to the expectation.
+  const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  const double kErrorTolerance = 0.2;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
+                       ::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
+  // The EDS service got a single request, and sent a single response.
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
+}
+
+TEST_F(SingleBalancerTest, DropUpdate) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 5000;
+  const uint32_t kDropPerMillionForLb = 100000;
+  const uint32_t kDropPerMillionForThrottle = 200000;
+  const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
+  const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
+  const double KDropRateForLbAndThrottle =
+      kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
+  // The first EDS response contains one drop category.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(),
+                                    {{kLbDropType, kDropPerMillionForLb}}),
+      0);
+  // The second EDS response contains two drop categories.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(
+          GetBackendPortsInGroups(),
+          {{kLbDropType, kDropPerMillionForLb},
+           {kThrottleDropType, kDropPerMillionForThrottle}}),
+      5000);
+  WaitForAllBackends();
+  // Send kNumRpcs RPCs and count the drops.
+  size_t num_drops = 0;
+  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
+  // The drop rate should be roughly equal to the expectation.
+  double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  const double kErrorTolerance = 0.2;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(::testing::Ge(kDropRateForLb * (1 - kErrorTolerance)),
+                       ::testing::Le(kDropRateForLb * (1 + kErrorTolerance))));
+  // Wait until the drop rate increases to the middle of the two configs, which
+  // implies that the update has been in effect.
+  const double kDropRateThreshold =
+      (kDropRateForLb + KDropRateForLbAndThrottle) / 2;
+  size_t num_rpcs = kNumRpcs;
+  while (seen_drop_rate < kDropRateThreshold) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    ++num_rpcs;
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+    seen_drop_rate = static_cast<double>(num_drops) / num_rpcs;
+  }
+  // Send kNumRpcs RPCs and count the drops.
+  num_drops = 0;
+  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
+  // The new drop rate should be roughly equal to the expectation.
+  seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(
+          ::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
+          ::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
+  // The EDS service got a single request,
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  // and sent two responses
+  EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
+}
+
+TEST_F(SingleBalancerTest, DropAll) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 1000;
+  const uint32_t kDropPerMillionForLb = 100000;
+  const uint32_t kDropPerMillionForThrottle = 1000000;
+  // The EDS response contains two drop categories.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(
+          GetBackendPortsInGroups(),
+          {{kLbDropType, kDropPerMillionForLb},
+           {kThrottleDropType, kDropPerMillionForThrottle}}),
+      0);
+  // Send kNumRpcs RPCs and all of them are dropped.
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    EXPECT_TRUE(!status.ok() && status.error_message() ==
+                                    "Call dropped by load balancing policy");
+  }
+  // The EDS service got a single request, and sent a single response.
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
+}
+
 TEST_F(SingleBalancerTest, Fallback) {
   const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
   const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
@@ -1034,7 +1306,7 @@ TEST_F(SingleBalancerTest, Fallback) {
   // Send non-empty serverlist only after kServerlistDelayMs.
   ScheduleResponseForBalancer(
       0,
-      EdsServiceImpl::BuildResponseForBackends(
+      EdsServiceImpl::BuildResponse(
           GetBackendPortsInGroups(kNumBackendsInResolution /* start_index */)),
       kServerlistDelayMs);
   // Wait until all the fallback backends are reachable.
@@ -1083,7 +1355,7 @@ TEST_F(SingleBalancerTest, FallbackUpdate) {
   // Send non-empty serverlist only after kServerlistDelayMs.
   ScheduleResponseForBalancer(
       0,
-      EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups(
+      EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(
           kNumBackendsInResolution +
           kNumBackendsInResolutionUpdate /* start_index */)),
       kServerlistDelayMs);
@@ -1184,10 +1456,8 @@ TEST_F(SingleBalancerTest, FallbackIfResponseReceivedButChildNotReady) {
   SetNextResolutionForLbChannelAllBalancers();
   // Send a serverlist that only contains an unreachable backend before fallback
   // timeout.
-  ScheduleResponseForBalancer(0,
-                              EdsServiceImpl::BuildResponseForBackends(
-                                  {{grpc_pick_unused_port_or_die()}}),
-                              0);
+  ScheduleResponseForBalancer(
+      0, EdsServiceImpl::BuildResponse({{grpc_pick_unused_port_or_die()}}), 0);
   // Because no child policy is ready before fallback timeout, we enter fallback
   // mode.
   WaitForBackend(0);
@@ -1199,9 +1469,12 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
   SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()});
   // Enter fallback mode because the LB channel fails to connect.
   WaitForBackend(0);
-  // Return a new balancer that sends an empty serverlist.
-  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponseForBackends({{}}),
-                              0);
+  // Return a new balancer that sends a response to drop all calls.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(),
+                                    {{kLbDropType, 1000000}}),
+      0);
   SetNextResolutionForLbChannelAllBalancers();
   // Send RPCs until failure.
   gpr_timespec deadline = gpr_time_add(
@@ -1222,7 +1495,7 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedAfterChildRready) {
   // Return a new balancer that sends a dead backend.
   ShutdownBackend(1);
   ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends({{backends_[1]->port()}}), 0);
+      0, EdsServiceImpl::BuildResponse({{backends_[1]->port()}}), 0);
   SetNextResolutionForLbChannelAllBalancers();
   // The state (TRANSIENT_FAILURE) update from the child policy will be ignored
   // because we are still in fallback mode.
@@ -1247,8 +1520,7 @@ TEST_F(SingleBalancerTest, BackendsRestart) {
   SetNextResolution({}, kDefaultServiceConfig_.c_str());
   SetNextResolutionForLbChannelAllBalancers();
   ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()),
-      0);
+      0, EdsServiceImpl::BuildResponse(GetBackendPortsInGroups()), 0);
   WaitForAllBackends();
   // Stop backends. RPCs should fail.
   ShutdownAllBackends();
@@ -1269,10 +1541,10 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) {
   SetNextResolutionForLbChannelAllBalancers();
   auto first_backend = GetBackendPortsInGroups(0, 1);
   auto second_backend = GetBackendPortsInGroups(1, 2);
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0);
-  ScheduleResponseForBalancer(
-      1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(first_backend),
+                              0);
+  ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(second_backend),
+                              0);

   // Wait until the first backend is ready.
   WaitForBackend(0);
@@ -1322,10 +1594,10 @@ TEST_F(UpdatesTest, UpdateBalancerName) {
   SetNextResolutionForLbChannelAllBalancers();
   auto first_backend = GetBackendPortsInGroups(0, 1);
   auto second_backend = GetBackendPortsInGroups(1, 2);
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0);
-  ScheduleResponseForBalancer(
-      1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(first_backend),
+                              0);
+  ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(second_backend),
+                              0);

   // Wait until the first backend is ready.
   WaitForBackend(0);
@@ -1393,10 +1665,10 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) {
   SetNextResolutionForLbChannelAllBalancers();
   auto first_backend = GetBackendPortsInGroups(0, 1);
   auto second_backend = GetBackendPortsInGroups(1, 2);
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0);
-  ScheduleResponseForBalancer(
-      1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(first_backend),
+                              0);
+  ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(second_backend),
+                              0);

   // Wait until the first backend is ready.
   WaitForBackend(0);
@@ -1461,10 +1733,10 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
   SetNextResolutionForLbChannel({balancers_[0]->port()});
   auto first_backend = GetBackendPortsInGroups(0, 1);
   auto second_backend = GetBackendPortsInGroups(1, 2);
-  ScheduleResponseForBalancer(
-      0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0);
-  ScheduleResponseForBalancer(
-      1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0);
+  ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponse(first_backend),
+                              0);
+  ScheduleResponseForBalancer(1, EdsServiceImpl::BuildResponse(second_backend),
+                              0);

   // Start servers and send 10 RPCs per server.
   gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
@@ -1535,14 +1807,6 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
 // TODO(juanlishen): Add TEST_F(UpdatesWithClientLoadReportingTest,
 // ReresolveDeadBalancer)

-// The drop tests are deferred because the drop handling hasn't been added yet.
-
-// TODO(roth): Add TEST_F(SingleBalancerTest, Drop)
-
-// TODO(roth): Add TEST_F(SingleBalancerTest, DropAllFirst)
-
-// TODO(roth): Add TEST_F(SingleBalancerTest, DropAll)
-
 class SingleBalancerWithClientLoadReportingTest : public XdsEnd2endTest {
  public:
   SingleBalancerWithClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {}
@@ -1555,7 +1819,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
   // TODO(juanlishen): Partition the backends after multiple localities is
   // tested.
   ScheduleResponseForBalancer(0,
-                              EdsServiceImpl::BuildResponseForBackends(
+                              EdsServiceImpl::BuildResponse(
                                   GetBackendPortsInGroups(0, backends_.size())),
                               0);
   // Wait until all backends are ready.
@@ -1595,7 +1859,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
       backends_.size() - kNumBackendsFirstPass;
   ScheduleResponseForBalancer(
       0,
-      EdsServiceImpl::BuildResponseForBackends(
+      EdsServiceImpl::BuildResponse(
           GetBackendPortsInGroups(0, kNumBackendsFirstPass)),
       0);
   // Wait until all backends returned by the balancer are ready.
@@ -1626,7 +1890,7 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
   balancers_[0]->Start(server_host_);
   ScheduleResponseForBalancer(
       0,
-      EdsServiceImpl::BuildResponseForBackends(
+      EdsServiceImpl::BuildResponse(
          GetBackendPortsInGroups(kNumBackendsFirstPass)),
       0);
   // Wait for queries to start going to one of the new backends.
@@ -1646,7 +1910,75 @@ TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
   EXPECT_EQ(0U, client_stats->total_dropped_requests());
 }

-// TODO(juanlishen): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Drop)
+class SingleBalancerWithClientLoadReportingAndDropTest : public XdsEnd2endTest {
+ public:
+  SingleBalancerWithClientLoadReportingAndDropTest()
+      : XdsEnd2endTest(4, 1, 20) {}
+};
+
+TEST_F(SingleBalancerWithClientLoadReportingAndDropTest, Vanilla) {
+  SetNextResolution({}, kDefaultServiceConfig_.c_str());
+  SetNextResolutionForLbChannelAllBalancers();
+  const size_t kNumRpcs = 3000;
+  const uint32_t kDropPerMillionForLb = 100000;
+  const uint32_t kDropPerMillionForThrottle = 200000;
+  const double kDropRateForLb = kDropPerMillionForLb / 1000000.0;
+  const double kDropRateForThrottle = kDropPerMillionForThrottle / 1000000.0;
+  const double KDropRateForLbAndThrottle =
+      kDropRateForLb + (1 - kDropRateForLb) * kDropRateForThrottle;
+  // The EDS response contains two drop categories.
+  ScheduleResponseForBalancer(
+      0,
+      EdsServiceImpl::BuildResponse(
+          GetBackendPortsInGroups(),
+          {{kLbDropType, kDropPerMillionForLb},
+           {kThrottleDropType, kDropPerMillionForThrottle}}),
+      0);
+  int num_ok = 0;
+  int num_failure = 0;
+  int num_drops = 0;
+  std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
+  const size_t num_warmup = num_ok + num_failure + num_drops;
+  // Send kNumRpcs RPCs and count the drops.
+  for (size_t i = 0; i < kNumRpcs; ++i) {
+    EchoResponse response;
+    const Status status = SendRpc(&response);
+    if (!status.ok() &&
+        status.error_message() == "Call dropped by load balancing policy") {
+      ++num_drops;
+    } else {
+      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
+                               << " message=" << status.error_message();
+      EXPECT_EQ(response.message(), kRequestMessage_);
+    }
+  }
+  // The drop rate should be roughly equal to the expectation.
+  const double seen_drop_rate = static_cast<double>(num_drops) / kNumRpcs;
+  const double kErrorTolerance = 0.2;
+  EXPECT_THAT(
+      seen_drop_rate,
+      ::testing::AllOf(
+          ::testing::Ge(KDropRateForLbAndThrottle * (1 - kErrorTolerance)),
+          ::testing::Le(KDropRateForLbAndThrottle * (1 + kErrorTolerance))));
+  // Check client stats.
+  ClientStats* client_stats = balancers_[0]->lrs_service()->WaitForLoadReport();
+  EXPECT_EQ(num_drops, client_stats->total_dropped_requests());
+  const size_t total_rpc = num_warmup + kNumRpcs;
+  EXPECT_THAT(
+      client_stats->dropped_requests(kLbDropType),
+      ::testing::AllOf(
+          ::testing::Ge(total_rpc * kDropRateForLb * (1 - kErrorTolerance)),
+          ::testing::Le(total_rpc * kDropRateForLb * (1 + kErrorTolerance))));
+  EXPECT_THAT(client_stats->dropped_requests(kThrottleDropType),
+              ::testing::AllOf(
+                  ::testing::Ge(total_rpc * (1 - kDropRateForLb) *
+                                kDropRateForThrottle * (1 - kErrorTolerance)),
+                  ::testing::Le(total_rpc * (1 - kDropRateForLb) *
+                                kDropRateForThrottle * (1 + kErrorTolerance))));
+  // The EDS service got a single request, and sent a single response.
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
+  EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
+}

 }  // namespace
 }  // namespace testing