Handle EDS response update in locality map

pull/19827/head
Juanli Shen 6 years ago
parent 79fc2e5c56
commit c1713800e0
  1. 5
      include/grpc/impl/codegen/grpc_types.h
  2. 144
      src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
  3. 21
      src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc
  4. 116
      test/cpp/end2end/xds_end2end_test.cc

@ -330,6 +330,11 @@ typedef struct {
balancer before using fallback backend addresses from the resolver.
If 0, enter fallback mode immediately. Default value is 10000. */
#define GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS "grpc.xds_fallback_timeout_ms"
/* Time in milliseconds to wait before a locality is deleted after it's removed
from the received EDS update. If 0, delete the locality immediately. Default
value is 15 minutes. */
#define GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS \
"grpc.xds_locality_retention_interval_ms"
/** If non-zero, grpc server's cronet compression workaround will be enabled */
#define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \
"grpc.workaround.cronet_compression"

@ -109,6 +109,7 @@
#define GRPC_XDS_RECONNECT_JITTER 0.2
#define GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS 10000
#define GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS 1000
#define GRPC_XDS_DEFAULT_LOCALITY_RETENTION_INTERVAL_MS (15 * 60 * 1000)
namespace grpc_core {
@ -452,15 +453,15 @@ class XdsLb : public LoadBalancingPolicy {
class LocalityEntry : public InternallyRefCounted<LocalityEntry> {
public:
LocalityEntry(RefCountedPtr<XdsLb> parent,
RefCountedPtr<XdsLocalityName> name,
uint32_t locality_weight);
RefCountedPtr<XdsLocalityName> name);
~LocalityEntry();
void UpdateLocked(ServerAddressList serverlist,
void UpdateLocked(uint32_t locality_weight, ServerAddressList serverlist,
LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args);
void ShutdownLocked();
void ResetBackoffLocked();
void DeactivateLocked();
void Orphan() override;
grpc_connectivity_state connectivity_state() const {
@ -504,6 +505,8 @@ class XdsLb : public LoadBalancingPolicy {
grpc_channel_args* CreateChildPolicyArgsLocked(
const grpc_channel_args* args);
static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error);
RefCountedPtr<XdsLb> parent_;
RefCountedPtr<XdsLocalityName> name_;
OrphanablePtr<LoadBalancingPolicy> child_policy_;
@ -511,20 +514,22 @@ class XdsLb : public LoadBalancingPolicy {
RefCountedPtr<PickerWrapper> picker_wrapper_;
grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_IDLE;
uint32_t locality_weight_;
grpc_closure on_delayed_removal_timer_;
grpc_timer delayed_removal_timer_;
bool delayed_removal_timer_callback_pending_ = false;
};
explicit LocalityMap(XdsLb* xds_policy) : xds_policy_(xds_policy) {}
void UpdateLocked(const XdsLocalityList& locality_list,
LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args, XdsLb* parent);
const grpc_channel_args* args, XdsLb* parent,
bool is_initial_update = false);
void UpdateXdsPickerLocked();
void ShutdownLocked();
void ResetBackoffLocked();
private:
void PruneLocalities(const XdsLocalityList& locality_list);
XdsLb* xds_policy_;
Map<RefCountedPtr<XdsLocalityName>, OrphanablePtr<LocalityEntry>,
XdsLocalityName::Less>
@ -602,6 +607,7 @@ class XdsLb : public LoadBalancingPolicy {
// The policy to use for the backends.
RefCountedPtr<LoadBalancingPolicy::Config> child_policy_config_;
const grpc_millis locality_retention_interval_ms_;
// Map of policies to use in the backend
LocalityMap locality_map_;
// TODO(mhaidry) : Add support for multiple maps of localities
@ -1711,6 +1717,9 @@ XdsLb::XdsLb(Args args)
lb_fallback_timeout_ms_(grpc_channel_args_find_integer(
args.args, GRPC_ARG_XDS_FALLBACK_TIMEOUT_MS,
{GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS, 0, INT_MAX})),
locality_retention_interval_ms_(grpc_channel_args_find_integer(
args.args, GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS,
{GRPC_XDS_DEFAULT_LOCALITY_RETENTION_INTERVAL_MS, 0, INT_MAX})),
locality_map_(this) {
// Record server name.
const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI);
@ -1837,7 +1846,7 @@ void XdsLb::UpdateLocked(UpdateArgs args) {
}
ProcessAddressesAndChannelArgsLocked(std::move(args.addresses), *args.args);
locality_map_.UpdateLocked(locality_list_, child_policy_config_.get(), args_,
this);
this, is_initial_update);
// Update the existing fallback policy. The fallback policy config and/or the
// fallback addresses may be new.
if (fallback_policy_ != nullptr) UpdateFallbackPolicyLocked();
@ -2035,27 +2044,12 @@ void XdsLb::MaybeExitFallbackMode() {
// XdsLb::LocalityMap
//
// Erases from map_ every locality entry whose name does not appear in
// \a locality_list, i.e., localities dropped by the latest EDS update.
void XdsLb::LocalityMap::PruneLocalities(const XdsLocalityList& locality_list) {
  for (auto iter = map_.begin(); iter != map_.end();) {
    bool found = false;
    for (size_t i = 0; i < locality_list.size(); ++i) {
      if (*locality_list[i].locality_name == *iter->first) {
        found = true;
        break;
      }
    }
    if (!found) {
      // Remove entries not present in the locality list; erase() returns the
      // next valid iterator.
      iter = map_.erase(iter);
    } else {
      ++iter;
    }
  }
}
void XdsLb::LocalityMap::UpdateLocked(
const XdsLocalityList& locality_list,
LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args, XdsLb* parent) {
const grpc_channel_args* args, XdsLb* parent, bool is_initial_update) {
if (parent->shutting_down_) return;
// Add or update the localities in locality_list.
for (size_t i = 0; i < locality_list.size(); i++) {
auto& locality_name = locality_list[i].locality_name;
auto iter = map_.find(locality_name);
@ -2063,19 +2057,35 @@ void XdsLb::LocalityMap::UpdateLocked(
// locality list.
if (iter == map_.end()) {
OrphanablePtr<LocalityEntry> new_entry = MakeOrphanable<LocalityEntry>(
parent->Ref(DEBUG_LOCATION, "LocalityEntry"), locality_name,
locality_list[i].lb_weight);
parent->Ref(DEBUG_LOCATION, "LocalityEntry"), locality_name);
iter = map_.emplace(locality_name, std::move(new_entry)).first;
}
// Keep a copy of serverlist in locality_list_ so that we can compare it
// with the future ones.
iter->second->UpdateLocked(locality_list[i].serverlist, child_policy_config,
iter->second->UpdateLocked(locality_list[i].lb_weight,
locality_list[i].serverlist, child_policy_config,
args);
}
PruneLocalities(locality_list);
// Remove (later) the localities not in locality_list.
for (auto& p : map_) {
const XdsLocalityName* locality_name = p.first.get();
LocalityEntry* locality_entry = p.second.get();
bool in_locality_list = false;
for (size_t i = 0; i < locality_list.size(); ++i) {
if (*locality_list[i].locality_name == *locality_name) {
in_locality_list = true;
break;
}
}
if (!in_locality_list) locality_entry->DeactivateLocked();
}
// Generate a new xds picker immediately.
if (!is_initial_update) UpdateXdsPickerLocked();
}
void XdsLb::LocalityMap::UpdateXdsPickerLocked() {
// If we are in fallback mode, don't generate an xds picker from localities.
if (xds_policy_->fallback_policy_ != nullptr) return;
// Construct a new xds picker which maintains a map of all locality pickers
// that are ready. Each locality is represented by a portion of the range
// proportional to its weight, such that the total range is the sum of the
@ -2086,23 +2096,8 @@ void XdsLb::LocalityMap::UpdateXdsPickerLocked() {
size_t num_transient_failures = 0;
Picker::PickerList pickers;
for (auto& p : map_) {
// TODO(juanlishen): We should prune a locality (and kill its stats) after
// we know we won't pick from it. We need to improve our update logic to
// make that easier. Consider the following situation: the current map has
// two READY localities A and B, and the update only contains B with the
// same addresses as before. Without the following hack, we will generate
// the same picker containing A and B because we haven't pruned A when the
// update happens. Remove the for loop below once we implement the locality
// map update.
bool in_locality_list = false;
for (size_t i = 0; i < xds_policy_->locality_list_.size(); ++i) {
if (*xds_policy_->locality_list_[i].locality_name == *p.first) {
in_locality_list = true;
break;
}
}
if (!in_locality_list) continue;
const LocalityEntry* entry = p.second.get();
if (entry->locality_weight() == 0) continue;
switch (entry->connectivity_state()) {
case GRPC_CHANNEL_READY: {
end += entry->locality_weight();
@ -2121,10 +2116,8 @@ void XdsLb::LocalityMap::UpdateXdsPickerLocked() {
num_transient_failures++;
break;
}
default: {
gpr_log(GPR_ERROR, "Invalid locality connectivity state - %d",
entry->connectivity_state());
}
default:
GPR_UNREACHABLE_CODE(return );
}
}
// Pass on the constructed xds picker if it has any ready pickers in their map
@ -2148,11 +2141,9 @@ void XdsLb::LocalityMap::UpdateXdsPickerLocked() {
UniquePtr<SubchannelPicker>(
New<QueuePicker>(xds_policy_->Ref(DEBUG_LOCATION, "QueuePicker"))));
} else {
GPR_ASSERT(num_transient_failures ==
xds_policy_->locality_map_.map_.size());
grpc_error* error =
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"connections to all localities failing"),
"connections to all active localities failing"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
xds_policy_->channel_control_helper()->UpdateState(
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -2173,15 +2164,14 @@ void XdsLb::LocalityMap::ResetBackoffLocked() {
//
XdsLb::LocalityMap::LocalityEntry::LocalityEntry(
RefCountedPtr<XdsLb> parent, RefCountedPtr<XdsLocalityName> name,
uint32_t locality_weight)
: parent_(std::move(parent)),
name_(std::move(name)),
locality_weight_(locality_weight) {
RefCountedPtr<XdsLb> parent, RefCountedPtr<XdsLocalityName> name)
: parent_(std::move(parent)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
gpr_log(GPR_INFO, "[xdslb %p] created LocalityEntry %p for %s",
parent_.get(), this, name_->AsHumanReadableString());
}
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimerLocked,
this, grpc_combiner_scheduler(parent_->combiner()));
}
XdsLb::LocalityMap::LocalityEntry::~LocalityEntry() {
@ -2245,10 +2235,15 @@ XdsLb::LocalityMap::LocalityEntry::CreateChildPolicyLocked(
}
void XdsLb::LocalityMap::LocalityEntry::UpdateLocked(
ServerAddressList serverlist,
uint32_t locality_weight, ServerAddressList serverlist,
LoadBalancingPolicy::Config* child_policy_config,
const grpc_channel_args* args_in) {
if (parent_->shutting_down_) return;
// Update locality weight.
locality_weight_ = locality_weight;
if (delayed_removal_timer_callback_pending_) {
grpc_timer_cancel(&delayed_removal_timer_);
}
// Construct update args.
UpdateArgs update_args;
update_args.addresses = std::move(serverlist);
@ -2373,6 +2368,9 @@ void XdsLb::LocalityMap::LocalityEntry::ShutdownLocked() {
// Drop our ref to the child's picker, in case it's holding a ref to
// the child.
picker_wrapper_.reset();
if (delayed_removal_timer_callback_pending_) {
grpc_timer_cancel(&delayed_removal_timer_);
}
}
void XdsLb::LocalityMap::LocalityEntry::ResetBackoffLocked() {
@ -2387,6 +2385,36 @@ void XdsLb::LocalityMap::LocalityEntry::Orphan() {
Unref();
}
// Deactivates this locality: sets its weight to 0 so new pickers skip it, and
// schedules its deletion after locality_retention_interval_ms_ (or deletes it
// immediately when retention is disabled). A later EDS update that includes
// this locality again cancels the pending removal (see UpdateLocked).
void XdsLb::LocalityMap::LocalityEntry::DeactivateLocked() {
// If locality retaining is disabled, delete the locality immediately.
if (parent_->locality_retention_interval_ms_ == 0) {
parent_->locality_map_.map_.erase(name_);
return;
}
// If already deactivated, don't do that again.
if (locality_weight_ == 0) return;
// Set the locality weight to 0 so that future xds picker won't contain this
// locality.
locality_weight_ = 0;
// Start a timer to delete the locality.
// Take a ref to keep this entry alive while the timer is pending; it is
// released in OnDelayedRemovalTimerLocked().
Ref(DEBUG_LOCATION, "LocalityEntry+timer").release();
grpc_timer_init(
&delayed_removal_timer_,
ExecCtx::Get()->Now() + parent_->locality_retention_interval_ms_,
&on_delayed_removal_timer_);
delayed_removal_timer_callback_pending_ = true;
}
// Timer callback scheduled by DeactivateLocked(). Deletes the locality entry
// from the map only if the timer fired normally (error == GRPC_ERROR_NONE,
// i.e., it was not cancelled) and the locality is still deactivated
// (locality_weight_ == 0 — a nonzero weight means an EDS update reactivated
// it in the meantime).
void XdsLb::LocalityMap::LocalityEntry::OnDelayedRemovalTimerLocked(
void* arg, grpc_error* error) {
LocalityEntry* self = static_cast<LocalityEntry*>(arg);
self->delayed_removal_timer_callback_pending_ = false;
if (error == GRPC_ERROR_NONE && self->locality_weight_ == 0) {
self->parent_->locality_map_.map_.erase(self->name_);
}
// Release the ref taken when the timer was scheduled.
self->Unref(DEBUG_LOCATION, "LocalityEntry+timer");
}
//
// XdsLb::LocalityEntry::Helper
//
@ -2446,8 +2474,6 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::UpdateState(
entry_->parent_->MaybeCancelFallbackAtStartupChecks();
entry_->parent_->MaybeExitFallbackMode();
}
// If we are in fallback mode, ignore update request from the child policy.
if (entry_->parent_->fallback_policy_ != nullptr) return;
GPR_ASSERT(entry_->parent_->lb_chand_ != nullptr);
// Cache the picker and its state in the entry.
entry_->picker_wrapper_ = MakeRefCounted<PickerWrapper>(

@ -137,6 +137,16 @@ UniquePtr<char> StringCopy(const upb_strview& strview) {
grpc_error* LocalityParse(
const envoy_api_v2_endpoint_LocalityLbEndpoints* locality_lb_endpoints,
XdsLocalityInfo* locality_info) {
// Parse LB weight.
const google_protobuf_UInt32Value* lb_weight =
envoy_api_v2_endpoint_LocalityLbEndpoints_load_balancing_weight(
locality_lb_endpoints);
// If LB weight is not specified, it means this locality is assigned no load.
// TODO(juanlishen): When we support CDS to configure the inter-locality
// policy, we should change the LB weight handling.
locality_info->lb_weight =
lb_weight != nullptr ? google_protobuf_UInt32Value_value(lb_weight) : 0;
if (locality_info->lb_weight == 0) return GRPC_ERROR_NONE;
// Parse locality name.
const envoy_api_v2_core_Locality* locality =
envoy_api_v2_endpoint_LocalityLbEndpoints_locality(locality_lb_endpoints);
@ -154,14 +164,7 @@ grpc_error* LocalityParse(
&locality_info->serverlist);
if (error != GRPC_ERROR_NONE) return error;
}
// Parse the lb_weight and priority.
const google_protobuf_UInt32Value* lb_weight =
envoy_api_v2_endpoint_LocalityLbEndpoints_load_balancing_weight(
locality_lb_endpoints);
// If LB weight is not specified, the default weight 0 is used, which means
// this locality is assigned no load.
locality_info->lb_weight =
lb_weight != nullptr ? google_protobuf_UInt32Value_value(lb_weight) : 0;
// Parse the priority.
locality_info->priority =
envoy_api_v2_endpoint_LocalityLbEndpoints_priority(locality_lb_endpoints);
return GRPC_ERROR_NONE;
@ -253,6 +256,8 @@ grpc_error* XdsEdsResponseDecodeAndParse(const grpc_slice& encoded_response,
XdsLocalityInfo locality_info;
grpc_error* error = LocalityParse(endpoints[i], &locality_info);
if (error != GRPC_ERROR_NONE) return error;
// Filter out locality with weight 0.
if (locality_info.lb_weight == 0) continue;
update->locality_list.push_back(std::move(locality_info));
}
// The locality list is sorted here into deterministic order so that it's

@ -18,6 +18,7 @@
#include <memory>
#include <mutex>
#include <numeric>
#include <set>
#include <sstream>
#include <thread>
@ -316,9 +317,11 @@ class EdsServiceImpl : public EdsService {
gpr_log(GPR_INFO, "LB[%p]: shut down", this);
}
// TODO(juanlishen): Put the args into a struct.
static DiscoveryResponse BuildResponse(
const std::vector<std::vector<int>>& backend_ports,
const std::vector<int>& lb_weights = {},
size_t first_locality_name_index = 0,
const std::map<grpc::string, uint32_t>& drop_categories = {},
const FractionalPercent::DenominatorType denominator =
FractionalPercent::MILLION) {
@ -333,7 +336,8 @@ class EdsServiceImpl : public EdsService {
endpoints->mutable_locality()->set_region(kDefaultLocalityRegion);
endpoints->mutable_locality()->set_zone(kDefaultLocalityZone);
std::ostringstream sub_zone;
sub_zone << kDefaultLocalitySubzone << '_' << i;
sub_zone << kDefaultLocalitySubzone << '_'
<< first_locality_name_index + i;
endpoints->mutable_locality()->set_sub_zone(sub_zone.str());
for (const int& backend_port : backend_ports[i]) {
auto* lb_endpoints = endpoints->add_lb_endpoints();
@ -1114,8 +1118,102 @@ TEST_F(SingleBalancerTest, LocalityMapStressTest) {
// Wait until backend 1 is ready, before which kNumLocalities localities are
// removed by the xds policy.
WaitForBackend(1);
// The EDS service got a single request.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
// and sent two responses.
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
// Verifies that an EDS update can delete, keep, re-weight, and add localities,
// and that RPCs are redistributed across backends in proportion to the
// updated locality weights.
TEST_F(SingleBalancerTest, LocalityMapUpdate) {
SetNextResolution({}, kDefaultServiceConfig_.c_str());
SetNextResolutionForLbChannelAllBalancers();
const size_t kNumRpcs = 1000;
// The locality weight for the first 3 localities.
const std::vector<int> kLocalityWeights0 = {2, 3, 4};
const double kTotalLocalityWeight0 =
std::accumulate(kLocalityWeights0.begin(), kLocalityWeights0.end(), 0);
std::vector<double> locality_weight_rate_0;
for (int weight : kLocalityWeights0) {
locality_weight_rate_0.push_back(weight / kTotalLocalityWeight0);
}
// Delete the first locality, keep the second locality, change the third
// locality's weight from 4 to 2, and add a new locality with weight 6.
const std::vector<int> kLocalityWeights1 = {3, 2, 6};
const double kTotalLocalityWeight1 =
std::accumulate(kLocalityWeights1.begin(), kLocalityWeights1.end(), 0);
std::vector<double> locality_weight_rate_1 = {
0 /* placeholder for locality 0 */};
for (int weight : kLocalityWeights1) {
locality_weight_rate_1.push_back(weight / kTotalLocalityWeight1);
}
// First EDS response: backends 0-2, one per locality, weights {2, 3, 4}.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(0 /*start_index*/, 3 /*stop_index*/,
3 /*num_group*/),
kLocalityWeights0),
0);
// Second EDS response (sent after a 5s delay): backends 1-3, locality names
// starting at index 1, weights {3, 2, 6}.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(1 /*start_index*/, 4 /*stop_index*/,
3 /*num_group*/),
kLocalityWeights1, 1 /*first_locality_name_index*/),
5000);
// Wait for the first 3 backends to be ready.
WaitForAllBackends(1, 0, 3);
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
// The picking rates of the first 3 backends should be roughly equal to the
// expectation.
std::vector<double> locality_picked_rates;
for (size_t i = 0; i < 3; ++i) {
locality_picked_rates.push_back(
static_cast<double>(backends_[i]->backend_service()->request_count()) /
kNumRpcs);
}
// 20% tolerance on the weighted-distribution check, since picking is
// statistical.
const double kErrorTolerance = 0.2;
for (size_t i = 0; i < 3; ++i) {
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
::testing::Ge(locality_weight_rate_0[i] * (1 - kErrorTolerance)),
::testing::Le(locality_weight_rate_0[i] * (1 + kErrorTolerance))));
}
// Backend 3 hasn't received any request.
EXPECT_EQ(0U, backends_[3]->backend_service()->request_count());
// The EDS service got a single request, and sent a single response.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count());
// Wait until the locality update has been processed, as signaled by backend 3
// receiving a request.
WaitForBackend(3);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
// Send kNumRpcs RPCs.
CheckRpcSendOk(kNumRpcs);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
// Backend 0 no longer receives any request.
EXPECT_EQ(0U, backends_[0]->backend_service()->request_count());
// The picking rates of the last 3 backends should be roughly equal to the
// expectation.
locality_picked_rates = {0 /* placeholder for backend 0 */};
for (size_t i = 1; i < 4; ++i) {
locality_picked_rates.push_back(
static_cast<double>(backends_[i]->backend_service()->request_count()) /
kNumRpcs);
}
for (size_t i = 1; i < 4; ++i) {
EXPECT_THAT(
locality_picked_rates[i],
::testing::AllOf(
::testing::Ge(locality_weight_rate_1[i] * (1 - kErrorTolerance)),
::testing::Le(locality_weight_rate_1[i] * (1 + kErrorTolerance))));
}
// The EDS service got a single request.
EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count());
// and sent two responses.
EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count());
}
@ -1133,7 +1231,7 @@ TEST_F(SingleBalancerTest, Drop) {
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(), {},
GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}}),
0);
@ -1174,7 +1272,7 @@ TEST_F(SingleBalancerTest, DropPerHundred) {
// The EDS response contains one drop category.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {},
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerHundredForLb}},
FractionalPercent::HUNDRED),
0);
@ -1214,7 +1312,7 @@ TEST_F(SingleBalancerTest, DropPerTenThousand) {
// The EDS response contains one drop category.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {},
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerTenThousandForLb}},
FractionalPercent::TEN_THOUSAND),
0);
@ -1258,7 +1356,7 @@ TEST_F(SingleBalancerTest, DropUpdate) {
// The first EDS response contains one drop category.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {},
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerMillionForLb}}),
0);
// The second EDS response contains two drop categories.
@ -1268,7 +1366,7 @@ TEST_F(SingleBalancerTest, DropUpdate) {
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(), {},
GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}}),
10000);
@ -1354,7 +1452,7 @@ TEST_F(SingleBalancerTest, DropAll) {
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(), {},
GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}}),
0);
@ -1547,7 +1645,7 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) {
// Return a new balancer that sends a response to drop all calls.
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {},
EdsServiceImpl::BuildResponse(GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, 1000000}}),
0);
SetNextResolutionForLbChannelAllBalancers();
@ -2005,7 +2103,7 @@ TEST_F(SingleBalancerWithClientLoadReportingAndDropTest, Vanilla) {
ScheduleResponseForBalancer(
0,
EdsServiceImpl::BuildResponse(
GetBackendPortsInGroups(), {},
GetBackendPortsInGroups(), {}, 0,
{{kLbDropType, kDropPerMillionForLb},
{kThrottleDropType, kDropPerMillionForThrottle}}),
0);

Loading…
Cancel
Save