From 0fa6782cb838f28f940b62ce3d92f464f3040ed4 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 9 Mar 2020 16:30:31 -0700 Subject: [PATCH 01/37] Traffic Splitting RDS Policy Copy and pasted WeightedTarget to be the base of RDS. Commit the original WeightedTarget copy for easy comparison for future commits. --- .../client_channel/lb_policy/xds/rds.cc | 696 ++++++++++++++++++ 1 file changed, 696 insertions(+) create mode 100644 src/core/ext/filters/client_channel/lb_policy/xds/rds.cc diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc new file mode 100644 index 00000000000..af091fdfad6 --- /dev/null +++ b/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc @@ -0,0 +1,696 @@ +// +// Copyright 2018 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include + +#include +#include +#include + +#include "absl/strings/str_cat.h" + +#include + +#include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy_factory.h" +#include "src/core/ext/filters/client_channel/lb_policy_registry.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" +#include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/gpr/string.h" +#include "src/core/lib/gprpp/orphanable.h" +#include "src/core/lib/gprpp/ref_counted_ptr.h" +#include "src/core/lib/iomgr/combiner.h" +#include "src/core/lib/iomgr/timer.h" + +#define GRPC_WEIGHTED_TARGET_CHILD_RETENTION_INTERVAL_MS (15 * 60 * 1000) + +namespace grpc_core { + +TraceFlag grpc_lb_weighted_target_trace(false, "weighted_target_lb"); + +namespace { + +constexpr char kWeightedTarget[] = "weighted_target_experimental"; + +// Config for weighted_target LB policy. +class WeightedTargetLbConfig : public LoadBalancingPolicy::Config { + public: + struct ChildConfig { + uint32_t weight; + RefCountedPtr config; + }; + + using TargetMap = std::map; + + explicit WeightedTargetLbConfig(TargetMap target_map) + : target_map_(std::move(target_map)) {} + + const char* name() const override { return kWeightedTarget; } + + const TargetMap& target_map() const { return target_map_; } + + private: + TargetMap target_map_; +}; + +// weighted_target LB policy. +class WeightedTargetLb : public LoadBalancingPolicy { + public: + explicit WeightedTargetLb(Args args); + + const char* name() const override { return kWeightedTarget; } + + void UpdateLocked(UpdateArgs args) override; + void ExitIdleLocked() override; + void ResetBackoffLocked() override; + + private: + // A simple wrapper for ref-counting a picker from the child policy. + class ChildPickerWrapper : public RefCounted { + public: + explicit ChildPickerWrapper(std::unique_ptr picker) + : picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { + return picker_->Pick(std::move(args)); + } + private: + std::unique_ptr picker_; + }; + + // Picks a child using stateless WRR and then delegates to that + // child's picker. 
+ class WeightedPicker : public SubchannelPicker { + public: + // Maintains a weighted list of pickers from each child that is in + // ready state. The first element in the pair represents the end of a + // range proportional to the child's weight. The start of the range + // is the previous value in the vector and is 0 for the first element. + using PickerList = + InlinedVector>, 1>; + + WeightedPicker(RefCountedPtr parent, PickerList pickers) + : parent_(std::move(parent)), pickers_(std::move(pickers)) {} + ~WeightedPicker() { parent_.reset(DEBUG_LOCATION, "WeightedPicker"); } + + PickResult Pick(PickArgs args) override; + + private: + RefCountedPtr parent_; + PickerList pickers_; + }; + + // Each WeightedChild holds a ref to its parent WeightedTargetLb. + class WeightedChild : public InternallyRefCounted { + public: + WeightedChild(RefCountedPtr weighted_target_policy, + const std::string& name); + ~WeightedChild(); + + void Orphan() override; + + void UpdateLocked(const WeightedTargetLbConfig::ChildConfig& config, + const ServerAddressList& addresses, + const grpc_channel_args* args); + void ExitIdleLocked(); + void ResetBackoffLocked(); + void DeactivateLocked(); + + uint32_t weight() const { return weight_; } + grpc_connectivity_state connectivity_state() const { + return connectivity_state_; + } + RefCountedPtr picker_wrapper() const { + return picker_wrapper_; + } + + private: + class Helper : public ChannelControlHelper { + public: + explicit Helper(RefCountedPtr weighted_child) + : weighted_child_(std::move(weighted_child)) {} + + ~Helper() { weighted_child_.reset(DEBUG_LOCATION, "Helper"); } + + RefCountedPtr CreateSubchannel( + const grpc_channel_args& args) override; + void UpdateState(grpc_connectivity_state state, + std::unique_ptr picker) override; + void RequestReresolution() override; + void AddTraceEvent(TraceSeverity severity, StringView message) override; + + private: + RefCountedPtr weighted_child_; + }; + + // Methods for dealing with the child policy. + OrphanablePtr CreateChildPolicyLocked( + const grpc_channel_args* args); + + static void OnDelayedRemovalTimer(void* arg, grpc_error* error); + static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error); + + // The owning LB policy. + RefCountedPtr weighted_target_policy_; + + // Points to the corresponding key in WeightedTargetLb::targets_. + const std::string& name_; + + uint32_t weight_; + + OrphanablePtr child_policy_; + + RefCountedPtr picker_wrapper_; + grpc_connectivity_state connectivity_state_ = GRPC_CHANNEL_IDLE; + bool seen_failure_since_ready_ = false; + + // States for delayed removal. + grpc_timer delayed_removal_timer_; + grpc_closure on_delayed_removal_timer_; + bool delayed_removal_timer_callback_pending_ = false; + bool shutdown_ = false; + }; + + ~WeightedTargetLb(); + + void ShutdownLocked() override; + + void UpdateStateLocked(); + + const grpc_millis child_retention_interval_ms_; + + // Current config from the resolver. + RefCountedPtr config_; + + // Internal state. + bool shutting_down_ = false; + + // Children. + std::map> targets_; +}; + +// +// WeightedTargetLb::WeightedPicker +// + +WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick( + PickArgs args) { + // Generate a random number in [0, total weight). + const uint32_t key = rand() % pickers_[pickers_.size() - 1].first; + // Find the index in pickers_ corresponding to key. 
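+  // Note: pickers_ holds cumulative weights in ascending order. For
+  // example, two children with weights 30 and 70 are stored as
+  // {30, child_a} and {100, child_b}; a key of 42 falls in [30, 100) and
+  // therefore selects child_b. The loop below is a binary search for the
+  // first entry whose cumulative weight is greater than key.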
+ size_t mid = 0; + size_t start_index = 0; + size_t end_index = pickers_.size() - 1; + size_t index = 0; + while (end_index > start_index) { + mid = (start_index + end_index) / 2; + if (pickers_[mid].first > key) { + end_index = mid; + } else if (pickers_[mid].first < key) { + start_index = mid + 1; + } else { + index = mid + 1; + break; + } + } + if (index == 0) index = start_index; + GPR_ASSERT(pickers_[index].first > key); + // Delegate to the child picker. + return pickers_[index].second->Pick(args); +} + +// +// WeightedTargetLb +// + +WeightedTargetLb::WeightedTargetLb(Args args) + : LoadBalancingPolicy(std::move(args)), +// FIXME: new channel arg + child_retention_interval_ms_(grpc_channel_args_find_integer( + args.args, GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS, + {GRPC_WEIGHTED_TARGET_CHILD_RETENTION_INTERVAL_MS, 0, INT_MAX})) {} + +WeightedTargetLb::~WeightedTargetLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] destroying weighted_target LB policy", + this); + } +} + +void WeightedTargetLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this); + } + shutting_down_ = true; + targets_.clear(); +} + +void WeightedTargetLb::ExitIdleLocked() { + for (auto& p : targets_) p.second->ExitIdleLocked(); +} + +void WeightedTargetLb::ResetBackoffLocked() { + for (auto& p : targets_) p.second->ResetBackoffLocked(); +} + +void WeightedTargetLb::UpdateLocked(UpdateArgs args) { + if (shutting_down_) return; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] Received update", this); + } + // Update config. + config_ = std::move(args.config); + // Deactivate the targets not in the new config. + for (auto it = targets_.begin(); it != targets_.end();) { + const std::string& name = it->first; + WeightedChild* child = it->second.get(); + if (config_->target_map().find(name) != config_->target_map().end()) { + ++it; + continue; + } + if (child_retention_interval_ms_ == 0) { + it = targets_.erase(it); + } else { + child->DeactivateLocked(); + ++it; + } + } + // Add or update the targets in the new config. + for (const auto& p : config_->target_map()) { + const std::string& name = p.first; + const WeightedTargetLbConfig::ChildConfig& config = p.second; + auto it = targets_.find(name); + if (it == targets_.end()) { + it = targets_.emplace(std::make_pair(name, nullptr)).first; + it->second = MakeOrphanable( + Ref(DEBUG_LOCATION, "WeightedChild"), it->first); + } + it->second->UpdateLocked(config, args.addresses, args.args); + } +} + +void WeightedTargetLb::UpdateStateLocked() { + // Construct a new picker which maintains a map of all child pickers + // that are ready. Each child is represented by a portion of the range + // proportional to its weight, such that the total range is the sum of the + // weights of all children. + WeightedPicker::PickerList picker_list; + uint32_t end = 0; + // Also count the number of children in each state, to determine the + // overall state. + size_t num_connecting = 0; + size_t num_idle = 0; + size_t num_transient_failures = 0; + for (const auto& p : targets_) { + const auto& child_name = p.first; + const WeightedChild* child = p.second.get(); + // Skip the targets that are not in the latest update. 
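+    // (A deactivated child stays in targets_ until its retention timer
+    // fires, so it can still appear here even though it is no longer in
+    // the config; such children must not contribute to the new picker.)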
+ if (config_->target_map().find(child_name) == config_->target_map().end()) { + continue; + } + switch (child->connectivity_state()) { + case GRPC_CHANNEL_READY: { + end += child->weight(); + picker_list.push_back(std::make_pair(end, child->picker_wrapper())); + break; + } + case GRPC_CHANNEL_CONNECTING: { + ++num_connecting; + break; + } + case GRPC_CHANNEL_IDLE: { + ++num_idle; + break; + } + case GRPC_CHANNEL_TRANSIENT_FAILURE: { + ++num_transient_failures; + break; + } + default: + GPR_UNREACHABLE_CODE(return ); + } + } + // Determine aggregated connectivity state. + grpc_connectivity_state connectivity_state; + if (picker_list.size() > 0) { + connectivity_state = GRPC_CHANNEL_READY; + } else if (num_connecting > 0) { + connectivity_state = GRPC_CHANNEL_CONNECTING; + } else if (num_idle > 0) { + connectivity_state = GRPC_CHANNEL_IDLE; + } else { + connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE; + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s", + this, ConnectivityStateName(connectivity_state)); + } + std::unique_ptr picker; + switch (connectivity_state) { + case GRPC_CHANNEL_READY: + picker = absl::make_unique( + Ref(DEBUG_LOCATION, "WeightedPicker"), std::move(picker_list)); + break; + case GRPC_CHANNEL_CONNECTING: + case GRPC_CHANNEL_IDLE: + picker = absl::make_unique( + Ref(DEBUG_LOCATION, "QueuePicker")); + break; + default: + picker = absl::make_unique( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "weighted_target: all children report state TRANSIENT_FAILURE")); + } + channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); +} + +// +// WeightedTargetLb::WeightedChild +// + +WeightedTargetLb::WeightedChild::WeightedChild( + RefCountedPtr weighted_target_policy, + const std::string& name) + : weighted_target_policy_(std::move(weighted_target_policy)), name_(name) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s", + weighted_target_policy_.get(), this, name_.c_str()); + } +} + +WeightedTargetLb::WeightedChild::~WeightedChild() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: destroying child", + weighted_target_policy_.get(), this, name_.c_str()); + } + weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild"); +} + +void WeightedTargetLb::WeightedChild::Orphan() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: shutting down child", + weighted_target_policy_.get(), this, name_.c_str()); + } + // Remove the child policy's interested_parties pollset_set from the + // xDS policy. + grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), + weighted_target_policy_->interested_parties()); + child_policy_.reset(); + // Drop our ref to the child's picker, in case it's holding a ref to + // the child. 
+ picker_wrapper_.reset(); + if (delayed_removal_timer_callback_pending_) { + grpc_timer_cancel(&delayed_removal_timer_); + } + shutdown_ = true; + Unref(); +} + +OrphanablePtr +WeightedTargetLb::WeightedChild::CreateChildPolicyLocked( + const grpc_channel_args* args) { + LoadBalancingPolicy::Args lb_policy_args; + lb_policy_args.combiner = weighted_target_policy_->combiner(); + lb_policy_args.args = args; + lb_policy_args.channel_control_helper = + absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); + OrphanablePtr lb_policy = + MakeOrphanable(std::move(lb_policy_args), + &grpc_lb_weighted_target_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: Created new child " + "policy handler %p", + weighted_target_policy_.get(), this, name_.c_str(), + lb_policy.get()); + } + // Add the xDS's interested_parties pollset_set to that of the newly created + // child policy. This will make the child policy progress upon activity on + // xDS LB, which in turn is tied to the application's call. + grpc_pollset_set_add_pollset_set( + lb_policy->interested_parties(), + weighted_target_policy_->interested_parties()); + return lb_policy; +} + +void WeightedTargetLb::WeightedChild::UpdateLocked( + const WeightedTargetLbConfig::ChildConfig& config, + const ServerAddressList& addresses, const grpc_channel_args* args) { + if (weighted_target_policy_->shutting_down_) return; + // Update child weight. + weight_ = config.weight; + // Reactivate if needed. + if (delayed_removal_timer_callback_pending_) { + grpc_timer_cancel(&delayed_removal_timer_); + } + // Create child policy if needed. + if (child_policy_ == nullptr) { + child_policy_ = CreateChildPolicyLocked(args); + } + // Construct update args. + UpdateArgs update_args; + update_args.config = config.config; + update_args.addresses = addresses; + update_args.args = grpc_channel_args_copy(args); + // Update the policy. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + gpr_log(GPR_INFO, + "[weighted_target_lb %p] WeightedChild %p %s: Updating child " + "policy handler %p", + weighted_target_policy_.get(), this, name_.c_str(), + child_policy_.get()); + } + child_policy_->UpdateLocked(std::move(update_args)); +} + +void WeightedTargetLb::WeightedChild::ExitIdleLocked() { + child_policy_->ExitIdleLocked(); +} + +void WeightedTargetLb::WeightedChild::ResetBackoffLocked() { + child_policy_->ResetBackoffLocked(); +} + +void WeightedTargetLb::WeightedChild::DeactivateLocked() { + // If already deactivated, don't do that again. + if (weight_ == 0) return; + // Set the child weight to 0 so that future picker won't contain this child. + weight_ = 0; + // Start a timer to delete the child. 
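+  // The ref taken here is released manually; it keeps this WeightedChild
+  // alive while the removal timer is pending and is balanced by the
+  // Unref() in OnDelayedRemovalTimerLocked().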
+ Ref(DEBUG_LOCATION, "WeightedChild+timer").release(); + GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, + grpc_schedule_on_exec_ctx); + grpc_timer_init( + &delayed_removal_timer_, + ExecCtx::Get()->Now() + + weighted_target_policy_->child_retention_interval_ms_, + &on_delayed_removal_timer_); + delayed_removal_timer_callback_pending_ = true; +} + +void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg, + grpc_error* error) { + WeightedChild* self = static_cast(arg); + self->weighted_target_policy_->combiner()->Run( + GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, + OnDelayedRemovalTimerLocked, self, nullptr), + GRPC_ERROR_REF(error)); +} + +void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked( + void* arg, grpc_error* error) { + WeightedChild* self = static_cast(arg); + self->delayed_removal_timer_callback_pending_ = false; + if (error == GRPC_ERROR_NONE && !self->shutdown_ && self->weight_ == 0) { + self->weighted_target_policy_->targets_.erase(self->name_); + } + self->Unref(DEBUG_LOCATION, "WeightedChild+timer"); +} + +// +// WeightedTargetLb::WeightedChild::Helper +// + +RefCountedPtr +WeightedTargetLb::WeightedChild::Helper::CreateSubchannel( + const grpc_channel_args& args) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return nullptr; + return weighted_child_->weighted_target_policy_->channel_control_helper()->CreateSubchannel( + args); +} + +void WeightedTargetLb::WeightedChild::Helper::UpdateState( + grpc_connectivity_state state, std::unique_ptr picker) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + // Cache the picker in the WeightedChild. + weighted_child_->picker_wrapper_ = + MakeRefCounted(std::move(picker)); + // Decide what state to report for aggregation purposes. + // If we haven't seen a failure since the last time we were in state + // READY, then we report the state change as-is. However, once we do see + // a failure, we report TRANSIENT_FAILURE and ignore any subsequent state + // changes until we go back into state READY. + if (!weighted_child_->seen_failure_since_ready_) { + if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { + weighted_child_->seen_failure_since_ready_ = true; + } + } else { + if (state != GRPC_CHANNEL_READY) return; + weighted_child_->seen_failure_since_ready_ = false; + } + weighted_child_->connectivity_state_ = state; + // Notify the LB policy. 
+ weighted_child_->weighted_target_policy_->UpdateStateLocked(); +} + +void WeightedTargetLb::WeightedChild::Helper::RequestReresolution() { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + weighted_child_->weighted_target_policy_->channel_control_helper()->RequestReresolution(); +} + +void WeightedTargetLb::WeightedChild::Helper::AddTraceEvent(TraceSeverity severity, + StringView message) { + if (weighted_child_->weighted_target_policy_->shutting_down_) return; + weighted_child_->weighted_target_policy_->channel_control_helper()->AddTraceEvent( + severity, message); +} + +// +// factory +// + +class WeightedTargetLbFactory : public LoadBalancingPolicyFactory { + public: + OrphanablePtr CreateLoadBalancingPolicy( + LoadBalancingPolicy::Args args) const override { + return MakeOrphanable(std::move(args)); + } + + const char* name() const override { return kWeightedTarget; } + + RefCountedPtr ParseLoadBalancingConfig( + const Json& json, grpc_error** error) const override { + GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); + if (json.type() == Json::Type::JSON_NULL) { + // weighted_target was mentioned as a policy in the deprecated + // loadBalancingPolicy field or in the client API. + *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:loadBalancingPolicy error:weighted_target policy requires " + "configuration. Please use loadBalancingConfig field of service " + "config instead."); + return nullptr; + } + std::vector error_list; + // Weight map. + WeightedTargetLbConfig::TargetMap target_map; + auto it = json.object_value().find("targets"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:targets error:required field not present")); + } else if (it->second.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:targets error:type should be object")); + } else { + for (const auto& p : it->second.object_value()) { + WeightedTargetLbConfig::ChildConfig child_config; + std::vector child_errors = + ParseChildConfig(p.second, &child_config); + if (!child_errors.empty()) { + // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error + // string is not static in this case. + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:targets key:", p.first).c_str()); + for (grpc_error* child_error : child_errors) { + error = grpc_error_add_child(error, child_error); + } + error_list.push_back(error); + } else { + target_map[p.first] = std::move(child_config); + } + } + } + if (!error_list.empty()) { + *error = GRPC_ERROR_CREATE_FROM_VECTOR( + "weighted_target_experimental LB policy config", &error_list); + return nullptr; + } + return MakeRefCounted(std::move(target_map)); + } + + private: + static std::vector ParseChildConfig( + const Json& json, WeightedTargetLbConfig::ChildConfig* child_config) { + std::vector error_list; + if (json.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "value should be of type object")); + return error_list; + } + // Weight. 
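+    // Note: grpc_core::Json keeps NUMBER values in their original string
+    // form, which is presumably why the weight is validated as a NUMBER
+    // but then read back via string_value() and parsed with
+    // gpr_parse_nonnegative_int() below.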
+    auto it = json.object_value().find("weight");
+    if (it == json.object_value().end()) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "required field \"weight\" not specified"));
+    } else if (it->second.type() != Json::Type::NUMBER) {
+      error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+          "field:weight error:must be of type number"));
+    } else {
+      child_config->weight =
+          gpr_parse_nonnegative_int(it->second.string_value().c_str());
+      if (child_config->weight == -1) {
+        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+            "field:weight error:unparseable value"));
+      } else if (child_config->weight == 0) {
+        error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+            "field:weight error:value must be greater than zero"));
+      }
+    }
+    // Child policy.
+    it = json.object_value().find("childPolicy");
+    if (it != json.object_value().end()) {
+      grpc_error* parse_error = GRPC_ERROR_NONE;
+      child_config->config =
+          LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second,
+                                                                &parse_error);
+      if (child_config->config == nullptr) {
+        GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE);
+        std::vector child_errors;
+        child_errors.push_back(parse_error);
+        error_list.push_back(
+            GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors));
+      }
+    }
+    return error_list;
+  }
+};
+
+}  // namespace
+
+}  // namespace grpc_core
+
+//
+// Plugin registration
+//
+
+void grpc_lb_policy_weighted_target_init() {
+  grpc_core::LoadBalancingPolicyRegistry::Builder::
+      RegisterLoadBalancingPolicyFactory(
+          absl::make_unique());
+}
+
+void grpc_lb_policy_weighted_target_shutdown() {}

From 7a146722db8cd12a8a710bd91ee86a5cbdf25ea8 Mon Sep 17 00:00:00 2001
From: Donna Dionne
Date: Mon, 9 Mar 2020 16:47:24 -0700
Subject: [PATCH 02/37] Hard-coded a service config to point to
 xds_routing_experimental

With just 1 action, which has 1 child policy: cds_experimental.
This basically wraps the previous cds_experimental config inside the new
xds_routing_experimental policy.

Tested to make sure all current tests still pass. This is just skeleton
code to allow new parsing code to be added and tested.
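For reference, the service config generated by XdsClient::CreateServiceConfig()
after this change (see the xds_client.cc hunk below) looks roughly like this,
with the cluster name substituted for %s:

  {
    "loadBalancingConfig": [
      { "xds_routing_experimental": {
          "actions": [
            { "name": "default",
              "child_policy": [
                { "cds_experimental": { "cluster": "<cluster name>" } }
              ]
            }
          ]
      } }
    ]
  }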
--- BUILD | 26 + .../client_channel/lb_policy/xds/rds.cc | 456 ++++++++---------- .../filters/client_channel/xds/xds_client.cc | 11 +- .../plugin_registry/grpc_plugin_registry.cc | 4 + .../grpc_unsecure_plugin_registry.cc | 4 + test/cpp/end2end/xds_end2end_test.cc | 6 +- 6 files changed, 246 insertions(+), 261 deletions(-) diff --git a/BUILD b/BUILD index 8fe2d6d2e15..a0c9247e74e 100644 --- a/BUILD +++ b/BUILD @@ -320,6 +320,7 @@ grpc_cc_library( "grpc_common", "grpc_lb_policy_cds", "grpc_lb_policy_grpclb", + "grpc_lb_policy_rds", "grpc_lb_policy_xds", "grpc_resolver_xds", ], @@ -338,6 +339,7 @@ grpc_cc_library( "grpc_common", "grpc_lb_policy_cds_secure", "grpc_lb_policy_grpclb_secure", + "grpc_lb_policy_rds_secure", "grpc_lb_policy_xds_secure", "grpc_resolver_xds_secure", "grpc_secure", @@ -1397,6 +1399,30 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "grpc_lb_policy_rds", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/xds/rds.cc", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + ], +) + +grpc_cc_library( + name = "grpc_lb_policy_rds_secure", + srcs = [ + "src/core/ext/filters/client_channel/lb_policy/xds/rds.cc", + ], + language = "c++", + deps = [ + "grpc_base", + "grpc_client_channel", + ], +) + grpc_cc_library( name = "grpc_lb_subchannel_list", hdrs = [ diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc index af091fdfad6..6a730d2a512 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc @@ -25,9 +25,9 @@ #include #include "src/core/ext/filters/client_channel/lb_policy.h" +#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" #include "src/core/ext/filters/client_channel/lb_policy_factory.h" #include "src/core/ext/filters/client_channel/lb_policy_registry.h" -#include "src/core/ext/filters/client_channel/lb_policy/child_policy_handler.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/gpr/string.h" #include "src/core/lib/gprpp/orphanable.h" @@ -39,39 +39,38 @@ namespace grpc_core { -TraceFlag grpc_lb_weighted_target_trace(false, "weighted_target_lb"); +TraceFlag grpc_rds_lb_trace(false, "rds_lb"); namespace { -constexpr char kWeightedTarget[] = "weighted_target_experimental"; +constexpr char kRds[] = "xds_routing_experimental"; -// Config for weighted_target LB policy. -class WeightedTargetLbConfig : public LoadBalancingPolicy::Config { +// Config for rds LB policy. +class RdsLbConfig : public LoadBalancingPolicy::Config { public: struct ChildConfig { - uint32_t weight; RefCountedPtr config; }; - using TargetMap = std::map; + using ActionMap = std::map; - explicit WeightedTargetLbConfig(TargetMap target_map) - : target_map_(std::move(target_map)) {} + explicit RdsLbConfig(ActionMap action_map) + : action_map_(std::move(action_map)) {} - const char* name() const override { return kWeightedTarget; } + const char* name() const override { return kRds; } - const TargetMap& target_map() const { return target_map_; } + const ActionMap& action_map() const { return action_map_; } private: - TargetMap target_map_; + ActionMap action_map_; }; -// weighted_target LB policy. -class WeightedTargetLb : public LoadBalancingPolicy { +// rds LB policy. 
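+// Note: in this first skeleton, RdsLb keeps one child policy per configured
+// action (see actions_ below), each wrapped in a ChildPolicyHandler, and the
+// picker simply delegates to the first ready child; real route-based picking
+// is not implemented yet.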
+class RdsLb : public LoadBalancingPolicy { public: - explicit WeightedTargetLb(Args args); + explicit RdsLb(Args args); - const char* name() const override { return kWeightedTarget; } + const char* name() const override { return kRds; } void UpdateLocked(UpdateArgs args) override; void ExitIdleLocked() override; @@ -83,53 +82,48 @@ class WeightedTargetLb : public LoadBalancingPolicy { public: explicit ChildPickerWrapper(std::unique_ptr picker) : picker_(std::move(picker)) {} - PickResult Pick(PickArgs args) { - return picker_->Pick(std::move(args)); - } + PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } + private: std::unique_ptr picker_; }; // Picks a child using stateless WRR and then delegates to that // child's picker. - class WeightedPicker : public SubchannelPicker { + class RdsPicker : public SubchannelPicker { public: - // Maintains a weighted list of pickers from each child that is in + // Maintains a rds list of pickers from each child that is in // ready state. The first element in the pair represents the end of a // range proportional to the child's weight. The start of the range // is the previous value in the vector and is 0 for the first element. - using PickerList = - InlinedVector>, 1>; + using PickerList = InlinedVector, 1>; - WeightedPicker(RefCountedPtr parent, PickerList pickers) + RdsPicker(RefCountedPtr parent, PickerList pickers) : parent_(std::move(parent)), pickers_(std::move(pickers)) {} - ~WeightedPicker() { parent_.reset(DEBUG_LOCATION, "WeightedPicker"); } + ~RdsPicker() { parent_.reset(DEBUG_LOCATION, "RdsPicker"); } PickResult Pick(PickArgs args) override; private: - RefCountedPtr parent_; + RefCountedPtr parent_; PickerList pickers_; }; - // Each WeightedChild holds a ref to its parent WeightedTargetLb. - class WeightedChild : public InternallyRefCounted { + // Each RdsChild holds a ref to its parent RdsLb. + class RdsChild : public InternallyRefCounted { public: - WeightedChild(RefCountedPtr weighted_target_policy, - const std::string& name); - ~WeightedChild(); + RdsChild(RefCountedPtr rds_policy, const std::string& name); + ~RdsChild(); void Orphan() override; - void UpdateLocked(const WeightedTargetLbConfig::ChildConfig& config, + void UpdateLocked(const RdsLbConfig::ChildConfig& config, const ServerAddressList& addresses, const grpc_channel_args* args); void ExitIdleLocked(); void ResetBackoffLocked(); void DeactivateLocked(); - uint32_t weight() const { return weight_; } grpc_connectivity_state connectivity_state() const { return connectivity_state_; } @@ -140,10 +134,10 @@ class WeightedTargetLb : public LoadBalancingPolicy { private: class Helper : public ChannelControlHelper { public: - explicit Helper(RefCountedPtr weighted_child) - : weighted_child_(std::move(weighted_child)) {} + explicit Helper(RefCountedPtr rds_child) + : rds_child_(std::move(rds_child)) {} - ~Helper() { weighted_child_.reset(DEBUG_LOCATION, "Helper"); } + ~Helper() { rds_child_.reset(DEBUG_LOCATION, "Helper"); } RefCountedPtr CreateSubchannel( const grpc_channel_args& args) override; @@ -153,7 +147,7 @@ class WeightedTargetLb : public LoadBalancingPolicy { void AddTraceEvent(TraceSeverity severity, StringView message) override; private: - RefCountedPtr weighted_child_; + RefCountedPtr rds_child_; }; // Methods for dealing with the child policy. @@ -164,13 +158,11 @@ class WeightedTargetLb : public LoadBalancingPolicy { static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error); // The owning LB policy. 
- RefCountedPtr weighted_target_policy_; + RefCountedPtr rds_policy_; - // Points to the corresponding key in WeightedTargetLb::targets_. + // Points to the corresponding key in RdsLb::actions_. const std::string& name_; - uint32_t weight_; - OrphanablePtr child_policy_; RefCountedPtr picker_wrapper_; @@ -184,7 +176,7 @@ class WeightedTargetLb : public LoadBalancingPolicy { bool shutdown_ = false; }; - ~WeightedTargetLb(); + ~RdsLb(); void ShutdownLocked() override; @@ -193,138 +185,116 @@ class WeightedTargetLb : public LoadBalancingPolicy { const grpc_millis child_retention_interval_ms_; // Current config from the resolver. - RefCountedPtr config_; + RefCountedPtr config_; // Internal state. bool shutting_down_ = false; // Children. - std::map> targets_; + std::map> actions_; }; // -// WeightedTargetLb::WeightedPicker +// RdsLb::RdsPicker // -WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick( - PickArgs args) { - // Generate a random number in [0, total weight). - const uint32_t key = rand() % pickers_[pickers_.size() - 1].first; - // Find the index in pickers_ corresponding to key. - size_t mid = 0; - size_t start_index = 0; - size_t end_index = pickers_.size() - 1; - size_t index = 0; - while (end_index > start_index) { - mid = (start_index + end_index) / 2; - if (pickers_[mid].first > key) { - end_index = mid; - } else if (pickers_[mid].first < key) { - start_index = mid + 1; - } else { - index = mid + 1; - break; - } - } - if (index == 0) index = start_index; - GPR_ASSERT(pickers_[index].first > key); - // Delegate to the child picker. - return pickers_[index].second->Pick(args); +RdsLb::PickResult RdsLb::RdsPicker::Pick(PickArgs args) { + gpr_log( + GPR_INFO, + "donna Picking not implemented yet, just always use the one and only"); + return pickers_[0]->Pick(args); } // -// WeightedTargetLb +// RdsLb // -WeightedTargetLb::WeightedTargetLb(Args args) +RdsLb::RdsLb(Args args) : LoadBalancingPolicy(std::move(args)), -// FIXME: new channel arg + // FIXME: new channel arg child_retention_interval_ms_(grpc_channel_args_find_integer( args.args, GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS, {GRPC_WEIGHTED_TARGET_CHILD_RETENTION_INTERVAL_MS, 0, INT_MAX})) {} -WeightedTargetLb::~WeightedTargetLb() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, "[weighted_target_lb %p] destroying weighted_target LB policy", - this); +RdsLb::~RdsLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] destroying rds LB policy", this); } } -void WeightedTargetLb::ShutdownLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this); +void RdsLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] shutting down", this); } shutting_down_ = true; - targets_.clear(); + actions_.clear(); } -void WeightedTargetLb::ExitIdleLocked() { - for (auto& p : targets_) p.second->ExitIdleLocked(); +void RdsLb::ExitIdleLocked() { + for (auto& p : actions_) p.second->ExitIdleLocked(); } -void WeightedTargetLb::ResetBackoffLocked() { - for (auto& p : targets_) p.second->ResetBackoffLocked(); +void RdsLb::ResetBackoffLocked() { + for (auto& p : actions_) p.second->ResetBackoffLocked(); } -void WeightedTargetLb::UpdateLocked(UpdateArgs args) { +void RdsLb::UpdateLocked(UpdateArgs args) { if (shutting_down_) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, "[weighted_target_lb 
%p] Received update", this); + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] Received update", this); } // Update config. config_ = std::move(args.config); - // Deactivate the targets not in the new config. - for (auto it = targets_.begin(); it != targets_.end();) { + // Deactivate the actions not in the new config. + for (auto it = actions_.begin(); it != actions_.end();) { const std::string& name = it->first; - WeightedChild* child = it->second.get(); - if (config_->target_map().find(name) != config_->target_map().end()) { + RdsChild* child = it->second.get(); + if (config_->action_map().find(name) != config_->action_map().end()) { ++it; continue; } if (child_retention_interval_ms_ == 0) { - it = targets_.erase(it); + it = actions_.erase(it); } else { child->DeactivateLocked(); ++it; } } - // Add or update the targets in the new config. - for (const auto& p : config_->target_map()) { + // Add or update the actions in the new config. + for (const auto& p : config_->action_map()) { const std::string& name = p.first; - const WeightedTargetLbConfig::ChildConfig& config = p.second; - auto it = targets_.find(name); - if (it == targets_.end()) { - it = targets_.emplace(std::make_pair(name, nullptr)).first; - it->second = MakeOrphanable( - Ref(DEBUG_LOCATION, "WeightedChild"), it->first); + const RdsLbConfig::ChildConfig& config = p.second; + auto it = actions_.find(name); + if (it == actions_.end()) { + it = actions_.emplace(std::make_pair(name, nullptr)).first; + it->second = + MakeOrphanable(Ref(DEBUG_LOCATION, "RdsChild"), it->first); } it->second->UpdateLocked(config, args.addresses, args.args); } } -void WeightedTargetLb::UpdateStateLocked() { +void RdsLb::UpdateStateLocked() { // Construct a new picker which maintains a map of all child pickers // that are ready. Each child is represented by a portion of the range // proportional to its weight, such that the total range is the sum of the // weights of all children. - WeightedPicker::PickerList picker_list; - uint32_t end = 0; + RdsPicker::PickerList picker_list; // Also count the number of children in each state, to determine the // overall state. size_t num_connecting = 0; size_t num_idle = 0; size_t num_transient_failures = 0; - for (const auto& p : targets_) { + for (const auto& p : actions_) { const auto& child_name = p.first; - const WeightedChild* child = p.second.get(); - // Skip the targets that are not in the latest update. - if (config_->target_map().find(child_name) == config_->target_map().end()) { + const RdsChild* child = p.second.get(); + // Skip the actions that are not in the latest update. 
+ if (config_->action_map().find(child_name) == config_->action_map().end()) { continue; } switch (child->connectivity_state()) { case GRPC_CHANNEL_READY: { - end += child->weight(); - picker_list.push_back(std::make_pair(end, child->picker_wrapper())); + picker_list.push_back(child->picker_wrapper()); break; } case GRPC_CHANNEL_CONNECTING: { @@ -354,62 +324,59 @@ void WeightedTargetLb::UpdateStateLocked() { } else { connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE; } - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s", - this, ConnectivityStateName(connectivity_state)); + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] connectivity changed to %s", this, + ConnectivityStateName(connectivity_state)); } std::unique_ptr picker; switch (connectivity_state) { case GRPC_CHANNEL_READY: - picker = absl::make_unique( - Ref(DEBUG_LOCATION, "WeightedPicker"), std::move(picker_list)); + picker = absl::make_unique(Ref(DEBUG_LOCATION, "RdsPicker"), + std::move(picker_list)); break; case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_IDLE: - picker = absl::make_unique( - Ref(DEBUG_LOCATION, "QueuePicker")); + picker = + absl::make_unique(Ref(DEBUG_LOCATION, "QueuePicker")); break; default: picker = absl::make_unique( GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "weighted_target: all children report state TRANSIENT_FAILURE")); + "rds: all children report state TRANSIENT_FAILURE")); } channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); } // -// WeightedTargetLb::WeightedChild +// RdsLb::RdsChild // -WeightedTargetLb::WeightedChild::WeightedChild( - RefCountedPtr weighted_target_policy, - const std::string& name) - : weighted_target_policy_(std::move(weighted_target_policy)), name_(name) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s", - weighted_target_policy_.get(), this, name_.c_str()); +RdsLb::RdsChild::RdsChild(RefCountedPtr rds_policy, + const std::string& name) + : rds_policy_(std::move(rds_policy)), name_(name) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] created RdsChild %p for %s", + rds_policy_.get(), this, name_.c_str()); } } -WeightedTargetLb::WeightedChild::~WeightedChild() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, - "[weighted_target_lb %p] WeightedChild %p %s: destroying child", - weighted_target_policy_.get(), this, name_.c_str()); +RdsLb::RdsChild::~RdsChild() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] RdsChild %p %s: destroying child", + rds_policy_.get(), this, name_.c_str()); } - weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild"); + rds_policy_.reset(DEBUG_LOCATION, "RdsChild"); } -void WeightedTargetLb::WeightedChild::Orphan() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { - gpr_log(GPR_INFO, - "[weighted_target_lb %p] WeightedChild %p %s: shutting down child", - weighted_target_policy_.get(), this, name_.c_str()); +void RdsLb::RdsChild::Orphan() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + gpr_log(GPR_INFO, "[rds_lb %p] RdsChild %p %s: shutting down child", + rds_policy_.get(), this, name_.c_str()); } // Remove the child policy's interested_parties pollset_set from the // xDS policy. 
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), - weighted_target_policy_->interested_parties()); + rds_policy_->interested_parties()); child_policy_.reset(); // Drop our ref to the child's picker, in case it's holding a ref to // the child. @@ -421,39 +388,35 @@ void WeightedTargetLb::WeightedChild::Orphan() { Unref(); } -OrphanablePtr -WeightedTargetLb::WeightedChild::CreateChildPolicyLocked( +OrphanablePtr RdsLb::RdsChild::CreateChildPolicyLocked( const grpc_channel_args* args) { LoadBalancingPolicy::Args lb_policy_args; - lb_policy_args.combiner = weighted_target_policy_->combiner(); + lb_policy_args.combiner = rds_policy_->combiner(); lb_policy_args.args = args; lb_policy_args.channel_control_helper = absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); OrphanablePtr lb_policy = MakeOrphanable(std::move(lb_policy_args), - &grpc_lb_weighted_target_trace); - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + &grpc_rds_lb_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { gpr_log(GPR_INFO, - "[weighted_target_lb %p] WeightedChild %p %s: Created new child " + "[rds_lb %p] RdsChild %p %s: Created new child " "policy handler %p", - weighted_target_policy_.get(), this, name_.c_str(), - lb_policy.get()); + rds_policy_.get(), this, name_.c_str(), lb_policy.get()); } // Add the xDS's interested_parties pollset_set to that of the newly created // child policy. This will make the child policy progress upon activity on // xDS LB, which in turn is tied to the application's call. - grpc_pollset_set_add_pollset_set( - lb_policy->interested_parties(), - weighted_target_policy_->interested_parties()); + grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), + rds_policy_->interested_parties()); return lb_policy; } -void WeightedTargetLb::WeightedChild::UpdateLocked( - const WeightedTargetLbConfig::ChildConfig& config, - const ServerAddressList& addresses, const grpc_channel_args* args) { - if (weighted_target_policy_->shutting_down_) return; +void RdsLb::RdsChild::UpdateLocked(const RdsLbConfig::ChildConfig& config, + const ServerAddressList& addresses, + const grpc_channel_args* args) { + if (rds_policy_->shutting_down_) return; // Update child weight. - weight_ = config.weight; // Reactivate if needed. if (delayed_removal_timer_callback_pending_) { grpc_timer_cancel(&delayed_removal_timer_); @@ -468,212 +431,193 @@ void WeightedTargetLb::WeightedChild::UpdateLocked( update_args.addresses = addresses; update_args.args = grpc_channel_args_copy(args); // Update the policy. - if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { gpr_log(GPR_INFO, - "[weighted_target_lb %p] WeightedChild %p %s: Updating child " + "[rds_lb %p] RdsChild %p %s: Updating child " "policy handler %p", - weighted_target_policy_.get(), this, name_.c_str(), - child_policy_.get()); + rds_policy_.get(), this, name_.c_str(), child_policy_.get()); } child_policy_->UpdateLocked(std::move(update_args)); } -void WeightedTargetLb::WeightedChild::ExitIdleLocked() { - child_policy_->ExitIdleLocked(); -} +void RdsLb::RdsChild::ExitIdleLocked() { child_policy_->ExitIdleLocked(); } -void WeightedTargetLb::WeightedChild::ResetBackoffLocked() { +void RdsLb::RdsChild::ResetBackoffLocked() { child_policy_->ResetBackoffLocked(); } -void WeightedTargetLb::WeightedChild::DeactivateLocked() { +void RdsLb::RdsChild::DeactivateLocked() { // If already deactivated, don't do that again. 
- if (weight_ == 0) return; // Set the child weight to 0 so that future picker won't contain this child. - weight_ = 0; // Start a timer to delete the child. - Ref(DEBUG_LOCATION, "WeightedChild+timer").release(); + Ref(DEBUG_LOCATION, "RdsChild+timer").release(); GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, grpc_schedule_on_exec_ctx); grpc_timer_init( &delayed_removal_timer_, - ExecCtx::Get()->Now() + - weighted_target_policy_->child_retention_interval_ms_, + ExecCtx::Get()->Now() + rds_policy_->child_retention_interval_ms_, &on_delayed_removal_timer_); delayed_removal_timer_callback_pending_ = true; } -void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimer(void* arg, - grpc_error* error) { - WeightedChild* self = static_cast(arg); - self->weighted_target_policy_->combiner()->Run( +void RdsLb::RdsChild::OnDelayedRemovalTimer(void* arg, grpc_error* error) { + RdsChild* self = static_cast(arg); + self->rds_policy_->combiner()->Run( GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, OnDelayedRemovalTimerLocked, self, nullptr), GRPC_ERROR_REF(error)); } -void WeightedTargetLb::WeightedChild::OnDelayedRemovalTimerLocked( - void* arg, grpc_error* error) { - WeightedChild* self = static_cast(arg); +void RdsLb::RdsChild::OnDelayedRemovalTimerLocked(void* arg, + grpc_error* error) { + RdsChild* self = static_cast(arg); self->delayed_removal_timer_callback_pending_ = false; - if (error == GRPC_ERROR_NONE && !self->shutdown_ && self->weight_ == 0) { - self->weighted_target_policy_->targets_.erase(self->name_); + if (error == GRPC_ERROR_NONE && !self->shutdown_) { + self->rds_policy_->actions_.erase(self->name_); } - self->Unref(DEBUG_LOCATION, "WeightedChild+timer"); + self->Unref(DEBUG_LOCATION, "RdsChild+timer"); } // -// WeightedTargetLb::WeightedChild::Helper +// RdsLb::RdsChild::Helper // -RefCountedPtr -WeightedTargetLb::WeightedChild::Helper::CreateSubchannel( +RefCountedPtr RdsLb::RdsChild::Helper::CreateSubchannel( const grpc_channel_args& args) { - if (weighted_child_->weighted_target_policy_->shutting_down_) return nullptr; - return weighted_child_->weighted_target_policy_->channel_control_helper()->CreateSubchannel( + if (rds_child_->rds_policy_->shutting_down_) return nullptr; + return rds_child_->rds_policy_->channel_control_helper()->CreateSubchannel( args); } -void WeightedTargetLb::WeightedChild::Helper::UpdateState( +void RdsLb::RdsChild::Helper::UpdateState( grpc_connectivity_state state, std::unique_ptr picker) { - if (weighted_child_->weighted_target_policy_->shutting_down_) return; - // Cache the picker in the WeightedChild. - weighted_child_->picker_wrapper_ = + if (rds_child_->rds_policy_->shutting_down_) return; + // Cache the picker in the RdsChild. + rds_child_->picker_wrapper_ = MakeRefCounted(std::move(picker)); // Decide what state to report for aggregation purposes. // If we haven't seen a failure since the last time we were in state // READY, then we report the state change as-is. However, once we do see // a failure, we report TRANSIENT_FAILURE and ignore any subsequent state // changes until we go back into state READY. 
- if (!weighted_child_->seen_failure_since_ready_) { + if (!rds_child_->seen_failure_since_ready_) { if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - weighted_child_->seen_failure_since_ready_ = true; + rds_child_->seen_failure_since_ready_ = true; } } else { if (state != GRPC_CHANNEL_READY) return; - weighted_child_->seen_failure_since_ready_ = false; + rds_child_->seen_failure_since_ready_ = false; } - weighted_child_->connectivity_state_ = state; + rds_child_->connectivity_state_ = state; // Notify the LB policy. - weighted_child_->weighted_target_policy_->UpdateStateLocked(); + rds_child_->rds_policy_->UpdateStateLocked(); } -void WeightedTargetLb::WeightedChild::Helper::RequestReresolution() { - if (weighted_child_->weighted_target_policy_->shutting_down_) return; - weighted_child_->weighted_target_policy_->channel_control_helper()->RequestReresolution(); +void RdsLb::RdsChild::Helper::RequestReresolution() { + if (rds_child_->rds_policy_->shutting_down_) return; + rds_child_->rds_policy_->channel_control_helper()->RequestReresolution(); } -void WeightedTargetLb::WeightedChild::Helper::AddTraceEvent(TraceSeverity severity, - StringView message) { - if (weighted_child_->weighted_target_policy_->shutting_down_) return; - weighted_child_->weighted_target_policy_->channel_control_helper()->AddTraceEvent( - severity, message); +void RdsLb::RdsChild::Helper::AddTraceEvent(TraceSeverity severity, + StringView message) { + if (rds_child_->rds_policy_->shutting_down_) return; + rds_child_->rds_policy_->channel_control_helper()->AddTraceEvent(severity, + message); } // // factory // -class WeightedTargetLbFactory : public LoadBalancingPolicyFactory { +class RdsLbFactory : public LoadBalancingPolicyFactory { public: OrphanablePtr CreateLoadBalancingPolicy( LoadBalancingPolicy::Args args) const override { - return MakeOrphanable(std::move(args)); + return MakeOrphanable(std::move(args)); } - const char* name() const override { return kWeightedTarget; } + const char* name() const override { return kRds; } RefCountedPtr ParseLoadBalancingConfig( const Json& json, grpc_error** error) const override { GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); if (json.type() == Json::Type::JSON_NULL) { - // weighted_target was mentioned as a policy in the deprecated + // rds was mentioned as a policy in the deprecated // loadBalancingPolicy field or in the client API. *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:loadBalancingPolicy error:weighted_target policy requires " + "field:loadBalancingPolicy error:rds policy requires " "configuration. Please use loadBalancingConfig field of service " "config instead."); return nullptr; } std::vector error_list; // Weight map. 
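+    // The expected shape, matching the config hard-coded in xds_client.cc,
+    // is roughly:
+    //   "actions": [
+    //     { "name": "<action name>",
+    //       "child_policy": [ { "<policy name>": { ... } } ] }
+    //   ]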
- WeightedTargetLbConfig::TargetMap target_map; - auto it = json.object_value().find("targets"); + RdsLbConfig::ActionMap action_map; + auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:targets error:required field not present")); - } else if (it->second.type() != Json::Type::OBJECT) { + "field:actions error:required field not present")); + } else if (it->second.type() != Json::Type::ARRAY) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:targets error:type should be object")); + "field:actions error:type should be array")); } else { - for (const auto& p : it->second.object_value()) { - WeightedTargetLbConfig::ChildConfig child_config; + for (const auto& p : it->second.array_value()) { + auto it_name = p.object_value().find("name"); + if (it_name == p.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:actions error: each action needs a name")); + } + auto it_child_policy = p.object_value().find("child_policy"); + if (it_child_policy == p.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:actions error: each action needs child policies")); + } + RdsLbConfig::ChildConfig child_config; std::vector child_errors = - ParseChildConfig(p.second, &child_config); + ParseChildConfig(it_child_policy->second, &child_config); if (!child_errors.empty()) { // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error // string is not static in this case. grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( - absl::StrCat("field:targets key:", p.first).c_str()); + absl::StrCat("field:actions name:", + it_name->second.string_value()) + .c_str()); for (grpc_error* child_error : child_errors) { error = grpc_error_add_child(error, child_error); } error_list.push_back(error); } else { - target_map[p.first] = std::move(child_config); + action_map[it_name->second.string_value()] = std::move(child_config); } } } if (!error_list.empty()) { *error = GRPC_ERROR_CREATE_FROM_VECTOR( - "weighted_target_experimental LB policy config", &error_list); + "rds_experimental LB policy config", &error_list); return nullptr; } - return MakeRefCounted(std::move(target_map)); + return MakeRefCounted(std::move(action_map)); } private: static std::vector ParseChildConfig( - const Json& json, WeightedTargetLbConfig::ChildConfig* child_config) { + const Json& json, RdsLbConfig::ChildConfig* child_config) { std::vector error_list; - if (json.type() != Json::Type::OBJECT) { + if (json.type() != Json::Type::ARRAY) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "value should be of type object")); + "value should be of type array")); return error_list; } - // Weight. 
- auto it = json.object_value().find("weight"); - if (it == json.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "required field \"weight\" not specified")); - } else if (it->second.type() != Json::Type::NUMBER) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:weight error:must be of type number")); - } else { - child_config->weight = - gpr_parse_nonnegative_int(it->second.string_value().c_str()); - if (child_config->weight == -1) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:weight error:unparseable value")); - } else if (child_config->weight == 0) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:weight error:value must be greater than zero")); - } - } - // Child policy. - it = json.object_value().find("childPolicy"); - if (it != json.object_value().end()) { - grpc_error* parse_error = GRPC_ERROR_NONE; - child_config->config = - LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second, - &parse_error); - if (child_config->config == nullptr) { - GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); - std::vector child_errors; - child_errors.push_back(parse_error); - error_list.push_back( - GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); - } + grpc_error* parse_error = GRPC_ERROR_NONE; + child_config->config = + LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( + json.array_value(), &parse_error); + if (child_config->config == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + std::vector child_errors; + child_errors.push_back(parse_error); + error_list.push_back( + GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); } return error_list; } @@ -687,10 +631,10 @@ class WeightedTargetLbFactory : public LoadBalancingPolicyFactory { // Plugin registration // -void grpc_lb_policy_weighted_target_init() { +void grpc_lb_policy_rds_init() { grpc_core::LoadBalancingPolicyRegistry::Builder:: RegisterLoadBalancingPolicyFactory( - absl::make_unique()); + absl::make_unique()); } -void grpc_lb_policy_weighted_target_shutdown() {} +void grpc_lb_policy_rds_shutdown() {} diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 42708fca556..a9725a392a9 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -1946,8 +1946,15 @@ grpc_error* XdsClient::CreateServiceConfig( gpr_asprintf(&json, "{\n" " \"loadBalancingConfig\":[\n" - " { \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" + " { \"xds_routing_experimental\":{\n" + " \"actions\":[\n" + " { \"name\": \"default\",\n" + " \"child_policy\":[\n" + " { \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " } }\n" + " ]\n" + " } ]\n" " } }\n" " ]\n" "}", diff --git a/src/core/plugin_registry/grpc_plugin_registry.cc b/src/core/plugin_registry/grpc_plugin_registry.cc index 20ad526d837..4af79901595 100644 --- a/src/core/plugin_registry/grpc_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_plugin_registry.cc @@ -36,6 +36,8 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); +void grpc_lb_policy_rds_init(void); +void grpc_lb_policy_rds_shutdown(void); void grpc_lb_policy_xds_init(void); void grpc_lb_policy_xds_shutdown(void); void grpc_lb_policy_pick_first_init(void); @@ -78,6 +80,8 @@ void grpc_register_built_in_plugins(void) { 
grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); + grpc_register_plugin(grpc_lb_policy_rds_init, + grpc_lb_policy_rds_shutdown); grpc_register_plugin(grpc_lb_policy_xds_init, grpc_lb_policy_xds_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc index bfed2e22ddd..c150d64bb62 100644 --- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc @@ -44,6 +44,8 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); +void grpc_lb_policy_rds_init(void); +void grpc_lb_policy_rds_shutdown(void); void grpc_lb_policy_xds_init(void); void grpc_lb_policy_xds_shutdown(void); void grpc_lb_policy_pick_first_init(void); @@ -86,6 +88,8 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); + grpc_register_plugin(grpc_lb_policy_rds_init, + grpc_lb_policy_rds_shutdown); grpc_register_plugin(grpc_lb_policy_xds_init, grpc_lb_policy_xds_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 1fbdbad953e..522ed07d202 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1546,9 +1546,9 @@ TEST_P(BasicTest, Vanilla) { backends_[i]->backend_service()->request_count()); } // Check LB policy name for the channel. - EXPECT_EQ( - (GetParam().use_xds_resolver() ? "cds_experimental" : "xds_experimental"), - channel_->GetLoadBalancingPolicyName()); + EXPECT_EQ((GetParam().use_xds_resolver() ? 
"xds_routing_experimental" + : "xds_experimental"), + channel_->GetLoadBalancingPolicyName()); } TEST_P(BasicTest, IgnoresUnhealthyEndpoints) { From d469f3119875edf91503bb0f39d8caedf258affd Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Sun, 15 Mar 2020 12:56:43 -0700 Subject: [PATCH 03/37] Address code review comment of renaming --- BUILD | 19 +- .../lb_policy/xds/{rds.cc => xds_routing.cc} | 275 +++++++++--------- .../filters/client_channel/xds/xds_client.cc | 14 +- .../plugin_registry/grpc_plugin_registry.cc | 8 +- .../grpc_unsecure_plugin_registry.cc | 4 - 5 files changed, 158 insertions(+), 162 deletions(-) rename src/core/ext/filters/client_channel/lb_policy/xds/{rds.cc => xds_routing.cc} (63%) diff --git a/BUILD b/BUILD index a0c9247e74e..66989f4a867 100644 --- a/BUILD +++ b/BUILD @@ -320,7 +320,6 @@ grpc_cc_library( "grpc_common", "grpc_lb_policy_cds", "grpc_lb_policy_grpclb", - "grpc_lb_policy_rds", "grpc_lb_policy_xds", "grpc_resolver_xds", ], @@ -339,7 +338,7 @@ grpc_cc_library( "grpc_common", "grpc_lb_policy_cds_secure", "grpc_lb_policy_grpclb_secure", - "grpc_lb_policy_rds_secure", + "grpc_lb_policy_xds_routing", "grpc_lb_policy_xds_secure", "grpc_resolver_xds_secure", "grpc_secure", @@ -1400,21 +1399,9 @@ grpc_cc_library( ) grpc_cc_library( - name = "grpc_lb_policy_rds", + name = "grpc_lb_policy_xds_routing", srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/rds.cc", - ], - language = "c++", - deps = [ - "grpc_base", - "grpc_client_channel", - ], -) - -grpc_cc_library( - name = "grpc_lb_policy_rds_secure", - srcs = [ - "src/core/ext/filters/client_channel/lb_policy/xds/rds.cc", + "src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc", ], language = "c++", deps = [ diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc similarity index 63% rename from src/core/ext/filters/client_channel/lb_policy/xds/rds.cc rename to src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 6a730d2a512..7e63263c750 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/rds.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -35,18 +35,18 @@ #include "src/core/lib/iomgr/combiner.h" #include "src/core/lib/iomgr/timer.h" -#define GRPC_WEIGHTED_TARGET_CHILD_RETENTION_INTERVAL_MS (15 * 60 * 1000) +#define GRPC_XDS_ROUTING_CHILD_RETENTION_INTERVAL_MS (15 * 60 * 1000) namespace grpc_core { -TraceFlag grpc_rds_lb_trace(false, "rds_lb"); +TraceFlag grpc_xds_routing_lb_trace(false, "xds_routing_lb"); namespace { -constexpr char kRds[] = "xds_routing_experimental"; +constexpr char kXdsRouting[] = "xds_routing_experimental"; -// Config for rds LB policy. -class RdsLbConfig : public LoadBalancingPolicy::Config { +// Config for xds_routing LB policy. +class XdsRoutingLbConfig : public LoadBalancingPolicy::Config { public: struct ChildConfig { RefCountedPtr config; @@ -54,10 +54,10 @@ class RdsLbConfig : public LoadBalancingPolicy::Config { using ActionMap = std::map; - explicit RdsLbConfig(ActionMap action_map) + explicit XdsRoutingLbConfig(ActionMap action_map) : action_map_(std::move(action_map)) {} - const char* name() const override { return kRds; } + const char* name() const override { return kXdsRouting; } const ActionMap& action_map() const { return action_map_; } @@ -65,12 +65,12 @@ class RdsLbConfig : public LoadBalancingPolicy::Config { ActionMap action_map_; }; -// rds LB policy. 
-class RdsLb : public LoadBalancingPolicy { +// xds_routing LB policy. +class XdsRoutingLb : public LoadBalancingPolicy { public: - explicit RdsLb(Args args); + explicit XdsRoutingLb(Args args); - const char* name() const override { return kRds; } + const char* name() const override { return kXdsRouting; } void UpdateLocked(UpdateArgs args) override; void ExitIdleLocked() override; @@ -82,7 +82,8 @@ class RdsLb : public LoadBalancingPolicy { public: explicit ChildPickerWrapper(std::unique_ptr picker) : picker_(std::move(picker)) {} - PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } + PickResult Pick(PickArgs args) { + return picker_->Pick(std::move(args)); } private: std::unique_ptr picker_; @@ -90,34 +91,34 @@ class RdsLb : public LoadBalancingPolicy { // Picks a child using stateless WRR and then delegates to that // child's picker. - class RdsPicker : public SubchannelPicker { + class XdsRoutingPicker : public SubchannelPicker { public: - // Maintains a rds list of pickers from each child that is in + // Maintains a xds_routing list of pickers from each child that is in // ready state. The first element in the pair represents the end of a // range proportional to the child's weight. The start of the range // is the previous value in the vector and is 0 for the first element. using PickerList = InlinedVector, 1>; - RdsPicker(RefCountedPtr parent, PickerList pickers) + XdsRoutingPicker(RefCountedPtr parent, PickerList pickers) : parent_(std::move(parent)), pickers_(std::move(pickers)) {} - ~RdsPicker() { parent_.reset(DEBUG_LOCATION, "RdsPicker"); } + ~XdsRoutingPicker() { parent_.reset(DEBUG_LOCATION, "XdsRoutingPicker"); } PickResult Pick(PickArgs args) override; private: - RefCountedPtr parent_; + RefCountedPtr parent_; PickerList pickers_; }; - // Each RdsChild holds a ref to its parent RdsLb. - class RdsChild : public InternallyRefCounted { + // Each XdsRoutingChild holds a ref to its parent XdsRoutingLb. + class XdsRoutingChild : public InternallyRefCounted { public: - RdsChild(RefCountedPtr rds_policy, const std::string& name); - ~RdsChild(); + XdsRoutingChild(RefCountedPtr xds_routing_policy, const std::string& name); + ~XdsRoutingChild(); void Orphan() override; - void UpdateLocked(const RdsLbConfig::ChildConfig& config, + void UpdateLocked(const XdsRoutingLbConfig::ChildConfig& config, const ServerAddressList& addresses, const grpc_channel_args* args); void ExitIdleLocked(); @@ -134,10 +135,10 @@ class RdsLb : public LoadBalancingPolicy { private: class Helper : public ChannelControlHelper { public: - explicit Helper(RefCountedPtr rds_child) - : rds_child_(std::move(rds_child)) {} + explicit Helper(RefCountedPtr xds_routing_child) + : xds_routing_child_(std::move(xds_routing_child)) {} - ~Helper() { rds_child_.reset(DEBUG_LOCATION, "Helper"); } + ~Helper() { xds_routing_child_.reset(DEBUG_LOCATION, "Helper"); } RefCountedPtr CreateSubchannel( const grpc_channel_args& args) override; @@ -147,7 +148,7 @@ class RdsLb : public LoadBalancingPolicy { void AddTraceEvent(TraceSeverity severity, StringView message) override; private: - RefCountedPtr rds_child_; + RefCountedPtr xds_routing_child_; }; // Methods for dealing with the child policy. @@ -158,9 +159,9 @@ class RdsLb : public LoadBalancingPolicy { static void OnDelayedRemovalTimerLocked(void* arg, grpc_error* error); // The owning LB policy. - RefCountedPtr rds_policy_; + RefCountedPtr xds_routing_policy_; - // Points to the corresponding key in RdsLb::actions_. 
+ // Points to the corresponding key in XdsRoutingLb::actions_. const std::string& name_; OrphanablePtr child_policy_; @@ -176,7 +177,7 @@ class RdsLb : public LoadBalancingPolicy { bool shutdown_ = false; }; - ~RdsLb(); + ~XdsRoutingLb(); void ShutdownLocked() override; @@ -185,70 +186,68 @@ class RdsLb : public LoadBalancingPolicy { const grpc_millis child_retention_interval_ms_; // Current config from the resolver. - RefCountedPtr config_; + RefCountedPtr config_; // Internal state. bool shutting_down_ = false; // Children. - std::map> actions_; + std::map> actions_; }; // -// RdsLb::RdsPicker +// XdsRoutingLb::XdsRoutingPicker // -RdsLb::PickResult RdsLb::RdsPicker::Pick(PickArgs args) { - gpr_log( - GPR_INFO, - "donna Picking not implemented yet, just always use the one and only"); +XdsRoutingLb::PickResult XdsRoutingLb::XdsRoutingPicker::Pick(PickArgs args) { + gpr_log(GPR_INFO, "donna picked first first"); return pickers_[0]->Pick(args); } // -// RdsLb +// XdsRoutingLb // -RdsLb::RdsLb(Args args) +XdsRoutingLb::XdsRoutingLb(Args args) : LoadBalancingPolicy(std::move(args)), // FIXME: new channel arg child_retention_interval_ms_(grpc_channel_args_find_integer( args.args, GRPC_ARG_LOCALITY_RETENTION_INTERVAL_MS, - {GRPC_WEIGHTED_TARGET_CHILD_RETENTION_INTERVAL_MS, 0, INT_MAX})) {} + {GRPC_XDS_ROUTING_CHILD_RETENTION_INTERVAL_MS, 0, INT_MAX})) {} -RdsLb::~RdsLb() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] destroying rds LB policy", this); +XdsRoutingLb::~XdsRoutingLb() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] destroying xds_routing LB policy", this); } } -void RdsLb::ShutdownLocked() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] shutting down", this); +void XdsRoutingLb::ShutdownLocked() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] shutting down", this); } shutting_down_ = true; actions_.clear(); } -void RdsLb::ExitIdleLocked() { +void XdsRoutingLb::ExitIdleLocked() { for (auto& p : actions_) p.second->ExitIdleLocked(); } -void RdsLb::ResetBackoffLocked() { +void XdsRoutingLb::ResetBackoffLocked() { for (auto& p : actions_) p.second->ResetBackoffLocked(); } -void RdsLb::UpdateLocked(UpdateArgs args) { +void XdsRoutingLb::UpdateLocked(UpdateArgs args) { if (shutting_down_) return; - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] Received update", this); + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] Received update", this); } // Update config. config_ = std::move(args.config); // Deactivate the actions not in the new config. for (auto it = actions_.begin(); it != actions_.end();) { const std::string& name = it->first; - RdsChild* child = it->second.get(); + XdsRoutingChild* child = it->second.get(); if (config_->action_map().find(name) != config_->action_map().end()) { ++it; continue; @@ -263,23 +262,23 @@ void RdsLb::UpdateLocked(UpdateArgs args) { // Add or update the actions in the new config. 
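// Existing children are reused and simply given the new addresses and
// child policy config; a name that appears for the first time gets a
// freshly created XdsRoutingChild.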
for (const auto& p : config_->action_map()) { const std::string& name = p.first; - const RdsLbConfig::ChildConfig& config = p.second; + const XdsRoutingLbConfig::ChildConfig& config = p.second; auto it = actions_.find(name); if (it == actions_.end()) { it = actions_.emplace(std::make_pair(name, nullptr)).first; it->second = - MakeOrphanable(Ref(DEBUG_LOCATION, "RdsChild"), it->first); + MakeOrphanable(Ref(DEBUG_LOCATION, "XdsRoutingChild"), it->first); } it->second->UpdateLocked(config, args.addresses, args.args); } } -void RdsLb::UpdateStateLocked() { +void XdsRoutingLb::UpdateStateLocked() { // Construct a new picker which maintains a map of all child pickers // that are ready. Each child is represented by a portion of the range // proportional to its weight, such that the total range is the sum of the // weights of all children. - RdsPicker::PickerList picker_list; + XdsRoutingPicker::PickerList picker_list; // Also count the number of children in each state, to determine the // overall state. size_t num_connecting = 0; @@ -287,7 +286,7 @@ void RdsLb::UpdateStateLocked() { size_t num_transient_failures = 0; for (const auto& p : actions_) { const auto& child_name = p.first; - const RdsChild* child = p.second.get(); + const XdsRoutingChild* child = p.second.get(); // Skip the actions that are not in the latest update. if (config_->action_map().find(child_name) == config_->action_map().end()) { continue; @@ -324,14 +323,14 @@ void RdsLb::UpdateStateLocked() { } else { connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE; } - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] connectivity changed to %s", this, + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] connectivity changed to %s", this, ConnectivityStateName(connectivity_state)); } std::unique_ptr picker; switch (connectivity_state) { case GRPC_CHANNEL_READY: - picker = absl::make_unique(Ref(DEBUG_LOCATION, "RdsPicker"), + picker = absl::make_unique(Ref(DEBUG_LOCATION, "XdsRoutingPicker"), std::move(picker_list)); break; case GRPC_CHANNEL_CONNECTING: @@ -342,41 +341,41 @@ void RdsLb::UpdateStateLocked() { default: picker = absl::make_unique( GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "rds: all children report state TRANSIENT_FAILURE")); + "xds_routing: all children report state TRANSIENT_FAILURE")); } channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); } // -// RdsLb::RdsChild +// XdsRoutingLb::XdsRoutingChild // -RdsLb::RdsChild::RdsChild(RefCountedPtr rds_policy, +XdsRoutingLb::XdsRoutingChild::XdsRoutingChild(RefCountedPtr xds_routing_policy, const std::string& name) - : rds_policy_(std::move(rds_policy)), name_(name) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] created RdsChild %p for %s", - rds_policy_.get(), this, name_.c_str()); + : xds_routing_policy_(std::move(xds_routing_policy)), name_(name) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] created XdsRoutingChild %p for %s", + xds_routing_policy_.get(), this, name_.c_str()); } } -RdsLb::RdsChild::~RdsChild() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] RdsChild %p %s: destroying child", - rds_policy_.get(), this, name_.c_str()); +XdsRoutingLb::XdsRoutingChild::~XdsRoutingChild() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] XdsRoutingChild %p %s: destroying child", + 
xds_routing_policy_.get(), this, name_.c_str()); } - rds_policy_.reset(DEBUG_LOCATION, "RdsChild"); + xds_routing_policy_.reset(DEBUG_LOCATION, "XdsRoutingChild"); } -void RdsLb::RdsChild::Orphan() { - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { - gpr_log(GPR_INFO, "[rds_lb %p] RdsChild %p %s: shutting down child", - rds_policy_.get(), this, name_.c_str()); +void XdsRoutingLb::XdsRoutingChild::Orphan() { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, "[xds_routing_lb %p] XdsRoutingChild %p %s: shutting down child", + xds_routing_policy_.get(), this, name_.c_str()); } // Remove the child policy's interested_parties pollset_set from the // xDS policy. grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(), - rds_policy_->interested_parties()); + xds_routing_policy_->interested_parties()); child_policy_.reset(); // Drop our ref to the child's picker, in case it's holding a ref to // the child. @@ -388,34 +387,34 @@ void RdsLb::RdsChild::Orphan() { Unref(); } -OrphanablePtr RdsLb::RdsChild::CreateChildPolicyLocked( +OrphanablePtr XdsRoutingLb::XdsRoutingChild::CreateChildPolicyLocked( const grpc_channel_args* args) { LoadBalancingPolicy::Args lb_policy_args; - lb_policy_args.combiner = rds_policy_->combiner(); + lb_policy_args.combiner = xds_routing_policy_->combiner(); lb_policy_args.args = args; lb_policy_args.channel_control_helper = absl::make_unique(this->Ref(DEBUG_LOCATION, "Helper")); OrphanablePtr lb_policy = MakeOrphanable(std::move(lb_policy_args), - &grpc_rds_lb_trace); - if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + &grpc_xds_routing_lb_trace); + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { gpr_log(GPR_INFO, - "[rds_lb %p] RdsChild %p %s: Created new child " + "[xds_routing_lb %p] XdsRoutingChild %p %s: Created new child " "policy handler %p", - rds_policy_.get(), this, name_.c_str(), lb_policy.get()); + xds_routing_policy_.get(), this, name_.c_str(), lb_policy.get()); } // Add the xDS's interested_parties pollset_set to that of the newly created // child policy. This will make the child policy progress upon activity on // xDS LB, which in turn is tied to the application's call. grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(), - rds_policy_->interested_parties()); + xds_routing_policy_->interested_parties()); return lb_policy; } -void RdsLb::RdsChild::UpdateLocked(const RdsLbConfig::ChildConfig& config, +void XdsRoutingLb::XdsRoutingChild::UpdateLocked(const XdsRoutingLbConfig::ChildConfig& config, const ServerAddressList& addresses, const grpc_channel_args* args) { - if (rds_policy_->shutting_down_) return; + if (xds_routing_policy_->shutting_down_) return; // Update child weight. // Reactivate if needed. if (delayed_removal_timer_callback_pending_) { @@ -431,97 +430,99 @@ void RdsLb::RdsChild::UpdateLocked(const RdsLbConfig::ChildConfig& config, update_args.addresses = addresses; update_args.args = grpc_channel_args_copy(args); // Update the policy. 
- if (GRPC_TRACE_FLAG_ENABLED(grpc_rds_lb_trace)) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { gpr_log(GPR_INFO, - "[rds_lb %p] RdsChild %p %s: Updating child " + "[xds_routing_lb %p] XdsRoutingChild %p %s: Updating child " "policy handler %p", - rds_policy_.get(), this, name_.c_str(), child_policy_.get()); + xds_routing_policy_.get(), this, name_.c_str(), child_policy_.get()); } child_policy_->UpdateLocked(std::move(update_args)); } -void RdsLb::RdsChild::ExitIdleLocked() { child_policy_->ExitIdleLocked(); } +void XdsRoutingLb::XdsRoutingChild::ExitIdleLocked() { child_policy_->ExitIdleLocked(); } -void RdsLb::RdsChild::ResetBackoffLocked() { +void XdsRoutingLb::XdsRoutingChild::ResetBackoffLocked() { child_policy_->ResetBackoffLocked(); } -void RdsLb::RdsChild::DeactivateLocked() { +void XdsRoutingLb::XdsRoutingChild::DeactivateLocked() { // If already deactivated, don't do that again. // Set the child weight to 0 so that future picker won't contain this child. // Start a timer to delete the child. - Ref(DEBUG_LOCATION, "RdsChild+timer").release(); + Ref(DEBUG_LOCATION, "XdsRoutingChild+timer").release(); GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this, grpc_schedule_on_exec_ctx); grpc_timer_init( &delayed_removal_timer_, - ExecCtx::Get()->Now() + rds_policy_->child_retention_interval_ms_, + ExecCtx::Get()->Now() + xds_routing_policy_->child_retention_interval_ms_, &on_delayed_removal_timer_); delayed_removal_timer_callback_pending_ = true; } -void RdsLb::RdsChild::OnDelayedRemovalTimer(void* arg, grpc_error* error) { - RdsChild* self = static_cast(arg); - self->rds_policy_->combiner()->Run( +void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimer(void* arg, grpc_error* error) { + XdsRoutingChild* self = static_cast(arg); + self->xds_routing_policy_->combiner()->Run( GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, OnDelayedRemovalTimerLocked, self, nullptr), GRPC_ERROR_REF(error)); } -void RdsLb::RdsChild::OnDelayedRemovalTimerLocked(void* arg, +void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked(void* arg, grpc_error* error) { - RdsChild* self = static_cast(arg); + XdsRoutingChild* self = static_cast(arg); self->delayed_removal_timer_callback_pending_ = false; if (error == GRPC_ERROR_NONE && !self->shutdown_) { - self->rds_policy_->actions_.erase(self->name_); + self->xds_routing_policy_->actions_.erase(self->name_); } - self->Unref(DEBUG_LOCATION, "RdsChild+timer"); + self->Unref(DEBUG_LOCATION, "XdsRoutingChild+timer"); } // -// RdsLb::RdsChild::Helper +// XdsRoutingLb::XdsRoutingChild::Helper // -RefCountedPtr RdsLb::RdsChild::Helper::CreateSubchannel( +RefCountedPtr XdsRoutingLb::XdsRoutingChild::Helper::CreateSubchannel( const grpc_channel_args& args) { - if (rds_child_->rds_policy_->shutting_down_) return nullptr; - return rds_child_->rds_policy_->channel_control_helper()->CreateSubchannel( + gpr_log(GPR_INFO, "donna XdsRoutingChild::Helper::CreateSubchannel"); + if (xds_routing_child_->xds_routing_policy_->shutting_down_) return nullptr; + return xds_routing_child_->xds_routing_policy_->channel_control_helper()->CreateSubchannel( args); } -void RdsLb::RdsChild::Helper::UpdateState( +void XdsRoutingLb::XdsRoutingChild::Helper::UpdateState( grpc_connectivity_state state, std::unique_ptr picker) { - if (rds_child_->rds_policy_->shutting_down_) return; - // Cache the picker in the RdsChild. 
- rds_child_->picker_wrapper_ = + gpr_log(GPR_INFO, "donna XdsRoutingChild::Helper::UpdateState"); + if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; + // Cache the picker in the XdsRoutingChild. + xds_routing_child_->picker_wrapper_ = MakeRefCounted(std::move(picker)); // Decide what state to report for aggregation purposes. // If we haven't seen a failure since the last time we were in state // READY, then we report the state change as-is. However, once we do see // a failure, we report TRANSIENT_FAILURE and ignore any subsequent state // changes until we go back into state READY. - if (!rds_child_->seen_failure_since_ready_) { + if (!xds_routing_child_->seen_failure_since_ready_) { if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) { - rds_child_->seen_failure_since_ready_ = true; + xds_routing_child_->seen_failure_since_ready_ = true; } } else { if (state != GRPC_CHANNEL_READY) return; - rds_child_->seen_failure_since_ready_ = false; + xds_routing_child_->seen_failure_since_ready_ = false; } - rds_child_->connectivity_state_ = state; + xds_routing_child_->connectivity_state_ = state; // Notify the LB policy. - rds_child_->rds_policy_->UpdateStateLocked(); + xds_routing_child_->xds_routing_policy_->UpdateStateLocked(); } -void RdsLb::RdsChild::Helper::RequestReresolution() { - if (rds_child_->rds_policy_->shutting_down_) return; - rds_child_->rds_policy_->channel_control_helper()->RequestReresolution(); +void XdsRoutingLb::XdsRoutingChild::Helper::RequestReresolution() { + if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; + xds_routing_child_->xds_routing_policy_->channel_control_helper()->RequestReresolution(); } -void RdsLb::RdsChild::Helper::AddTraceEvent(TraceSeverity severity, +void XdsRoutingLb::XdsRoutingChild::Helper::AddTraceEvent(TraceSeverity severity, StringView message) { - if (rds_child_->rds_policy_->shutting_down_) return; - rds_child_->rds_policy_->channel_control_helper()->AddTraceEvent(severity, + if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; + xds_routing_child_->xds_routing_policy_->channel_control_helper()->AddTraceEvent(severity, message); } @@ -529,30 +530,30 @@ void RdsLb::RdsChild::Helper::AddTraceEvent(TraceSeverity severity, // factory // -class RdsLbFactory : public LoadBalancingPolicyFactory { +class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { public: OrphanablePtr CreateLoadBalancingPolicy( LoadBalancingPolicy::Args args) const override { - return MakeOrphanable(std::move(args)); + return MakeOrphanable(std::move(args)); } - const char* name() const override { return kRds; } + const char* name() const override { return kXdsRouting; } RefCountedPtr ParseLoadBalancingConfig( const Json& json, grpc_error** error) const override { GPR_DEBUG_ASSERT(error != nullptr && *error == GRPC_ERROR_NONE); if (json.type() == Json::Type::JSON_NULL) { - // rds was mentioned as a policy in the deprecated + // xds_routing was mentioned as a policy in the deprecated // loadBalancingPolicy field or in the client API. *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:loadBalancingPolicy error:rds policy requires " + "field:loadBalancingPolicy error:xds_routing policy requires " "configuration. Please use loadBalancingConfig field of service " "config instead."); return nullptr; } std::vector error_list; - // Weight map. - RdsLbConfig::ActionMap action_map; + // action map. 
+ XdsRoutingLbConfig::ActionMap action_map; auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -562,17 +563,19 @@ class RdsLbFactory : public LoadBalancingPolicyFactory { "field:actions error:type should be array")); } else { for (const auto& p : it->second.array_value()) { - auto it_name = p.object_value().find("name"); - if (it_name == p.object_value().end()) { + auto it_cds = p.object_value().find("cds"); + auto it_weighted_target = p.object_value().find("weighted_target"); + if (it_cds == p.object_value().end() && it_weighted_target == p.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:actions error: each action needs a name")); + "field:actions error: each action needs to be either cds or weighted target")); } + auto it_name = (it_cds == p.object_value().end() ? it_weighted_target : it_cds); auto it_child_policy = p.object_value().find("child_policy"); if (it_child_policy == p.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "field:actions error: each action needs child policies")); } - RdsLbConfig::ChildConfig child_config; + XdsRoutingLbConfig::ChildConfig child_config; std::vector child_errors = ParseChildConfig(it_child_policy->second, &child_config); if (!child_errors.empty()) { @@ -593,15 +596,15 @@ class RdsLbFactory : public LoadBalancingPolicyFactory { } if (!error_list.empty()) { *error = GRPC_ERROR_CREATE_FROM_VECTOR( - "rds_experimental LB policy config", &error_list); + "xds_routing_experimental LB policy config", &error_list); return nullptr; } - return MakeRefCounted(std::move(action_map)); + return MakeRefCounted(std::move(action_map)); } private: static std::vector ParseChildConfig( - const Json& json, RdsLbConfig::ChildConfig* child_config) { + const Json& json, XdsRoutingLbConfig::ChildConfig* child_config) { std::vector error_list; if (json.type() != Json::Type::ARRAY) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -631,10 +634,10 @@ class RdsLbFactory : public LoadBalancingPolicyFactory { // Plugin registration // -void grpc_lb_policy_rds_init() { +void grpc_lb_policy_xds_routing_init() { grpc_core::LoadBalancingPolicyRegistry::Builder:: RegisterLoadBalancingPolicyFactory( - absl::make_unique()); + absl::make_unique()); } -void grpc_lb_policy_rds_shutdown() {} +void grpc_lb_policy_xds_routing_shutdown() {} diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index a9725a392a9..cb793f6d00d 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -1948,7 +1948,17 @@ grpc_error* XdsClient::CreateServiceConfig( " \"loadBalancingConfig\":[\n" " { \"xds_routing_experimental\":{\n" " \"actions\":[\n" - " { \"name\": \"default\",\n" + " { \"cds\": \"cluster_1\",\n" + " \"child_policy\":[\n" + " { \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " } },\n" + " { \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " } }\n" + " ]\n" + " },\n" + " { \"cds\": \"cluster_2\",\n" " \"child_policy\":[\n" " { \"cds_experimental\":{\n" " \"cluster\": \"%s\"\n" @@ -1958,7 +1968,7 @@ grpc_error* XdsClient::CreateServiceConfig( " } }\n" " ]\n" "}", - cluster_name.c_str()); + cluster_name.c_str(), "blah2", "blah3"); grpc_error* error = GRPC_ERROR_NONE; *service_config = ServiceConfig::Create(json, &error); gpr_free(json); diff --git 
a/src/core/plugin_registry/grpc_plugin_registry.cc b/src/core/plugin_registry/grpc_plugin_registry.cc index 4af79901595..0bf515c417d 100644 --- a/src/core/plugin_registry/grpc_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_plugin_registry.cc @@ -36,10 +36,10 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); -void grpc_lb_policy_rds_init(void); -void grpc_lb_policy_rds_shutdown(void); void grpc_lb_policy_xds_init(void); void grpc_lb_policy_xds_shutdown(void); +void grpc_lb_policy_xds_routing_init(void); +void grpc_lb_policy_xds_routing_shutdown(void); void grpc_lb_policy_pick_first_init(void); void grpc_lb_policy_pick_first_shutdown(void); void grpc_lb_policy_round_robin_init(void); @@ -80,10 +80,10 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); - grpc_register_plugin(grpc_lb_policy_rds_init, - grpc_lb_policy_rds_shutdown); grpc_register_plugin(grpc_lb_policy_xds_init, grpc_lb_policy_xds_shutdown); + grpc_register_plugin(grpc_lb_policy_xds_routing_init, + grpc_lb_policy_xds_routing_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, grpc_lb_policy_pick_first_shutdown); grpc_register_plugin(grpc_lb_policy_round_robin_init, diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc index c150d64bb62..bfed2e22ddd 100644 --- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc @@ -44,8 +44,6 @@ void grpc_lb_policy_grpclb_init(void); void grpc_lb_policy_grpclb_shutdown(void); void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); -void grpc_lb_policy_rds_init(void); -void grpc_lb_policy_rds_shutdown(void); void grpc_lb_policy_xds_init(void); void grpc_lb_policy_xds_shutdown(void); void grpc_lb_policy_pick_first_init(void); @@ -88,8 +86,6 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_grpclb_shutdown); grpc_register_plugin(grpc_lb_policy_cds_init, grpc_lb_policy_cds_shutdown); - grpc_register_plugin(grpc_lb_policy_rds_init, - grpc_lb_policy_rds_shutdown); grpc_register_plugin(grpc_lb_policy_xds_init, grpc_lb_policy_xds_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, From 70ac4b641822a9abe474dc53cb017fadaf121267 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 23 Mar 2020 00:34:14 -0700 Subject: [PATCH 04/37] Basic Parsing, building service config, and Picking are all exercised with basic tests. 
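For orientation, the pieces this commit wires together are: RouteConfigParse()
now turns each route's prefix or path match into an XdsApi::RdsRoute carrying
the service, the method, and the action (cluster) name; XdsClient::CreateServiceConfig()
renders those routes into an xds_routing_experimental service config with an
"actions" list and a "routes" list; and the policy's picker matches each RPC's
":path" pseudo-header against the configured routes. Below is a rough,
illustrative sketch of that matching step, simplified from the picker code in
the diff; the Route struct and names here are placeholders, not the patch's
actual types.

    // Illustrative sketch only: match a ":path" pseudo-header of the usual
    // "/package.Service/Method" form against routes parsed from the config.
    #include <string>
    #include <vector>

    struct Route {
      std::string service;
      std::string method;  // empty means "match any method of this service"
      std::string action;  // name of the action (cluster) to route to
    };

    std::string MatchRoute(const std::vector<Route>& routes,
                           const std::string& path) {
      // "/pkg.Service/Method" -> service = "pkg.Service", method = "Method".
      const auto second_slash = path.find('/', 1);
      const std::string service = path.substr(1, second_slash - 1);
      const std::string method =
          second_slash == std::string::npos ? "" : path.substr(second_slash + 1);
      for (const Route& r : routes) {
        if (service == r.service && (r.method.empty() || method == r.method)) {
          return r.action;
        }
      }
      // The real picker falls back to its first ready child when nothing
      // matches; the sketch just reports no match.
      return "";
    }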
--- .../lb_policy/xds/xds_routing.cc | 193 +++++++++++++----- .../ext/filters/client_channel/xds/xds_api.cc | 82 +++++--- .../ext/filters/client_channel/xds/xds_api.h | 10 +- .../filters/client_channel/xds/xds_client.cc | 111 +++++++--- .../filters/client_channel/xds/xds_client.h | 9 +- .../grpc/testing/xds/lds_rds_for_test.proto | 1 + test/cpp/end2end/xds_end2end_test.cc | 85 ++++++++ 7 files changed, 383 insertions(+), 108 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 7e63263c750..2f409d6746b 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -21,6 +21,7 @@ #include #include "absl/strings/str_cat.h" +#include "absl/strings/str_split.h" #include @@ -52,17 +53,23 @@ class XdsRoutingLbConfig : public LoadBalancingPolicy::Config { RefCountedPtr config; }; + using Matcher = std::pair; + using RouteVector = std::vector>; using ActionMap = std::map; - explicit XdsRoutingLbConfig(ActionMap action_map) - : action_map_(std::move(action_map)) {} + explicit XdsRoutingLbConfig(ActionMap action_map, RouteVector route_vector) + : action_map_(std::move(action_map)), + route_vector_(std::move(route_vector)) {} const char* name() const override { return kXdsRouting; } const ActionMap& action_map() const { return action_map_; } + const RouteVector& route_vector() const { return route_vector_; } + private: ActionMap action_map_; + RouteVector route_vector_; }; // xds_routing LB policy. @@ -80,12 +87,15 @@ class XdsRoutingLb : public LoadBalancingPolicy { // A simple wrapper for ref-counting a picker from the child policy. class ChildPickerWrapper : public RefCounted { public: - explicit ChildPickerWrapper(std::unique_ptr picker) - : picker_(std::move(picker)) {} - PickResult Pick(PickArgs args) { - return picker_->Pick(std::move(args)); } + explicit ChildPickerWrapper(const std::string& name, + std::unique_ptr picker) + : name_(name), picker_(std::move(picker)) {} + PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } + + std::string name() { return name_; } private: + std::string name_; std::unique_ptr picker_; }; @@ -99,7 +109,9 @@ class XdsRoutingLb : public LoadBalancingPolicy { // is the previous value in the vector and is 0 for the first element. using PickerList = InlinedVector, 1>; - XdsRoutingPicker(RefCountedPtr parent, PickerList pickers) + using PickerMap = std::map>; + + XdsRoutingPicker(RefCountedPtr parent, PickerMap pickers) : parent_(std::move(parent)), pickers_(std::move(pickers)) {} ~XdsRoutingPicker() { parent_.reset(DEBUG_LOCATION, "XdsRoutingPicker"); } @@ -107,13 +119,14 @@ class XdsRoutingLb : public LoadBalancingPolicy { private: RefCountedPtr parent_; - PickerList pickers_; + PickerMap pickers_; }; // Each XdsRoutingChild holds a ref to its parent XdsRoutingLb. 
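// It owns the action's child policy (wrapped in a ChildPolicyHandler) and
// caches the most recent picker reported by that child, so that
// UpdateStateLocked() can build the routing picker from the children that
// are currently READY.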
class XdsRoutingChild : public InternallyRefCounted { public: - XdsRoutingChild(RefCountedPtr xds_routing_policy, const std::string& name); + XdsRoutingChild(RefCountedPtr xds_routing_policy, + const std::string& name); ~XdsRoutingChild(); void Orphan() override; @@ -132,6 +145,8 @@ class XdsRoutingLb : public LoadBalancingPolicy { return picker_wrapper_; } + std::string name() const { return name_; } + private: class Helper : public ChannelControlHelper { public: @@ -200,8 +215,31 @@ class XdsRoutingLb : public LoadBalancingPolicy { // XdsRoutingLb::PickResult XdsRoutingLb::XdsRoutingPicker::Pick(PickArgs args) { - gpr_log(GPR_INFO, "donna picked first first"); - return pickers_[0]->Pick(args); + std::string path; + for (const auto& p : *(args.initial_metadata)) { + if (memcmp(p.first.data(), ":path", static_cast(p.first.size())) == + 0) { + path = std::string(p.second.data(), static_cast(p.second.size())); + break; + } + } + std::vector v = absl::StrSplit(path, '/'); + GPR_DEBUG_ASSERT(v.size() == 3); + std::string service = v[1]; + std::string method = v[2]; + for (int i = 0; i < parent_->config_->route_vector().size(); ++i) { + if (service == parent_->config_->route_vector()[i].first.first && + ("" == parent_->config_->route_vector()[i].first.second || + method == parent_->config_->route_vector()[i].first.second)) { + auto picker = pickers_.find(parent_->config_->route_vector()[i].second); + if (picker != pickers_.end()) { + gpr_log(GPR_INFO, "XdsRouting Picked: %s for path %s", + picker->first.c_str(), path.c_str()); + return picker->second.get()->Pick(args); + } + } + } + return pickers_.begin()->second.get()->Pick(args); } // @@ -217,7 +255,8 @@ XdsRoutingLb::XdsRoutingLb(Args args) XdsRoutingLb::~XdsRoutingLb() { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { - gpr_log(GPR_INFO, "[xds_routing_lb %p] destroying xds_routing LB policy", this); + gpr_log(GPR_INFO, "[xds_routing_lb %p] destroying xds_routing LB policy", + this); } } @@ -266,8 +305,8 @@ void XdsRoutingLb::UpdateLocked(UpdateArgs args) { auto it = actions_.find(name); if (it == actions_.end()) { it = actions_.emplace(std::make_pair(name, nullptr)).first; - it->second = - MakeOrphanable(Ref(DEBUG_LOCATION, "XdsRoutingChild"), it->first); + it->second = MakeOrphanable( + Ref(DEBUG_LOCATION, "XdsRoutingChild"), it->first); } it->second->UpdateLocked(config, args.addresses, args.args); } @@ -278,7 +317,7 @@ void XdsRoutingLb::UpdateStateLocked() { // that are ready. Each child is represented by a portion of the range // proportional to its weight, such that the total range is the sum of the // weights of all children. - XdsRoutingPicker::PickerList picker_list; + XdsRoutingPicker::PickerMap picker_map; // Also count the number of children in each state, to determine the // overall state. size_t num_connecting = 0; @@ -293,7 +332,7 @@ void XdsRoutingLb::UpdateStateLocked() { } switch (child->connectivity_state()) { case GRPC_CHANNEL_READY: { - picker_list.push_back(child->picker_wrapper()); + picker_map[child_name] = child->picker_wrapper(); break; } case GRPC_CHANNEL_CONNECTING: { @@ -314,7 +353,7 @@ void XdsRoutingLb::UpdateStateLocked() { } // Determine aggregated connectivity state. 
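// The rule is: READY if at least one child is READY; otherwise CONNECTING
// if any child is CONNECTING; otherwise IDLE if any child is IDLE;
// otherwise TRANSIENT_FAILURE.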
grpc_connectivity_state connectivity_state; - if (picker_list.size() > 0) { + if (picker_map.size() > 0) { connectivity_state = GRPC_CHANNEL_READY; } else if (num_connecting > 0) { connectivity_state = GRPC_CHANNEL_CONNECTING; @@ -330,8 +369,8 @@ void XdsRoutingLb::UpdateStateLocked() { std::unique_ptr picker; switch (connectivity_state) { case GRPC_CHANNEL_READY: - picker = absl::make_unique(Ref(DEBUG_LOCATION, "XdsRoutingPicker"), - std::move(picker_list)); + picker = absl::make_unique( + Ref(DEBUG_LOCATION, "XdsRoutingPicker"), std::move(picker_map)); break; case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_IDLE: @@ -350,8 +389,8 @@ void XdsRoutingLb::UpdateStateLocked() { // XdsRoutingLb::XdsRoutingChild // -XdsRoutingLb::XdsRoutingChild::XdsRoutingChild(RefCountedPtr xds_routing_policy, - const std::string& name) +XdsRoutingLb::XdsRoutingChild::XdsRoutingChild( + RefCountedPtr xds_routing_policy, const std::string& name) : xds_routing_policy_(std::move(xds_routing_policy)), name_(name) { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { gpr_log(GPR_INFO, "[xds_routing_lb %p] created XdsRoutingChild %p for %s", @@ -361,7 +400,8 @@ XdsRoutingLb::XdsRoutingChild::XdsRoutingChild(RefCountedPtr xds_r XdsRoutingLb::XdsRoutingChild::~XdsRoutingChild() { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { - gpr_log(GPR_INFO, "[xds_routing_lb %p] XdsRoutingChild %p %s: destroying child", + gpr_log(GPR_INFO, + "[xds_routing_lb %p] XdsRoutingChild %p %s: destroying child", xds_routing_policy_.get(), this, name_.c_str()); } xds_routing_policy_.reset(DEBUG_LOCATION, "XdsRoutingChild"); @@ -369,7 +409,8 @@ XdsRoutingLb::XdsRoutingChild::~XdsRoutingChild() { void XdsRoutingLb::XdsRoutingChild::Orphan() { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { - gpr_log(GPR_INFO, "[xds_routing_lb %p] XdsRoutingChild %p %s: shutting down child", + gpr_log(GPR_INFO, + "[xds_routing_lb %p] XdsRoutingChild %p %s: shutting down child", xds_routing_policy_.get(), this, name_.c_str()); } // Remove the child policy's interested_parties pollset_set from the @@ -387,7 +428,8 @@ void XdsRoutingLb::XdsRoutingChild::Orphan() { Unref(); } -OrphanablePtr XdsRoutingLb::XdsRoutingChild::CreateChildPolicyLocked( +OrphanablePtr +XdsRoutingLb::XdsRoutingChild::CreateChildPolicyLocked( const grpc_channel_args* args) { LoadBalancingPolicy::Args lb_policy_args; lb_policy_args.combiner = xds_routing_policy_->combiner(); @@ -411,9 +453,9 @@ OrphanablePtr XdsRoutingLb::XdsRoutingChild::CreateChildPol return lb_policy; } -void XdsRoutingLb::XdsRoutingChild::UpdateLocked(const XdsRoutingLbConfig::ChildConfig& config, - const ServerAddressList& addresses, - const grpc_channel_args* args) { +void XdsRoutingLb::XdsRoutingChild::UpdateLocked( + const XdsRoutingLbConfig::ChildConfig& config, + const ServerAddressList& addresses, const grpc_channel_args* args) { if (xds_routing_policy_->shutting_down_) return; // Update child weight. // Reactivate if needed. 
@@ -434,12 +476,15 @@ void XdsRoutingLb::XdsRoutingChild::UpdateLocked(const XdsRoutingLbConfig::Child gpr_log(GPR_INFO, "[xds_routing_lb %p] XdsRoutingChild %p %s: Updating child " "policy handler %p", - xds_routing_policy_.get(), this, name_.c_str(), child_policy_.get()); + xds_routing_policy_.get(), this, name_.c_str(), + child_policy_.get()); } child_policy_->UpdateLocked(std::move(update_args)); } -void XdsRoutingLb::XdsRoutingChild::ExitIdleLocked() { child_policy_->ExitIdleLocked(); } +void XdsRoutingLb::XdsRoutingChild::ExitIdleLocked() { + child_policy_->ExitIdleLocked(); +} void XdsRoutingLb::XdsRoutingChild::ResetBackoffLocked() { child_policy_->ResetBackoffLocked(); @@ -459,7 +504,8 @@ void XdsRoutingLb::XdsRoutingChild::DeactivateLocked() { delayed_removal_timer_callback_pending_ = true; } -void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimer(void* arg, grpc_error* error) { +void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimer(void* arg, + grpc_error* error) { XdsRoutingChild* self = static_cast(arg); self->xds_routing_policy_->combiner()->Run( GRPC_CLOSURE_INIT(&self->on_delayed_removal_timer_, @@ -467,8 +513,8 @@ void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimer(void* arg, grpc_error* GRPC_ERROR_REF(error)); } -void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked(void* arg, - grpc_error* error) { +void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked( + void* arg, grpc_error* error) { XdsRoutingChild* self = static_cast(arg); self->delayed_removal_timer_callback_pending_ = false; if (error == GRPC_ERROR_NONE && !self->shutdown_) { @@ -481,21 +527,23 @@ void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked(void* arg, // XdsRoutingLb::XdsRoutingChild::Helper // -RefCountedPtr XdsRoutingLb::XdsRoutingChild::Helper::CreateSubchannel( +RefCountedPtr +XdsRoutingLb::XdsRoutingChild::Helper::CreateSubchannel( const grpc_channel_args& args) { - gpr_log(GPR_INFO, "donna XdsRoutingChild::Helper::CreateSubchannel"); + gpr_log(GPR_INFO, "XdsRoutingChild::Helper::CreateSubchannel"); if (xds_routing_child_->xds_routing_policy_->shutting_down_) return nullptr; - return xds_routing_child_->xds_routing_policy_->channel_control_helper()->CreateSubchannel( - args); + return xds_routing_child_->xds_routing_policy_->channel_control_helper() + ->CreateSubchannel(args); } void XdsRoutingLb::XdsRoutingChild::Helper::UpdateState( grpc_connectivity_state state, std::unique_ptr picker) { - gpr_log(GPR_INFO, "donna XdsRoutingChild::Helper::UpdateState"); + gpr_log(GPR_INFO, "XdsRoutingChild::Helper::UpdateState %s", + xds_routing_child_->name().c_str()); if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; // Cache the picker in the XdsRoutingChild. - xds_routing_child_->picker_wrapper_ = - MakeRefCounted(std::move(picker)); + xds_routing_child_->picker_wrapper_ = MakeRefCounted( + xds_routing_child_->name(), std::move(picker)); // Decide what state to report for aggregation purposes. // If we haven't seen a failure since the last time we were in state // READY, then we report the state change as-is. 
However, once we do see @@ -516,14 +564,15 @@ void XdsRoutingLb::XdsRoutingChild::Helper::UpdateState( void XdsRoutingLb::XdsRoutingChild::Helper::RequestReresolution() { if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; - xds_routing_child_->xds_routing_policy_->channel_control_helper()->RequestReresolution(); + xds_routing_child_->xds_routing_policy_->channel_control_helper() + ->RequestReresolution(); } -void XdsRoutingLb::XdsRoutingChild::Helper::AddTraceEvent(TraceSeverity severity, - StringView message) { +void XdsRoutingLb::XdsRoutingChild::Helper::AddTraceEvent( + TraceSeverity severity, StringView message) { if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; - xds_routing_child_->xds_routing_policy_->channel_control_helper()->AddTraceEvent(severity, - message); + xds_routing_child_->xds_routing_policy_->channel_control_helper() + ->AddTraceEvent(severity, message); } // @@ -565,11 +614,14 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { for (const auto& p : it->second.array_value()) { auto it_cds = p.object_value().find("cds"); auto it_weighted_target = p.object_value().find("weighted_target"); - if (it_cds == p.object_value().end() && it_weighted_target == p.object_value().end()) { + if (it_cds == p.object_value().end() && + it_weighted_target == p.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:actions error: each action needs to be either cds or weighted target")); + "field:actions error: each action needs to be either cds or " + "weighted target")); } - auto it_name = (it_cds == p.object_value().end() ? it_weighted_target : it_cds); + auto it_name = + (it_cds == p.object_value().end() ? it_weighted_target : it_cds); auto it_child_policy = p.object_value().find("child_policy"); if (it_child_policy == p.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -594,12 +646,57 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { } } } + XdsRoutingLbConfig::RouteVector route_vector; + auto route_iter = json.object_value().find("routes"); + if (route_iter == json.object_value().end()) { + gpr_log(GPR_INFO, "No routes specified"); + } else if (route_iter->second.type() != Json::Type::ARRAY) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:type should be array")); + } else { + for (const auto& p : route_iter->second.array_value()) { + auto method_name = p.object_value().find("methodName"); + if (method_name == p.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:methodName is required")); + } else { + auto action_name = p.object_value().find("action"); + if (action_name == p.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:action is required")); + } else { + XdsRoutingLbConfig::Matcher matcher; + auto service = method_name->second.object_value().find("service"); + auto method = method_name->second.object_value().find("method"); + if (service == method_name->second.object_value().end() && + method != method_name->second.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:methodName error: service is empty when method is " + "not")); + } + if (service != method_name->second.object_value().end()) { + matcher.first = service->second.string_value(); + } else { + matcher.first = ""; + } + if (method != method_name->second.object_value().end()) { + matcher.second 
= method->second.string_value(); + } else { + matcher.first = ""; + } + route_vector.emplace_back(matcher, + action_name->second.string_value()); + } + } + } + } if (!error_list.empty()) { *error = GRPC_ERROR_CREATE_FROM_VECTOR( "xds_routing_experimental LB policy config", &error_list); return nullptr; } - return MakeRefCounted(std::move(action_map)); + return MakeRefCounted(std::move(action_map), + std::move(route_vector)); } private: diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 9f0b17f5865..b8a07c95bf0 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -24,6 +24,7 @@ #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" +#include "absl/strings/str_split.h" #include #include @@ -1011,34 +1012,61 @@ grpc_error* RouteConfigParse( return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No route found in the virtual host."); } - // Only look at the last one in the route list (the default route), - const envoy_api_v2_route_Route* route = routes[size - 1]; - // Validate that the match field must have a prefix field which is an empty - // string. - const envoy_api_v2_route_RouteMatch* match = - envoy_api_v2_route_Route_match(route); - if (!envoy_api_v2_route_RouteMatch_has_prefix(match)) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "No prefix field found in RouteMatch."); - } - const upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); - if (!upb_strview_eql(prefix, upb_strview_makez(""))) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Prefix is not empty string."); - } - if (!envoy_api_v2_route_Route_has_route(route)) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "No RouteAction found in route."); - } - const envoy_api_v2_route_RouteAction* route_action = - envoy_api_v2_route_Route_route(route); - // Get the cluster in the RouteAction. 
- if (!envoy_api_v2_route_RouteAction_has_cluster(route_action)) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "No cluster found in RouteAction."); + + for (size_t i = 0; i < size; ++i) { + const envoy_api_v2_route_Route* route = routes[i]; + const envoy_api_v2_route_RouteMatch* match = + envoy_api_v2_route_Route_match(route); + XdsApi::RdsRoute rds_route; + const upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); + const upb_strview path = envoy_api_v2_route_RouteMatch_path(match); + if (!upb_strview_eql(prefix, upb_strview_makez(""))) { + std::string prefix_string = std::string(prefix.data, prefix.size); + std::vector v = absl::StrSplit(prefix_string, '/'); + if (v.size() != 2) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix not in the required format of /service/"); + } + rds_route.service = v[1]; + if (!upb_strview_eql(path, upb_strview_makez(""))) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix is not empty string, path cannot also be non-empty."); + } + } else if (!upb_strview_eql(path, upb_strview_makez(""))) { + std::string path_string = std::string(path.data, path.size); + std::vector v = absl::StrSplit(path_string, '/'); + if (v.size() != 3) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Path not in the required format of /service/method"); + } + rds_route.service = v[1]; + rds_route.method = v[2]; + if (!upb_strview_eql(prefix, upb_strview_makez(""))) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Path is not empty string, prefix cannot also be non-empty."); + } + } + if (!envoy_api_v2_route_Route_has_route(route)) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "No RouteAction found in route."); + } + const envoy_api_v2_route_RouteAction* route_action = + envoy_api_v2_route_Route_route(route); + // Get the cluster in the RouteAction. + if (!envoy_api_v2_route_RouteAction_has_cluster(route_action)) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "No cluster found in RouteAction."); + } + const upb_strview action = + envoy_api_v2_route_RouteAction_cluster(route_action); + rds_route.action_name = std::string(action.data, action.size); + rds_update->routes.emplace_back(std::move(rds_route)); + gpr_log(GPR_INFO, "RouteConfigParse a route %s %s %s %s", + rds_update->routes[i].service.c_str(), + rds_update->routes[i].method.c_str(), + rds_update->routes[i].action_type.c_str(), + rds_update->routes[i].action_name.c_str()); } - const upb_strview cluster = - envoy_api_v2_route_RouteAction_cluster(route_action); - rds_update->cluster_name = std::string(cluster.data, cluster.size); return GRPC_ERROR_NONE; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h index b428aa4bbc7..175f2d3a28a 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.h +++ b/src/core/ext/filters/client_channel/xds/xds_api.h @@ -43,9 +43,15 @@ class XdsApi { static const char* kCdsTypeUrl; static const char* kEdsTypeUrl; + struct RdsRoute { + std::string service; + std::string method; + std::string action_type; + std::string action_name; + }; + struct RdsUpdate { - // The name to use in the CDS request. 
- std::string cluster_name; + std::vector routes; }; struct LdsUpdate { diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 11ef7252cca..0e9cd5c0c29 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -890,7 +890,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( } const std::string& cluster_name = lds_update->rds_update.has_value() - ? lds_update->rds_update.value().cluster_name + ? lds_update->rds_update.value().routes[0].action_name : ""; if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { gpr_log(GPR_INFO, @@ -898,6 +898,11 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( "cluster_name=%s (empty if RDS is needed to obtain it)", xds_client(), lds_update->route_config_name.c_str(), cluster_name.c_str()); + for (auto route : lds_update->rds_update.value().routes) { + gpr_log(GPR_INFO, "Create service config using %s %s %s %s", + route.service.c_str(), route.method.c_str(), + route.action_type.c_str(), route.action_name.c_str()); + } } auto& lds_state = state_map_[XdsApi::kLdsTypeUrl]; auto& state = lds_state.subscribed_resources[xds_client()->server_name_]; @@ -917,15 +922,15 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( XdsApi::kRdsTypeUrl, xds_client()->route_config_name_, /*delay_unsubscription=*/!lds_update->route_config_name.empty()); } - xds_client()->route_config_name_ = std::move(lds_update->route_config_name); + xds_client()->route_config_name_ = lds_update->route_config_name; if (lds_update->rds_update.has_value()) { // If cluster_name was found inlined in LDS response, notify the watcher // immediately. xds_client()->cluster_name_ = - std::move(lds_update->rds_update.value().cluster_name); + lds_update->rds_update.value().routes[0].action_name; RefCountedPtr service_config; grpc_error* error = xds_client()->CreateServiceConfig( - xds_client()->cluster_name_, &service_config); + lds_update->rds_update.value(), &service_config); if (error == GRPC_ERROR_NONE) { xds_client()->service_config_watcher_->OnServiceConfigChanged( std::move(service_config)); @@ -951,14 +956,14 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate( } if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { gpr_log(GPR_INFO, "[xds_client %p] RDS update received: cluster_name=%s", - xds_client(), rds_update->cluster_name.c_str()); + xds_client(), rds_update->routes[0].action_name.c_str()); } auto& rds_state = state_map_[XdsApi::kRdsTypeUrl]; auto& state = rds_state.subscribed_resources[xds_client()->route_config_name_]; if (state != nullptr) state->Finish(); // Ignore identical update. - if (xds_client()->cluster_name_ == rds_update->cluster_name) { + if (xds_client()->cluster_name_ == rds_update->routes[0].action_name) { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { gpr_log(GPR_INFO, "[xds_client %p] RDS update identical to current, ignoring.", @@ -966,11 +971,11 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate( } return; } - xds_client()->cluster_name_ = std::move(rds_update->cluster_name); + xds_client()->cluster_name_ = rds_update->routes[0].action_name; // Notify the watcher. 
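// The service config generated from the full RdsUpdate below has roughly
// this shape (cluster/service/method names are illustrative):
//   { "loadBalancingConfig": [
//     { "xds_routing_experimental": {
//         "actions": [
//           { "cds": "cluster_a",
//             "child_policy": [ { "cds_experimental": { "cluster": "cluster_a" } } ] } ],
//         "routes": [
//           { "methodName": { "service": "pkg.Service", "method": "Method" },
//             "action": "cluster_a" } ] } } ] }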
RefCountedPtr service_config; - grpc_error* error = xds_client()->CreateServiceConfig( - xds_client()->cluster_name_, &service_config); + grpc_error* error = + xds_client()->CreateServiceConfig(rds_update.value(), &service_config); if (error == GRPC_ERROR_NONE) { xds_client()->service_config_watcher_->OnServiceConfigChanged( std::move(service_config)); @@ -2038,38 +2043,84 @@ void XdsClient::ResetBackoff() { } } -grpc_error* XdsClient::CreateServiceConfig( - const std::string& cluster_name, - RefCountedPtr* service_config) const { +char* XdsClient::CreateServiceConfigActionCluster( + const std::string& cluster_name, const bool without_comma) const { + const char* last_line = without_comma ? "}" : "},"; char* json; gpr_asprintf(&json, - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"xds_routing_experimental\":{\n" - " \"actions\":[\n" - " { \"cds\": \"cluster_1\",\n" - " \"child_policy\":[\n" - " { \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " } },\n" - " { \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " } }\n" - " ]\n" - " },\n" - " { \"cds\": \"cluster_2\",\n" + " { \"cds\": \"%s\",\n" " \"child_policy\":[\n" " { \"cds_experimental\":{\n" " \"cluster\": \"%s\"\n" " } }\n" " ]\n" - " } ]\n" + " %s\n", + cluster_name.c_str(), cluster_name.c_str(), last_line); + return json; +} + +char* XdsClient::CreateServiceConfigRoute(const std::string& cluster_name, + const std::string& service, + const std::string& method, + const bool without_comma) const { + const char* last_line = without_comma ? "}" : "},"; + char* json; + gpr_asprintf(&json, + " { \"methodName\":\n" + " { \"service\": \"%s\",\n" + " \"method\": \"%s\"},\n" + " \"action\": \"%s\"\n" + " %s\n", + service.c_str(), method.c_str(), cluster_name.c_str(), + last_line); + return json; +} + +grpc_error* XdsClient::CreateServiceConfig( + const XdsApi::RdsUpdate& rds_update, + RefCountedPtr* service_config) const { + gpr_strvec v; + gpr_strvec_init(&v); + char* json_start; + gpr_asprintf(&json_start, + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"xds_routing_experimental\":{\n" + " \"actions\":[\n"); + gpr_strvec_add(&v, json_start); + for (size_t i = 0; i < rds_update.routes.size(); ++i) { + auto route = rds_update.routes[i]; + // TODO: (donnadionne) CreateServiceConfigActionWeightedTarget + char* action = CreateServiceConfigActionCluster( + route.action_name.c_str(), i == (rds_update.routes.size() - 1)); + gpr_strvec_add(&v, action); + } + char* json_transition; + gpr_asprintf(&json_transition, + " ],\n" + " \"routes\":[\n"); + gpr_strvec_add(&v, json_transition); + for (size_t i = 0; i < rds_update.routes.size(); ++i) { + auto route_info = rds_update.routes[i]; + char* route = CreateServiceConfigRoute( + route_info.action_name.c_str(), route_info.service.c_str(), + route_info.method.c_str(), i == (rds_update.routes.size() - 1)); + gpr_strvec_add(&v, route); + } + char* json_end; + gpr_asprintf(&json_end, + " ]\n" " } }\n" " ]\n" - "}", - cluster_name.c_str(), "blah2", "blah3"); + "}"); + gpr_strvec_add(&v, json_end); + size_t len; + char* json = gpr_strvec_flatten(&v, &len); + gpr_strvec_destroy(&v); grpc_error* error = GRPC_ERROR_NONE; *service_config = ServiceConfig::Create(json, &error); + gpr_log(GPR_INFO, "Built service config: \"%s\"", + service_config->get()->json_string().c_str()); gpr_free(json); return error; } diff --git a/src/core/ext/filters/client_channel/xds/xds_client.h b/src/core/ext/filters/client_channel/xds/xds_client.h index 228b9a21b47..d29ac12e659 100644 --- 
a/src/core/ext/filters/client_channel/xds/xds_client.h +++ b/src/core/ext/filters/client_channel/xds/xds_client.h @@ -218,8 +218,15 @@ class XdsClient : public InternallyRefCounted { // Sends an error notification to all watchers. void NotifyOnError(grpc_error* error); + char* CreateServiceConfigActionCluster( + const std::string& cluster_name, const bool without_comma = false) const; + char* CreateServiceConfigRoute(const std::string& prefix, + const std::string& service, + const std::string& method, + const bool without_comma = false) const; + grpc_error* CreateServiceConfig( - const std::string& cluster_name, + const XdsApi::RdsUpdate& rds_update, RefCountedPtr* service_config) const; XdsApi::ClusterLoadReportMap BuildLoadReportSnapshot(); diff --git a/src/proto/grpc/testing/xds/lds_rds_for_test.proto b/src/proto/grpc/testing/xds/lds_rds_for_test.proto index 03d700c9017..acf2186115c 100644 --- a/src/proto/grpc/testing/xds/lds_rds_for_test.proto +++ b/src/proto/grpc/testing/xds/lds_rds_for_test.proto @@ -34,6 +34,7 @@ message RouteMatch { // If specified, the route is a prefix rule meaning that the prefix must // match the beginning of the *:path* header. string prefix = 1; + string path = 2; } } diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 612f8d42c65..15c594cdf8d 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1921,6 +1921,91 @@ TEST_P(LdsTest, Timeout) { CheckRpcSendFailure(); } +TEST_P(LdsTest, XdsRoutingPathMatching) { + const char* kNewClusterName = "new_cluster_name"; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); + // We need to wait for all backends to come online. + WaitForAllBackends(0, 2); + // Populate new EDS resource. + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewClusterName), + kNewClusterName); + // Populate new CDS resource. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster, kNewClusterName); + // Change RDS resource to point to new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_match() + ->set_path("/grpc.testing.EchoTestService/Echo"); + //->set_prefix("/dgrpc.testing.EchoTestService"); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster(kNewClusterName); + Listener listener = + balancers_[0]->ads_service()->BuildListener(new_route_config); + balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); + // Wait for all new backends to be used. + std::tuple counts = WaitForAllBackends(2, 4); + // Make sure no RPCs failed in the transition. 
+ EXPECT_EQ(0, std::get<1>(counts)); +} + +TEST_P(LdsTest, XdsRoutingPrefixMatching) { + const char* kNewClusterName = "new_cluster_name"; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); + // We need to wait for all backends to come online. + WaitForAllBackends(0, 2); + // Populate new EDS resource. + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(2, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewClusterName), + kNewClusterName); + // Populate new CDS resource. + Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); + new_cluster.set_name(kNewClusterName); + balancers_[0]->ads_service()->SetCdsResource(new_cluster, kNewClusterName); + // Change RDS resource to point to new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_match() + ->set_prefix("/grpc.testing.EchoTestService"); + new_route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_route() + ->set_cluster(kNewClusterName); + Listener listener = + balancers_[0]->ads_service()->BuildListener(new_route_config); + balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); + // Wait for all new backends to be used. + std::tuple counts = WaitForAllBackends(2, 4); + // Make sure no RPCs failed in the transition. + EXPECT_EQ(0, std::get<1>(counts)); +} + using RdsTest = BasicTest; // Tests that RDS client should send an ACK upon correct RDS response. From f487d1be61d833a8a8c62b5886ad258dd5794800 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 30 Mar 2020 23:31:54 -0700 Subject: [PATCH 05/37] Addressing code review comments. 
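Reviewer note (not part of the applied diff): the reworked CreateServiceConfig() in this commit builds an xds_routing_experimental config whose actions are keyed as "cds:<cluster>" and whose routes carry a methodName/action pair. The sketch below is illustrative only; it mirrors the StrFormat/StrJoin construction from the diff, and the cluster and method names are hypothetical placeholders, not values produced by this patch.

// Illustrative sketch only -- not part of this patch. Shows the shape of the
// service config the reworked CreateServiceConfig() is expected to emit.
#include <string>
#include <vector>

#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"

std::string BuildExampleRoutingServiceConfig() {
  struct Route {
    std::string service;
    std::string method;
    std::string cluster;
  };
  // Hypothetical routes: one path match and one prefix (empty method) match.
  const std::vector<Route> routes = {
      {"grpc.testing.EchoTestService", "Echo", "cluster_1"},
      {"grpc.testing.EchoTestService", "", "cluster_2"},
  };
  std::vector<std::string> actions;
  std::vector<std::string> route_entries;
  for (const Route& r : routes) {
    // One "cds:<cluster>" action per route, each wrapping a cds_experimental
    // child policy for that cluster.
    actions.push_back(absl::StrFormat(
        "      \"cds:%s\":{ \"child_policy\":[ { \"cds_experimental\":"
        "{ \"cluster\": \"%s\" } } ] }",
        r.cluster, r.cluster));
    // One route entry per RdsRoute, pointing back at its action by name.
    route_entries.push_back(absl::StrFormat(
        "      { \"methodName\": { \"service\": \"%s\", \"method\": \"%s\" }, "
        "\"action\": \"cds:%s\" }",
        r.service, r.method, r.cluster));
  }
  return absl::StrCat(
      "{ \"loadBalancingConfig\":[ { \"xds_routing_experimental\":{\n",
      "  \"actions\":{\n", absl::StrJoin(actions, ",\n"), "\n  },\n",
      "  \"routes\":[\n", absl::StrJoin(route_entries, ",\n"), "\n  ]\n",
      "} } ] }");
}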
--- .../lb_policy/xds/xds_routing.cc | 303 ++++++++++-------- .../ext/filters/client_channel/xds/xds_api.cc | 34 +- .../ext/filters/client_channel/xds/xds_api.h | 3 +- .../filters/client_channel/xds/xds_client.cc | 181 +++++------ .../filters/client_channel/xds/xds_client.h | 8 - .../grpc_unsecure_plugin_registry.cc | 4 + test/cpp/end2end/xds_end2end_test.cc | 23 +- 7 files changed, 273 insertions(+), 283 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 2f409d6746b..f23db70bdb4 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -22,6 +22,7 @@ #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" +#include "absl/strings/string_view.h" #include @@ -52,24 +53,26 @@ class XdsRoutingLbConfig : public LoadBalancingPolicy::Config { struct ChildConfig { RefCountedPtr config; }; - - using Matcher = std::pair; - using RouteVector = std::vector>; + struct Matcher { + std::string service; + std::string method; + }; + using RouteTable = std::vector>; using ActionMap = std::map; - explicit XdsRoutingLbConfig(ActionMap action_map, RouteVector route_vector) + XdsRoutingLbConfig(ActionMap action_map, RouteTable route_table) : action_map_(std::move(action_map)), - route_vector_(std::move(route_vector)) {} + route_table_(std::move(route_table)) {} const char* name() const override { return kXdsRouting; } const ActionMap& action_map() const { return action_map_; } - const RouteVector& route_vector() const { return route_vector_; } + const RouteTable& route_table() const { return route_table_; } private: ActionMap action_map_; - RouteVector route_vector_; + RouteTable route_table_; }; // xds_routing LB policy. @@ -87,39 +90,37 @@ class XdsRoutingLb : public LoadBalancingPolicy { // A simple wrapper for ref-counting a picker from the child policy. class ChildPickerWrapper : public RefCounted { public: - explicit ChildPickerWrapper(const std::string& name, - std::unique_ptr picker) - : name_(name), picker_(std::move(picker)) {} + ChildPickerWrapper(std::string name, + std::unique_ptr picker) + : name_(std::move(name)), picker_(std::move(picker)) {} PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } - std::string name() { return name_; } + const std::string& name() { return name_; } private: std::string name_; std::unique_ptr picker_; }; - // Picks a child using stateless WRR and then delegates to that + // Picks a child using prefix or path matching and then delegates to that // child's picker. - class XdsRoutingPicker : public SubchannelPicker { + class RoutePicker : public SubchannelPicker { public: - // Maintains a xds_routing list of pickers from each child that is in - // ready state. The first element in the pair represents the end of a - // range proportional to the child's weight. The start of the range - // is the previous value in the vector and is 0 for the first element. - using PickerList = InlinedVector, 1>; + struct Route { + XdsRoutingLbConfig::Matcher matcher; + RefCountedPtr picker; + }; - using PickerMap = std::map>; + // Maintains an ordered xds route table as provided by RDS response. 
+ using RouteTable = std::vector; - XdsRoutingPicker(RefCountedPtr parent, PickerMap pickers) - : parent_(std::move(parent)), pickers_(std::move(pickers)) {} - ~XdsRoutingPicker() { parent_.reset(DEBUG_LOCATION, "XdsRoutingPicker"); } + RoutePicker(RouteTable route_table) + : route_table_(std::move(route_table)) {} PickResult Pick(PickArgs args) override; private: - RefCountedPtr parent_; - PickerMap pickers_; + RouteTable route_table_; }; // Each XdsRoutingChild holds a ref to its parent XdsRoutingLb. @@ -145,8 +146,6 @@ class XdsRoutingLb : public LoadBalancingPolicy { return picker_wrapper_; } - std::string name() const { return name_; } - private: class Helper : public ChannelControlHelper { public: @@ -211,35 +210,29 @@ class XdsRoutingLb : public LoadBalancingPolicy { }; // -// XdsRoutingLb::XdsRoutingPicker +// XdsRoutingLb::RoutePicker // -XdsRoutingLb::PickResult XdsRoutingLb::XdsRoutingPicker::Pick(PickArgs args) { - std::string path; +XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { + absl::string_view path; for (const auto& p : *(args.initial_metadata)) { - if (memcmp(p.first.data(), ":path", static_cast(p.first.size())) == - 0) { - path = std::string(p.second.data(), static_cast(p.second.size())); + if (p.first == ":path") { + path = p.second; break; } } - std::vector v = absl::StrSplit(path, '/'); - GPR_DEBUG_ASSERT(v.size() == 3); - std::string service = v[1]; - std::string method = v[2]; - for (int i = 0; i < parent_->config_->route_vector().size(); ++i) { - if (service == parent_->config_->route_vector()[i].first.first && - ("" == parent_->config_->route_vector()[i].first.second || - method == parent_->config_->route_vector()[i].first.second)) { - auto picker = pickers_.find(parent_->config_->route_vector()[i].second); - if (picker != pickers_.end()) { - gpr_log(GPR_INFO, "XdsRouting Picked: %s for path %s", - picker->first.c_str(), path.c_str()); - return picker->second.get()->Pick(args); + std::vector v = absl::StrSplit(path.substr(1), '/'); + for (int i = 0; i < route_table_.size(); ++i) { + if (v[0] == route_table_[i].matcher.service && + ("" == route_table_[i].matcher.method || + v[1] == route_table_[i].matcher.method)) { + auto picker = route_table_[i].picker; + if (picker != nullptr) { + return picker.get()->Pick(args); } } } - return pickers_.begin()->second.get()->Pick(args); + return route_table_[route_table_.size() - 1].picker.get()->Pick(args); } // @@ -313,11 +306,7 @@ void XdsRoutingLb::UpdateLocked(UpdateArgs args) { } void XdsRoutingLb::UpdateStateLocked() { - // Construct a new picker which maintains a map of all child pickers - // that are ready. Each child is represented by a portion of the range - // proportional to its weight, such that the total range is the sum of the - // weights of all children. - XdsRoutingPicker::PickerMap picker_map; + std::map> picker_map; // Also count the number of children in each state, to determine the // overall state. 
size_t num_connecting = 0; @@ -367,10 +356,19 @@ void XdsRoutingLb::UpdateStateLocked() { ConnectivityStateName(connectivity_state)); } std::unique_ptr picker; + RoutePicker::RouteTable route_table; switch (connectivity_state) { case GRPC_CHANNEL_READY: - picker = absl::make_unique( - Ref(DEBUG_LOCATION, "XdsRoutingPicker"), std::move(picker_map)); + for (int i = 0; i < config_->route_table().size(); ++i) { + RoutePicker::Route route; + route.matcher = config_->route_table()[i].first; + auto child_picker = picker_map.find(config_->route_table()[i].second); + if (child_picker != picker_map.end()) { + route.picker = child_picker->second; + } + route_table.push_back(std::move(route)); + } + picker = absl::make_unique(std::move(route_table)); break; case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_IDLE: @@ -460,6 +458,7 @@ void XdsRoutingLb::XdsRoutingChild::UpdateLocked( // Update child weight. // Reactivate if needed. if (delayed_removal_timer_callback_pending_) { + delayed_removal_timer_callback_pending_ = false; grpc_timer_cancel(&delayed_removal_timer_); } // Create child policy if needed. @@ -530,7 +529,6 @@ void XdsRoutingLb::XdsRoutingChild::OnDelayedRemovalTimerLocked( RefCountedPtr XdsRoutingLb::XdsRoutingChild::Helper::CreateSubchannel( const grpc_channel_args& args) { - gpr_log(GPR_INFO, "XdsRoutingChild::Helper::CreateSubchannel"); if (xds_routing_child_->xds_routing_policy_->shutting_down_) return nullptr; return xds_routing_child_->xds_routing_policy_->channel_control_helper() ->CreateSubchannel(args); @@ -538,12 +536,15 @@ XdsRoutingLb::XdsRoutingChild::Helper::CreateSubchannel( void XdsRoutingLb::XdsRoutingChild::Helper::UpdateState( grpc_connectivity_state state, std::unique_ptr picker) { - gpr_log(GPR_INFO, "XdsRoutingChild::Helper::UpdateState %s", - xds_routing_child_->name().c_str()); + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, + "XdsRoutingChild::Helper::UpdateState child %s, state %d, piker %p", + xds_routing_child_->name_.c_str(), state, picker.get()); + } if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; // Cache the picker in the XdsRoutingChild. xds_routing_child_->picker_wrapper_ = MakeRefCounted( - xds_routing_child_->name(), std::move(picker)); + xds_routing_child_->name_, std::move(picker)); // Decide what state to report for aggregation purposes. // If we haven't seen a failure since the last time we were in state // READY, then we report the state change as-is. However, once we do see @@ -607,87 +608,67 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "field:actions error:required field not present")); - } else if (it->second.type() != Json::Type::ARRAY) { + } else if (it->second.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:actions error:type should be array")); + "field:actions error:type should be object")); } else { - for (const auto& p : it->second.array_value()) { - auto it_cds = p.object_value().find("cds"); - auto it_weighted_target = p.object_value().find("weighted_target"); - if (it_cds == p.object_value().end() && - it_weighted_target == p.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:actions error: each action needs to be either cds or " - "weighted target")); - } - auto it_name = - (it_cds == p.object_value().end() ? 
it_weighted_target : it_cds); - auto it_child_policy = p.object_value().find("child_policy"); - if (it_child_policy == p.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:actions error: each action needs child policies")); - } + for (const auto& p : it->second.object_value()) { XdsRoutingLbConfig::ChildConfig child_config; std::vector child_errors = - ParseChildConfig(it_child_policy->second, &child_config); + ParseChildConfig(p.second, &child_config); if (!child_errors.empty()) { // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error // string is not static in this case. grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( - absl::StrCat("field:actions name:", - it_name->second.string_value()) - .c_str()); + absl::StrCat("field:actions name:", p.first).c_str()); for (grpc_error* child_error : child_errors) { error = grpc_error_add_child(error, child_error); } error_list.push_back(error); } else { - action_map[it_name->second.string_value()] = std::move(child_config); + action_map[p.first] = std::move(child_config); } } } - XdsRoutingLbConfig::RouteVector route_vector; - auto route_iter = json.object_value().find("routes"); - if (route_iter == json.object_value().end()) { - gpr_log(GPR_INFO, "No routes specified"); - } else if (route_iter->second.type() != Json::Type::ARRAY) { + XdsRoutingLbConfig::RouteTable route_table; + it = json.object_value().find("routes"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:required field not present")); + } else if (it->second.type() != Json::Type::ARRAY) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "field:routes error:type should be array")); } else { - for (const auto& p : route_iter->second.array_value()) { - auto method_name = p.object_value().find("methodName"); - if (method_name == p.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes error:methodName is required")); - } else { - auto action_name = p.object_value().find("action"); - if (action_name == p.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes error:action is required")); - } else { - XdsRoutingLbConfig::Matcher matcher; - auto service = method_name->second.object_value().find("service"); - auto method = method_name->second.object_value().find("method"); - if (service == method_name->second.object_value().end() && - method != method_name->second.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName error: service is empty when method is " - "not")); - } - if (service != method_name->second.object_value().end()) { - matcher.first = service->second.string_value(); - } else { - matcher.first = ""; - } - if (method != method_name->second.object_value().end()) { - matcher.second = method->second.string_value(); - } else { - matcher.first = ""; - } - route_vector.emplace_back(matcher, - action_name->second.string_value()); + for (const auto& route : it->second.array_value()) { + // Parse methodName. + XdsRoutingLbConfig::Matcher matcher; + std::vector route_errors = + ParseRouteConfig(route.object_value(), &matcher); + if (!route_errors.empty()) { + // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error + // string is not static in this case. 
+ grpc_error* error = + GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:routes error"); + for (grpc_error* route_error : route_errors) { + error = grpc_error_add_child(error, route_error); } + error_list.push_back(error); } + // Parse action. + std::string cluster_name; + std::vector action_errors = + ParseActionConfig(route.object_value(), &cluster_name); + if (!action_errors.empty()) { + // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error + // string is not static in this case. + grpc_error* error = + GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:actions error:"); + for (grpc_error* action_error : action_errors) { + error = grpc_error_add_child(error, action_error); + } + error_list.push_back(error); + } + route_table.emplace_back(std::move(matcher), std::move(cluster_name)); } } if (!error_list.empty()) { @@ -696,28 +677,92 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { return nullptr; } return MakeRefCounted(std::move(action_map), - std::move(route_vector)); + std::move(route_table)); } private: static std::vector ParseChildConfig( const Json& json, XdsRoutingLbConfig::ChildConfig* child_config) { std::vector error_list; - if (json.type() != Json::Type::ARRAY) { + if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "value should be of type array")); + "value should be of type object")); return error_list; } - grpc_error* parse_error = GRPC_ERROR_NONE; - child_config->config = - LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( - json.array_value(), &parse_error); - if (child_config->config == nullptr) { - GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); - std::vector child_errors; - child_errors.push_back(parse_error); + auto it = json.object_value().find("child_policy"); + if (it != json.object_value().end()) { + grpc_error* parse_error = GRPC_ERROR_NONE; + child_config->config = + LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second, + &parse_error); + if (child_config->config == nullptr) { + GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); + std::vector child_errors; + child_errors.push_back(parse_error); + error_list.push_back( + GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); + } + } else { error_list.push_back( - GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); + GRPC_ERROR_CREATE_FROM_STATIC_STRING("did not find childPolicy")); + } + return error_list; + } + + static std::vector ParseRouteConfig( + const Json& json, XdsRoutingLbConfig::Matcher* route_config) { + std::vector error_list; + if (json.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "value should be of type object")); + return error_list; + } + auto method_name = json.object_value().find("methodName"); + if (method_name == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:methodName is required")); + } else if (method_name->second.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:methodName error: type should be object")); + } else { + auto service = method_name->second.object_value().find("service"); + auto method = method_name->second.object_value().find("method"); + if (service != method_name->second.object_value().end()) { + route_config->service = service->second.string_value(); + } else { + route_config->service = ""; + } + if (method != method_name->second.object_value().end()) { + route_config->method = 
method->second.string_value(); + } else { + route_config->method = ""; + } + if ((route_config->service == "") && (route_config->method != "")) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:methodName error: service is empty when method is " + "not")); + } + } + return error_list; + } + + static std::vector ParseActionConfig(const Json& json, + std::string* cluster_name) { + std::vector error_list; + if (json.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "value should be of type object")); + return error_list; + } + auto action_name = json.object_value().find("action"); + if (action_name == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes error:action is required")); + } else if (action_name->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:methodName error:type should be string")); + } else { + *cluster_name = action_name->second.string_value(); } return error_list; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index b8a07c95bf0..76558647ee2 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1012,7 +1012,6 @@ grpc_error* RouteConfigParse( return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No route found in the virtual host."); } - for (size_t i = 0; i < size; ++i) { const envoy_api_v2_route_Route* route = routes[i]; const envoy_api_v2_route_RouteMatch* match = @@ -1020,28 +1019,28 @@ grpc_error* RouteConfigParse( XdsApi::RdsRoute rds_route; const upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); const upb_strview path = envoy_api_v2_route_RouteMatch_path(match); - if (!upb_strview_eql(prefix, upb_strview_makez(""))) { - std::string prefix_string = std::string(prefix.data, prefix.size); - std::vector v = absl::StrSplit(prefix_string, '/'); - if (v.size() != 2) { + if (prefix.size > 0) { + std::vector v = absl::StrSplit( + absl::string_view(prefix.data, prefix.size).substr(1), '/'); + if (v.size() != 1) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix not in the required format of /service/"); } - rds_route.service = v[1]; - if (!upb_strview_eql(path, upb_strview_makez(""))) { + rds_route.service = std::string(v[0].data(), v[0].size()); + if (path.size > 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix is not empty string, path cannot also be non-empty."); } - } else if (!upb_strview_eql(path, upb_strview_makez(""))) { - std::string path_string = std::string(path.data, path.size); - std::vector v = absl::StrSplit(path_string, '/'); - if (v.size() != 3) { + } else if (path.size > 0) { + std::vector v = absl::StrSplit( + absl::string_view(path.data, path.size).substr(1), '/'); + if (v.size() != 2) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path not in the required format of /service/method"); } - rds_route.service = v[1]; - rds_route.method = v[2]; - if (!upb_strview_eql(prefix, upb_strview_makez(""))) { + rds_route.service = std::string(v[0].data(), v[0].size()); + rds_route.method = std::string(v[1].data(), v[1].size()); + if (prefix.size > 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path is not empty string, prefix cannot also be non-empty."); } @@ -1059,13 +1058,12 @@ grpc_error* RouteConfigParse( } const upb_strview action = envoy_api_v2_route_RouteAction_cluster(route_action); - rds_route.action_name = 
std::string(action.data, action.size); + rds_route.cluster_name = std::string(action.data, action.size); rds_update->routes.emplace_back(std::move(rds_route)); - gpr_log(GPR_INFO, "RouteConfigParse a route %s %s %s %s", + gpr_log(GPR_INFO, "RouteConfigParse a route %s %s %s", rds_update->routes[i].service.c_str(), rds_update->routes[i].method.c_str(), - rds_update->routes[i].action_type.c_str(), - rds_update->routes[i].action_name.c_str()); + rds_update->routes[i].cluster_name.c_str()); } return GRPC_ERROR_NONE; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h index 175f2d3a28a..caa2c748a97 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.h +++ b/src/core/ext/filters/client_channel/xds/xds_api.h @@ -46,8 +46,7 @@ class XdsApi { struct RdsRoute { std::string service; std::string method; - std::string action_type; - std::string action_name; + std::string cluster_name; }; struct RdsUpdate { diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 0e9cd5c0c29..c9679da7b42 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -22,6 +22,7 @@ #include #include +#include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include @@ -70,6 +71,35 @@ namespace grpc_core { TraceFlag grpc_xds_client_trace(false, "xds_client"); +namespace { + +std::string CreateServiceConfigActionCluster(const std::string& cluster_name) { + std::string json = absl::StrFormat( + " \"cds:%s\":{\n" + " \"child_policy\":[\n" + " { \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " } }\n" + " ]\n" + " }", + cluster_name.c_str(), cluster_name.c_str()); + return json; +} + +std::string CreateServiceConfigRoute(const std::string& cluster_name, + const std::string& service, + const std::string& method) { + std::string json = absl::StrFormat( + " { \"methodName\":\n" + " { \"service\": \"%s\",\n" + " \"method\": \"%s\"},\n" + " \"action\": \"cds:%s\"\n" + " }", + service.c_str(), method.c_str(), cluster_name.c_str()); + return json; +} + +} // namespace // // Internal class declarations // @@ -888,35 +918,23 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( "LDS update does not include requested resource")); return; } - const std::string& cluster_name = - lds_update->rds_update.has_value() - ? 
lds_update->rds_update.value().routes[0].action_name - : ""; if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { gpr_log(GPR_INFO, - "[xds_client %p] LDS update received: route_config_name=%s, " - "cluster_name=%s (empty if RDS is needed to obtain it)", - xds_client(), lds_update->route_config_name.c_str(), - cluster_name.c_str()); - for (auto route : lds_update->rds_update.value().routes) { - gpr_log(GPR_INFO, "Create service config using %s %s %s %s", - route.service.c_str(), route.method.c_str(), - route.action_type.c_str(), route.action_name.c_str()); + "[xds_client %p] LDS update received: route_config_name=%s", + xds_client(), lds_update->route_config_name.c_str()); + if (lds_update->rds_update.has_value()) { + for (const auto& route : lds_update->rds_update.value().routes) { + gpr_log(GPR_INFO, + "Create service config using route: { service=\"%s\", " + "method=\"%s\" }, cluster=\"%s\" }", + route.service.c_str(), route.method.c_str(), + route.cluster_name.c_str()); + } } } auto& lds_state = state_map_[XdsApi::kLdsTypeUrl]; auto& state = lds_state.subscribed_resources[xds_client()->server_name_]; if (state != nullptr) state->Finish(); - // Ignore identical update. - if (xds_client()->route_config_name_ == lds_update->route_config_name && - xds_client()->cluster_name_ == cluster_name) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { - gpr_log(GPR_INFO, - "[xds_client %p] LDS update identical to current, ignoring.", - xds_client()); - } - return; - } if (!xds_client()->route_config_name_.empty()) { Unsubscribe( XdsApi::kRdsTypeUrl, xds_client()->route_config_name_, @@ -924,10 +942,8 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( } xds_client()->route_config_name_ = lds_update->route_config_name; if (lds_update->rds_update.has_value()) { - // If cluster_name was found inlined in LDS response, notify the watcher - // immediately. - xds_client()->cluster_name_ = - lds_update->rds_update.value().routes[0].action_name; + // If the RouteConfiguration was found inlined in LDS response, notify the + // watcher immediately. RefCountedPtr service_config; grpc_error* error = xds_client()->CreateServiceConfig( lds_update->rds_update.value(), &service_config); @@ -954,24 +970,10 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate( "RDS update does not include requested resource")); return; } - if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { - gpr_log(GPR_INFO, "[xds_client %p] RDS update received: cluster_name=%s", - xds_client(), rds_update->routes[0].action_name.c_str()); - } auto& rds_state = state_map_[XdsApi::kRdsTypeUrl]; auto& state = rds_state.subscribed_resources[xds_client()->route_config_name_]; if (state != nullptr) state->Finish(); - // Ignore identical update. - if (xds_client()->cluster_name_ == rds_update->routes[0].action_name) { - if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { - gpr_log(GPR_INFO, - "[xds_client %p] RDS update identical to current, ignoring.", - xds_client()); - } - return; - } - xds_client()->cluster_name_ = rds_update->routes[0].action_name; // Notify the watcher. RefCountedPtr service_config; grpc_error* error = @@ -2043,85 +2045,46 @@ void XdsClient::ResetBackoff() { } } -char* XdsClient::CreateServiceConfigActionCluster( - const std::string& cluster_name, const bool without_comma) const { - const char* last_line = without_comma ? 
"}" : "},"; - char* json; - gpr_asprintf(&json, - " { \"cds\": \"%s\",\n" - " \"child_policy\":[\n" - " { \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " } }\n" - " ]\n" - " %s\n", - cluster_name.c_str(), cluster_name.c_str(), last_line); - return json; -} - -char* XdsClient::CreateServiceConfigRoute(const std::string& cluster_name, - const std::string& service, - const std::string& method, - const bool without_comma) const { - const char* last_line = without_comma ? "}" : "},"; - char* json; - gpr_asprintf(&json, - " { \"methodName\":\n" - " { \"service\": \"%s\",\n" - " \"method\": \"%s\"},\n" - " \"action\": \"%s\"\n" - " %s\n", - service.c_str(), method.c_str(), cluster_name.c_str(), - last_line); - return json; -} - grpc_error* XdsClient::CreateServiceConfig( const XdsApi::RdsUpdate& rds_update, RefCountedPtr* service_config) const { - gpr_strvec v; - gpr_strvec_init(&v); - char* json_start; - gpr_asprintf(&json_start, - "{\n" - " \"loadBalancingConfig\":[\n" - " { \"xds_routing_experimental\":{\n" - " \"actions\":[\n"); - gpr_strvec_add(&v, json_start); + std::vector v; + std::string json_start = + ("{\n" + " \"loadBalancingConfig\":[\n" + " { \"xds_routing_experimental\":{\n" + " \"actions\":{\n"); + v.push_back(std::move(json_start)); + std::vector actions_vector; for (size_t i = 0; i < rds_update.routes.size(); ++i) { auto route = rds_update.routes[i]; - // TODO: (donnadionne) CreateServiceConfigActionWeightedTarget - char* action = CreateServiceConfigActionCluster( - route.action_name.c_str(), i == (rds_update.routes.size() - 1)); - gpr_strvec_add(&v, action); - } - char* json_transition; - gpr_asprintf(&json_transition, - " ],\n" - " \"routes\":[\n"); - gpr_strvec_add(&v, json_transition); + actions_vector.push_back( + CreateServiceConfigActionCluster(route.cluster_name.c_str())); + } + v.push_back(absl::StrJoin(actions_vector, ",")); + std::string json_transition = + (" },\n" + " \"routes\":[\n"); + v.push_back(std::move(json_transition)); + std::vector routes_vector; for (size_t i = 0; i < rds_update.routes.size(); ++i) { auto route_info = rds_update.routes[i]; - char* route = CreateServiceConfigRoute( - route_info.action_name.c_str(), route_info.service.c_str(), - route_info.method.c_str(), i == (rds_update.routes.size() - 1)); - gpr_strvec_add(&v, route); - } - char* json_end; - gpr_asprintf(&json_end, - " ]\n" - " } }\n" - " ]\n" - "}"); - gpr_strvec_add(&v, json_end); - size_t len; - char* json = gpr_strvec_flatten(&v, &len); - gpr_strvec_destroy(&v); + routes_vector.push_back(CreateServiceConfigRoute( + route_info.cluster_name.c_str(), route_info.service.c_str(), + route_info.method.c_str())); + } + v.push_back(absl::StrJoin(routes_vector, ",")); + std::string json_end = + (" ]\n" + " } }\n" + " ]\n" + "}"); + v.push_back(std::move(json_end)); + std::string json = absl::StrJoin(v, ""); grpc_error* error = GRPC_ERROR_NONE; - *service_config = ServiceConfig::Create(json, &error); + *service_config = ServiceConfig::Create(json.c_str(), &error); gpr_log(GPR_INFO, "Built service config: \"%s\"", service_config->get()->json_string().c_str()); - gpr_free(json); return error; } diff --git a/src/core/ext/filters/client_channel/xds/xds_client.h b/src/core/ext/filters/client_channel/xds/xds_client.h index d29ac12e659..76a45c9b09f 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.h +++ b/src/core/ext/filters/client_channel/xds/xds_client.h @@ -218,13 +218,6 @@ class XdsClient : public InternallyRefCounted { // Sends an error notification to all watchers. 
void NotifyOnError(grpc_error* error); - char* CreateServiceConfigActionCluster( - const std::string& cluster_name, const bool without_comma = false) const; - char* CreateServiceConfigRoute(const std::string& prefix, - const std::string& service, - const std::string& method, - const bool without_comma = false) const; - grpc_error* CreateServiceConfig( const XdsApi::RdsUpdate& rds_update, RefCountedPtr* service_config) const; @@ -254,7 +247,6 @@ class XdsClient : public InternallyRefCounted { OrphanablePtr chand_; std::string route_config_name_; - std::string cluster_name_; // One entry for each watched CDS resource. std::map cluster_map_; // One entry for each watched EDS resource. diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc index bfed2e22ddd..bdc658b76d2 100644 --- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc +++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc @@ -46,6 +46,8 @@ void grpc_lb_policy_cds_init(void); void grpc_lb_policy_cds_shutdown(void); void grpc_lb_policy_xds_init(void); void grpc_lb_policy_xds_shutdown(void); +void grpc_lb_policy_xds_routing_init(void); +void grpc_lb_policy_xds_routing_shutdown(void); void grpc_lb_policy_pick_first_init(void); void grpc_lb_policy_pick_first_shutdown(void); void grpc_lb_policy_round_robin_init(void); @@ -88,6 +90,8 @@ void grpc_register_built_in_plugins(void) { grpc_lb_policy_cds_shutdown); grpc_register_plugin(grpc_lb_policy_xds_init, grpc_lb_policy_xds_shutdown); + grpc_register_plugin(grpc_lb_policy_xds_routing_init, + grpc_lb_policy_xds_routing_shutdown); grpc_register_plugin(grpc_lb_policy_pick_first_init, grpc_lb_policy_pick_first_shutdown); grpc_register_plugin(grpc_lb_policy_round_robin_init, diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 15c594cdf8d..a710f24510e 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1946,15 +1946,9 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { // Change RDS resource to point to new cluster. RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_match() - ->set_path("/grpc.testing.EchoTestService/Echo"); - //->set_prefix("/dgrpc.testing.EchoTestService"); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_route() - ->set_cluster(kNewClusterName); + auto* route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route->mutable_match()->set_path("/grpc.testing.EchoTestService/Echo"); + route->mutable_route()->set_cluster(kNewClusterName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); @@ -1989,14 +1983,9 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { // Change RDS resource to point to new cluster. 
RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_match() - ->set_prefix("/grpc.testing.EchoTestService"); - new_route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_route() - ->set_cluster(kNewClusterName); + auto* route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route->mutable_match()->set_prefix("/grpc.testing.EchoTestService"); + route->mutable_route()->set_cluster(kNewClusterName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); From 8e188e65400b4b0a4c1fa911ce0c0ff46791d140 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 2 Apr 2020 13:13:00 -0700 Subject: [PATCH 06/37] Addressing code review changes (before writing new test) --- .../lb_policy/xds/xds_routing.cc | 266 ++++++++++-------- .../ext/filters/client_channel/xds/xds_api.cc | 14 +- .../ext/filters/client_channel/xds/xds_api.h | 3 +- .../filters/client_channel/xds/xds_client.cc | 108 +++---- 4 files changed, 219 insertions(+), 172 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index f23db70bdb4..703f670186f 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -50,15 +50,17 @@ constexpr char kXdsRouting[] = "xds_routing_experimental"; // Config for xds_routing LB policy. class XdsRoutingLbConfig : public LoadBalancingPolicy::Config { public: - struct ChildConfig { - RefCountedPtr config; - }; struct Matcher { std::string service; std::string method; }; - using RouteTable = std::vector>; - using ActionMap = std::map; + struct Route { + Matcher matcher; + std::string action; + }; + using RouteTable = std::vector; + using ActionMap = + std::map>; XdsRoutingLbConfig(ActionMap action_map, RouteTable route_table) : action_map_(std::move(action_map)), @@ -95,7 +97,7 @@ class XdsRoutingLb : public LoadBalancingPolicy { : name_(std::move(name)), picker_(std::move(picker)) {} PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } - const std::string& name() { return name_; } + const std::string& name() const { return name_; } private: std::string name_; @@ -114,7 +116,7 @@ class XdsRoutingLb : public LoadBalancingPolicy { // Maintains an ordered xds route table as provided by RDS response. 
using RouteTable = std::vector; - RoutePicker(RouteTable route_table) + explicit RoutePicker(RouteTable route_table) : route_table_(std::move(route_table)) {} PickResult Pick(PickArgs args) override; @@ -132,7 +134,7 @@ class XdsRoutingLb : public LoadBalancingPolicy { void Orphan() override; - void UpdateLocked(const XdsRoutingLbConfig::ChildConfig& config, + void UpdateLocked(RefCountedPtr config, const ServerAddressList& addresses, const grpc_channel_args* args); void ExitIdleLocked(); @@ -221,18 +223,24 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { break; } } - std::vector v = absl::StrSplit(path.substr(1), '/'); - for (int i = 0; i < route_table_.size(); ++i) { - if (v[0] == route_table_[i].matcher.service && - ("" == route_table_[i].matcher.method || - v[1] == route_table_[i].matcher.method)) { - auto picker = route_table_[i].picker; - if (picker != nullptr) { - return picker.get()->Pick(args); - } + std::vector path_elements = + absl::StrSplit(path.substr(1), '/'); + for (const Route& route : route_table_) { + if ((path_elements[0] == route.matcher.service && + (path_elements[1] == route.matcher.method || + "" == route.matcher.method)) || + ("" == route.matcher.service && "" == route.matcher.method)) { + return route.picker.get()->Pick(args); } } - return route_table_[route_table_.size() - 1].picker.get()->Pick(args); + PickResult result; + result.type = PickResult::PICK_FAILED; + result.error = + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "xds routing picker not given any picker; default " + "route not configured"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL); + return result; } // @@ -294,7 +302,7 @@ void XdsRoutingLb::UpdateLocked(UpdateArgs args) { // Add or update the actions in the new config. for (const auto& p : config_->action_map()) { const std::string& name = p.first; - const XdsRoutingLbConfig::ChildConfig& config = p.second; + RefCountedPtr config = p.second; auto it = actions_.find(name); if (it == actions_.end()) { it = actions_.emplace(std::make_pair(name, nullptr)).first; @@ -306,9 +314,9 @@ void XdsRoutingLb::UpdateLocked(UpdateArgs args) { } void XdsRoutingLb::UpdateStateLocked() { - std::map> picker_map; // Also count the number of children in each state, to determine the // overall state. + size_t num_ready = 0; size_t num_connecting = 0; size_t num_idle = 0; size_t num_transient_failures = 0; @@ -321,7 +329,7 @@ void XdsRoutingLb::UpdateStateLocked() { } switch (child->connectivity_state()) { case GRPC_CHANNEL_READY: { - picker_map[child_name] = child->picker_wrapper(); + ++num_ready; break; } case GRPC_CHANNEL_CONNECTING: { @@ -342,7 +350,7 @@ void XdsRoutingLb::UpdateStateLocked() { } // Determine aggregated connectivity state. 
grpc_connectivity_state connectivity_state; - if (picker_map.size() > 0) { + if (num_ready > 0) { connectivity_state = GRPC_CHANNEL_READY; } else if (num_connecting > 0) { connectivity_state = GRPC_CHANNEL_CONNECTING; @@ -356,20 +364,30 @@ void XdsRoutingLb::UpdateStateLocked() { ConnectivityStateName(connectivity_state)); } std::unique_ptr picker; - RoutePicker::RouteTable route_table; switch (connectivity_state) { - case GRPC_CHANNEL_READY: + case GRPC_CHANNEL_READY: { + RoutePicker::RouteTable route_table; for (int i = 0; i < config_->route_table().size(); ++i) { RoutePicker::Route route; - route.matcher = config_->route_table()[i].first; - auto child_picker = picker_map.find(config_->route_table()[i].second); - if (child_picker != picker_map.end()) { - route.picker = child_picker->second; + route.matcher = config_->route_table()[i].matcher; + auto it = actions_.find(config_->route_table()[i].action); + if (it != actions_.end()) { + route.picker = it->second->picker_wrapper(); + } else { + gpr_log(GPR_INFO, + "[xds_routing_lb %p] child policy may have mis-behaved and " + "did not return a picker, creating a QueuePicker for %s", + this, config_->route_table()[i].action.c_str()); + route.picker = MakeRefCounted( + config_->route_table()[i].action, + absl::make_unique( + Ref(DEBUG_LOCATION, "QueuePicker"))); } route_table.push_back(std::move(route)); } picker = absl::make_unique(std::move(route_table)); break; + } case GRPC_CHANNEL_CONNECTING: case GRPC_CHANNEL_IDLE: picker = @@ -452,7 +470,7 @@ XdsRoutingLb::XdsRoutingChild::CreateChildPolicyLocked( } void XdsRoutingLb::XdsRoutingChild::UpdateLocked( - const XdsRoutingLbConfig::ChildConfig& config, + RefCountedPtr config, const ServerAddressList& addresses, const grpc_channel_args* args) { if (xds_routing_policy_->shutting_down_) return; // Update child weight. @@ -467,7 +485,7 @@ void XdsRoutingLb::XdsRoutingChild::UpdateLocked( } // Construct update args. UpdateArgs update_args; - update_args.config = config.config; + update_args.config = config; update_args.addresses = addresses; update_args.args = grpc_channel_args_copy(args); // Update the policy. @@ -538,8 +556,10 @@ void XdsRoutingLb::XdsRoutingChild::Helper::UpdateState( grpc_connectivity_state state, std::unique_ptr picker) { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { gpr_log(GPR_INFO, - "XdsRoutingChild::Helper::UpdateState child %s, state %d, piker %p", - xds_routing_child_->name_.c_str(), state, picker.get()); + "[xds_routing_lb %p] child %s: received update: state=%s picker=%p", + xds_routing_child_->xds_routing_policy_.get(), + xds_routing_child_->name_.c_str(), ConnectivityStateName(state), + picker.get()); } if (xds_routing_child_->xds_routing_policy_->shutting_down_) return; // Cache the picker in the XdsRoutingChild. @@ -604,6 +624,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { std::vector error_list; // action map. 
XdsRoutingLbConfig::ActionMap action_map; + std::set action_in_use_set; auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -613,7 +634,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { "field:actions error:type should be object")); } else { for (const auto& p : it->second.object_value()) { - XdsRoutingLbConfig::ChildConfig child_config; + RefCountedPtr child_config; std::vector child_errors = ParseChildConfig(p.second, &child_config); if (!child_errors.empty()) { @@ -630,6 +651,10 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { } } } + if (action_map.size() == 0) { + error_list.push_back( + GRPC_ERROR_CREATE_FROM_COPIED_STRING("no valid actions configured")); + } XdsRoutingLbConfig::RouteTable route_table; it = json.object_value().find("routes"); if (it == json.object_value().end()) { @@ -639,36 +664,76 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "field:routes error:type should be array")); } else { - for (const auto& route : it->second.array_value()) { - // Parse methodName. - XdsRoutingLbConfig::Matcher matcher; - std::vector route_errors = - ParseRouteConfig(route.object_value(), &matcher); - if (!route_errors.empty()) { - // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error - // string is not static in this case. - grpc_error* error = - GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:routes error"); - for (grpc_error* route_error : route_errors) { - error = grpc_error_add_child(error, route_error); + const Json::Array& array = it->second.array_value(); + for (size_t i = 0; i < array.size(); ++i) { + const Json& element = array[i]; + if (element.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("filed: routes element: ", i, + " should be of type object") + .c_str())); + } else { + XdsRoutingLbConfig::Route route; + // Parse MethodName. + auto it = element.object_value().find("methodName"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:routes element: ", i, + " methodName is required") + .c_str())); + } else if (it->second.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:routes element: ", i, + " methodName type should be object") + .c_str())); + } else { + std::vector route_errors = + ParseRouteConfig(it->second, &route.matcher); + if (!route_errors.empty()) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:routes element: ", i, " error").c_str()); + for (grpc_error* route_error : route_errors) { + error = grpc_error_add_child(error, route_error); + } + error_list.push_back(error); + } } - error_list.push_back(error); - } - // Parse action. - std::string cluster_name; - std::vector action_errors = - ParseActionConfig(route.object_value(), &cluster_name); - if (!action_errors.empty()) { - // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error - // string is not static in this case. - grpc_error* error = - GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:actions error:"); - for (grpc_error* action_error : action_errors) { - error = grpc_error_add_child(error, action_error); + // Parse action. 
+ it = element.object_value().find("action"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:routes element: ", i, " action is required") + .c_str())); + } else if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:routes element: ", i, + " action type should be string") + .c_str())); + } else { + route.action = it->second.string_value(); } - error_list.push_back(error); + // Validate action exists and mark it as used. + if (action_map.find(route.action) == action_map.end()) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("action ", route.action, " does not exist") + .c_str()); + error_list.push_back(error); + } else { + action_in_use_set.insert(route.action); + } + route_table.emplace_back(std::move(route)); } - route_table.emplace_back(std::move(matcher), std::move(cluster_name)); + } + } + if (route_table.size() == 0) { + grpc_error* error = + GRPC_ERROR_CREATE_FROM_STATIC_STRING("no valid routes configured"); + error_list.push_back(error); + } + for (const auto& action : action_map) { + if (action_in_use_set.find(action.first) == action_in_use_set.end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("action ", action.first, " is never used").c_str())); } } if (!error_list.empty()) { @@ -682,7 +747,8 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { private: static std::vector ParseChildConfig( - const Json& json, XdsRoutingLbConfig::ChildConfig* child_config) { + const Json& json, + RefCountedPtr* child_config) { std::vector error_list; if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -690,21 +756,20 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { return error_list; } auto it = json.object_value().find("child_policy"); - if (it != json.object_value().end()) { + if (it == json.object_value().end()) { + error_list.push_back( + GRPC_ERROR_CREATE_FROM_STATIC_STRING("did not find childPolicy")); + } else { grpc_error* parse_error = GRPC_ERROR_NONE; - child_config->config = - LoadBalancingPolicyRegistry::ParseLoadBalancingConfig(it->second, - &parse_error); - if (child_config->config == nullptr) { + *child_config = LoadBalancingPolicyRegistry::ParseLoadBalancingConfig( + it->second, &parse_error); + if (*child_config == nullptr) { GPR_DEBUG_ASSERT(parse_error != GRPC_ERROR_NONE); std::vector child_errors; child_errors.push_back(parse_error); error_list.push_back( GRPC_ERROR_CREATE_FROM_VECTOR("field:childPolicy", &child_errors)); } - } else { - error_list.push_back( - GRPC_ERROR_CREATE_FROM_STATIC_STRING("did not find childPolicy")); } return error_list; } @@ -712,57 +777,32 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { static std::vector ParseRouteConfig( const Json& json, XdsRoutingLbConfig::Matcher* route_config) { std::vector error_list; - if (json.type() != Json::Type::OBJECT) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "value should be of type object")); - return error_list; - } - auto method_name = json.object_value().find("methodName"); - if (method_name == json.object_value().end()) { + // Parse service + auto it = json.object_value().find("service"); + if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes error:methodName is required")); - } else if (method_name->second.type() != 
Json::Type::OBJECT) { + "field:service error: required field not present")); + } else if (it->second.type() != Json::Type::STRING) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes error:methodName error: type should be object")); + "field:service error: should be string")); } else { - auto service = method_name->second.object_value().find("service"); - auto method = method_name->second.object_value().find("method"); - if (service != method_name->second.object_value().end()) { - route_config->service = service->second.string_value(); - } else { - route_config->service = ""; - } - if (method != method_name->second.object_value().end()) { - route_config->method = method->second.string_value(); - } else { - route_config->method = ""; - } - if ((route_config->service == "") && (route_config->method != "")) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName error: service is empty when method is " - "not")); - } + route_config->service = it->second.string_value(); } - return error_list; - } - - static std::vector ParseActionConfig(const Json& json, - std::string* cluster_name) { - std::vector error_list; - if (json.type() != Json::Type::OBJECT) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "value should be of type object")); - return error_list; - } - auto action_name = json.object_value().find("action"); - if (action_name == json.object_value().end()) { + // Parse method + it = json.object_value().find("method"); + if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes error:action is required")); - } else if (action_name->second.type() != Json::Type::STRING) { + "field:method error: required field not present")); + } else if (it->second.type() != Json::Type::STRING) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName error:type should be string")); + "field:method error: should be string")); } else { - *cluster_name = action_name->second.string_value(); + route_config->method = it->second.string_value(); + } + if (route_config->service == "" && route_config->method != "") { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:methodName error: service is empty when method is " + "not")); } return error_list; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index eca6f52a0df..2c18af46a16 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1020,26 +1020,26 @@ grpc_error* RouteConfigParse( const upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); const upb_strview path = envoy_api_v2_route_RouteMatch_path(match); if (prefix.size > 0) { - std::vector v = absl::StrSplit( + std::vector prefix_elements = absl::StrSplit( absl::string_view(prefix.data, prefix.size).substr(1), '/'); - if (v.size() != 1) { + if (prefix_elements.size() != 1) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix not in the required format of /service/"); } - rds_route.service = std::string(v[0].data(), v[0].size()); + rds_route.service = std::string(prefix_elements[0]); if (path.size > 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix is not empty string, path cannot also be non-empty."); } } else if (path.size > 0) { - std::vector v = absl::StrSplit( + std::vector path_elements = absl::StrSplit( absl::string_view(path.data, path.size).substr(1), '/'); - if (v.size() != 2) { + if 
(path_elements.size() != 2) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path not in the required format of /service/method"); } - rds_route.service = std::string(v[0].data(), v[0].size()); - rds_route.method = std::string(v[1].data(), v[1].size()); + rds_route.service = std::string(path_elements[0]); + rds_route.method = std::string(path_elements[1]); if (prefix.size > 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path is not empty string, prefix cannot also be non-empty."); diff --git a/src/core/ext/filters/client_channel/xds/xds_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h index dc42681ecb4..28b173f019e 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.h +++ b/src/core/ext/filters/client_channel/xds/xds_api.h @@ -50,8 +50,7 @@ class XdsApi { std::string cluster_name; bool operator==(const RdsRoute& other) const { - return (service == other.service && - method == other.method && + return (service == other.service && method == other.method && cluster_name == other.cluster_name); } }; diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index db5f33ecc7a..29ca7e78901 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -71,35 +71,6 @@ namespace grpc_core { TraceFlag grpc_xds_client_trace(false, "xds_client"); -namespace { - -std::string CreateServiceConfigActionCluster(const std::string& cluster_name) { - std::string json = absl::StrFormat( - " \"cds:%s\":{\n" - " \"child_policy\":[\n" - " { \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " } }\n" - " ]\n" - " }", - cluster_name.c_str(), cluster_name.c_str()); - return json; -} - -std::string CreateServiceConfigRoute(const std::string& cluster_name, - const std::string& service, - const std::string& method) { - std::string json = absl::StrFormat( - " { \"methodName\":\n" - " { \"service\": \"%s\",\n" - " \"method\": \"%s\"},\n" - " \"action\": \"cds:%s\"\n" - " }", - service.c_str(), method.c_str(), cluster_name.c_str()); - return json; -} - -} // namespace // // Internal class declarations // @@ -934,7 +905,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( if (lds_update->rds_update.has_value()) { for (const auto& route : lds_update->rds_update.value().routes) { gpr_log(GPR_INFO, - "Create service config using route: { service=\"%s\", " + " route: { service=\"%s\", " "method=\"%s\" }, cluster=\"%s\" }", route.service.c_str(), route.method.c_str(), route.cluster_name.c_str()); @@ -989,6 +960,16 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "RDS update does not include requested resource")); return; + } else { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { + for (const auto& route : rds_update.value().routes) { + gpr_log(GPR_INFO, + " route: { service=\"%s\", " + "method=\"%s\" }, cluster=\"%s\" }", + route.service.c_str(), route.method.c_str(), + route.cluster_name.c_str()); + } + } } auto& rds_state = state_map_[XdsApi::kRdsTypeUrl]; auto& state = @@ -2065,27 +2046,55 @@ void XdsClient::ResetBackoff() { } } +namespace { +std::string CreateServiceConfigActionCluster(const std::string& cluster_name) { + std::string json = absl::StrFormat( + " \"cds:%s\":{\n" + " \"child_policy\":[ {\n" + " \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " }\n" + " } ]\n" + " }", + cluster_name.c_str(), cluster_name.c_str()); + return json; +} + +std::string CreateServiceConfigRoute(const 
std::string& cluster_name, + const std::string& service, + const std::string& method) { + std::string json = absl::StrFormat( + " { \n" + " \"methodName\": {\n" + " \"service\": \"%s\",\n" + " \"method\": \"%s\"\n" + " },\n" + " \"action\": \"cds:%s\"\n" + " }", + service.c_str(), method.c_str(), cluster_name.c_str()); + return json; +} +} // namespace + grpc_error* XdsClient::CreateServiceConfig( const XdsApi::RdsUpdate& rds_update, RefCountedPtr* service_config) const { - std::vector v; - std::string json_start = - ("{\n" - " \"loadBalancingConfig\":[\n" - " { \"xds_routing_experimental\":{\n" - " \"actions\":{\n"); - v.push_back(std::move(json_start)); + std::vector config_parts; + config_parts.push_back( + "{\n" + " \"loadBalancingConfig\":[\n" + " { \"xds_routing_experimental\":{\n" + " \"actions\":{\n"); std::vector actions_vector; for (size_t i = 0; i < rds_update.routes.size(); ++i) { auto route = rds_update.routes[i]; actions_vector.push_back( CreateServiceConfigActionCluster(route.cluster_name.c_str())); } - v.push_back(absl::StrJoin(actions_vector, ",")); - std::string json_transition = - (" },\n" - " \"routes\":[\n"); - v.push_back(std::move(json_transition)); + config_parts.push_back(absl::StrJoin(actions_vector, ",\n")); + config_parts.push_back( + " },\n" + " \"routes\":[\n"); std::vector routes_vector; for (size_t i = 0; i < rds_update.routes.size(); ++i) { auto route_info = rds_update.routes[i]; @@ -2093,14 +2102,13 @@ grpc_error* XdsClient::CreateServiceConfig( route_info.cluster_name.c_str(), route_info.service.c_str(), route_info.method.c_str())); } - v.push_back(absl::StrJoin(routes_vector, ",")); - std::string json_end = - (" ]\n" - " } }\n" - " ]\n" - "}"); - v.push_back(std::move(json_end)); - std::string json = absl::StrJoin(v, ""); + config_parts.push_back(absl::StrJoin(routes_vector, ",\n")); + config_parts.push_back( + " ]\n" + " } }\n" + " ]\n" + "}"); + std::string json = absl::StrJoin(config_parts, ""); grpc_error* error = GRPC_ERROR_NONE; *service_config = ServiceConfig::Create(json.c_str(), &error); gpr_log(GPR_INFO, "Built service config: \"%s\"", From c0c7f1dae370d57cafed4d8950a7a671025709bc Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 6 Apr 2020 16:05:04 -0700 Subject: [PATCH 07/37] Fix build and test failures Including the following All all needed BUILD changes to include new xdsRouting Fixed TSAN errors AllServerUnavailableFailFast may return UNKNOWN as oppose UNAVAILABLE ChooseLastRoute modified into 2 tests --- BUILD | 1 + BUILD.gn | 1 + CMakeLists.txt | 2 + Makefile | 2 + build_autogenerated.yaml | 2 + config.m4 | 1 + config.w32 | 1 + gRPC-Core.podspec | 1 + grpc.gemspec | 1 + grpc.gyp | 2 + package.xml | 1 + .../lb_policy/xds/xds_routing.cc | 11 +- src/python/grpcio/grpc_core_dependencies.py | 1 + test/cpp/end2end/xds_end2end_test.cc | 218 ++++++++++++++---- tools/doxygen/Doxyfile.c++.internal | 1 + tools/doxygen/Doxyfile.core.internal | 1 + 16 files changed, 191 insertions(+), 56 deletions(-) diff --git a/BUILD b/BUILD index 58148f58dab..30696bb45b9 100644 --- a/BUILD +++ b/BUILD @@ -321,6 +321,7 @@ grpc_cc_library( "grpc_lb_policy_cds", "grpc_lb_policy_grpclb", "grpc_lb_policy_xds", + "grpc_lb_policy_xds_routing", "grpc_resolver_xds", ], ) diff --git a/BUILD.gn b/BUILD.gn index 306f2bfa6d8..ba1da8dd097 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -243,6 +243,7 @@ config("grpc_config") { "src/core/ext/filters/client_channel/lb_policy/xds/cds.cc", "src/core/ext/filters/client_channel/lb_policy/xds/xds.cc", 
"src/core/ext/filters/client_channel/lb_policy/xds/xds.h", + "src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc", "src/core/ext/filters/client_channel/lb_policy_factory.h", "src/core/ext/filters/client_channel/lb_policy_registry.cc", "src/core/ext/filters/client_channel/lb_policy_registry.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a52205e480..07f1c8707de 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1327,6 +1327,7 @@ add_library(grpc src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc src/core/ext/filters/client_channel/lb_policy_registry.cc src/core/ext/filters/client_channel/local_subchannel_pool.cc src/core/ext/filters/client_channel/parse_address.cc @@ -1981,6 +1982,7 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc src/core/ext/filters/client_channel/lb_policy_registry.cc src/core/ext/filters/client_channel/local_subchannel_pool.cc src/core/ext/filters/client_channel/parse_address.cc diff --git a/Makefile b/Makefile index 08ee1cbe217..f5e81897927 100644 --- a/Makefile +++ b/Makefile @@ -3657,6 +3657,7 @@ LIBGRPC_SRC = \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ @@ -4286,6 +4287,7 @@ LIBGRPC_UNSECURE_SRC = \ src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 14daa48e2b7..8b76ea81666 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -751,6 +751,7 @@ libs: - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc - src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc - src/core/ext/filters/client_channel/lb_policy_registry.cc - src/core/ext/filters/client_channel/local_subchannel_pool.cc - src/core/ext/filters/client_channel/parse_address.cc @@ -1582,6 +1583,7 @@ libs: - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc - src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc + - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc - src/core/ext/filters/client_channel/lb_policy_registry.cc - src/core/ext/filters/client_channel/local_subchannel_pool.cc - src/core/ext/filters/client_channel/parse_address.cc diff --git 
a/config.m4 b/config.m4 index 66922ad5527..8dc2fc8920a 100644 --- a/config.m4 +++ b/config.m4 @@ -61,6 +61,7 @@ if test "$PHP_GRPC" != "no"; then src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/local_subchannel_pool.cc \ src/core/ext/filters/client_channel/parse_address.cc \ diff --git a/config.w32 b/config.w32 index 541cc74b602..eb32d08f6fb 100644 --- a/config.w32 +++ b/config.w32 @@ -30,6 +30,7 @@ if (PHP_GRPC != "no") { "src\\core\\ext\\filters\\client_channel\\lb_policy\\round_robin\\round_robin.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\cds.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds.cc " + + "src\\core\\ext\\filters\\client_channel\\lb_policy\\xds\\xds_routing.cc " + "src\\core\\ext\\filters\\client_channel\\lb_policy_registry.cc " + "src\\core\\ext\\filters\\client_channel\\local_subchannel_pool.cc " + "src\\core\\ext\\filters\\client_channel\\parse_address.cc " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index d140b005d7f..183efc4f74c 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -226,6 +226,7 @@ Pod::Spec.new do |s| 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.h', + 'src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc', 'src/core/ext/filters/client_channel/lb_policy_factory.h', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.h', diff --git a/grpc.gemspec b/grpc.gemspec index c9a1a8835fd..8c6b8413c1f 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -148,6 +148,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/cds.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds.h ) + s.files += %w( src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy_factory.h ) s.files += %w( src/core/ext/filters/client_channel/lb_policy_registry.cc ) s.files += %w( src/core/ext/filters/client_channel/lb_policy_registry.h ) diff --git a/grpc.gyp b/grpc.gyp index b065ee6f2af..e5168e57df4 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -453,6 +453,7 @@ 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 'src/core/ext/filters/client_channel/parse_address.cc', @@ -943,6 +944,7 @@ 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 
'src/core/ext/filters/client_channel/parse_address.cc', diff --git a/package.xml b/package.xml index 32930b6f65c..f0ace0d092d 100644 --- a/package.xml +++ b/package.xml @@ -128,6 +128,7 @@ + diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 703f670186f..53e6b26767a 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -95,7 +95,7 @@ class XdsRoutingLb : public LoadBalancingPolicy { ChildPickerWrapper(std::string name, std::unique_ptr picker) : name_(std::move(name)), picker_(std::move(picker)) {} - PickResult Pick(PickArgs args) { return picker_->Pick(std::move(args)); } + PickResult Pick(PickArgs args) { return picker_->Pick(args); } const std::string& name() const { return name_; } @@ -239,7 +239,7 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "xds routing picker not given any picker; default " "route not configured"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL); + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); return result; } @@ -417,8 +417,8 @@ XdsRoutingLb::XdsRoutingChild::XdsRoutingChild( XdsRoutingLb::XdsRoutingChild::~XdsRoutingChild() { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { gpr_log(GPR_INFO, - "[xds_routing_lb %p] XdsRoutingChild %p %s: destroying child", - xds_routing_policy_.get(), this, name_.c_str()); + "[xds_routing_lb %p] XdsRoutingChild %p: destroying child", + xds_routing_policy_.get(), this); } xds_routing_policy_.reset(DEBUG_LOCATION, "XdsRoutingChild"); } @@ -485,7 +485,7 @@ void XdsRoutingLb::XdsRoutingChild::UpdateLocked( } // Construct update args. UpdateArgs update_args; - update_args.config = config; + update_args.config = std::move(config); update_args.addresses = addresses; update_args.args = grpc_channel_args_copy(args); // Update the policy. @@ -509,6 +509,7 @@ void XdsRoutingLb::XdsRoutingChild::ResetBackoffLocked() { void XdsRoutingLb::XdsRoutingChild::DeactivateLocked() { // If already deactivated, don't do that again. + if (delayed_removal_timer_callback_pending_ == true) return; // Set the child weight to 0 so that future picker won't contain this child. // Start a timer to delete the child. 
Ref(DEBUG_LOCATION, "XdsRoutingChild+timer").release(); diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 46cce67f77e..1d4b07668bf 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -39,6 +39,7 @@ CORE_SOURCE_FILES = [ 'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc', 'src/core/ext/filters/client_channel/lb_policy/xds/xds.cc', + 'src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc', 'src/core/ext/filters/client_channel/lb_policy_registry.cc', 'src/core/ext/filters/client_channel/local_subchannel_pool.cc', 'src/core/ext/filters/client_channel/parse_address.cc', diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 803af0dd346..c2b998bdf58 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1662,7 +1662,9 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) { AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); const Status status = SendRpc(); // The error shouldn't be DEADLINE_EXCEEDED. - EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code()); + gpr_log(GPR_INFO, "error code %d message received %s", status.error_code(), + status.error_message().c_str()); + EXPECT_NE(StatusCode::DEADLINE_EXCEEDED, status.error_code()); } // Tests that RPCs fail when the backends are down, and will succeed again after @@ -2049,13 +2051,11 @@ TEST_P(LdsTest, ChooseMatchedDomain) { AdsServiceImpl::ACKED); } -// Tests that LDS client should choose the last route in the virtual host if -// multiple routes exist in the LDS response. -TEST_P(LdsTest, ChooseLastRoute) { +// Tests that the LDS client should NACK when the last route is not a default +// route. +TEST_P(LdsTest, DefaultRouteInvalid) { RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); - *(route_config.mutable_virtual_hosts(0)->add_routes()) = - route_config.virtual_hosts(0).routes(0); route_config.mutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_route() @@ -2066,10 +2066,10 @@ TEST_P(LdsTest, ChooseLastRoute) { SetNextResolutionForLbChannelAllBalancers(); (void)SendRpc(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), - AdsServiceImpl::ACKED); + AdsServiceImpl::NACKED); } -// Tests that LDS client should send a NACK if route match has non-empty prefix +// Tests that LDS client should send a ACK if route match has non-empty prefix // in the LDS response. 
TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { RouteConfiguration route_config = @@ -2084,7 +2084,7 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { SetNextResolutionForLbChannelAllBalancers(); CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), - AdsServiceImpl::NACKED); + AdsServiceImpl::ACKED); } // Tests that LDS client should send a NACK if route has an action other than @@ -2130,7 +2130,9 @@ TEST_P(LdsTest, Timeout) { } TEST_P(LdsTest, XdsRoutingPathMatching) { - const char* kNewClusterName = "new_cluster_name"; + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const size_t kNumRpcs = 10; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); AdsServiceImpl::EdsResourceArgs args({ @@ -2140,34 +2142,59 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); // We need to wait for all backends to come online. WaitForAllBackends(0, 2); - // Populate new EDS resource. + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(2, 3)}, + }); AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 4)}, + {"locality0", GetBackendPorts(3, 4)}, }); balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewClusterName), - kNewClusterName); - // Populate new CDS resource. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster, kNewClusterName); - // Change RDS resource to point to new cluster. + AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), + kNewCluster1Name); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewCluster2Name), + kNewCluster2Name); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1, kNewCluster1Name); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); + // Change RDS resource to set up prefix matching to direct traffic to the + // first new cluster. RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - auto* route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route->mutable_match()->set_path("/grpc.testing.EchoTestService/Echo"); - route->mutable_route()->set_cluster(kNewClusterName); + auto* mismatched_route = + new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + mismatched_route->mutable_match()->set_path( + "/grpc.testing.EchoTestService/Echo"); + mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); + auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + matched_route->mutable_match()->set_path( + "/grpc.testing.EchoTestService/NewMethod"); + matched_route->mutable_route()->set_cluster(kNewCluster2Name); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - // Wait for all new backends to be used. - std::tuple counts = WaitForAllBackends(2, 4); - // Make sure no RPCs failed in the transition. 
- EXPECT_EQ(0, std::get<1>(counts)); + // Wait for the new backend to come up. + WaitForAllBackends(2, 3); + CheckRpcSendOk(kNumRpcs); + // Make sure RPCs all go to the correct backend. + for (size_t i = 0; i < 4; ++i) { + if (i == 2) { + EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->request_count()); + } else { + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + } + } } TEST_P(LdsTest, XdsRoutingPrefixMatching) { - const char* kNewClusterName = "new_cluster_name"; + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const size_t kNumRpcs = 10; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); AdsServiceImpl::EdsResourceArgs args({ @@ -2177,30 +2204,121 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); // We need to wait for all backends to come online. WaitForAllBackends(0, 2); - // Populate new EDS resource. + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(2, 3)}, + }); AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(2, 4)}, + {"locality0", GetBackendPorts(3, 4)}, }); balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewClusterName), - kNewClusterName); - // Populate new CDS resource. - Cluster new_cluster = balancers_[0]->ads_service()->default_cluster(); - new_cluster.set_name(kNewClusterName); - balancers_[0]->ads_service()->SetCdsResource(new_cluster, kNewClusterName); - // Change RDS resource to point to new cluster. + AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), + kNewCluster1Name); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewCluster2Name), + kNewCluster2Name); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1, kNewCluster1Name); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); + // Change RDS resource to set up prefix matching to direct traffic to the + // second new cluster. RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - auto* route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route->mutable_match()->set_prefix("/grpc.testing.EchoTestService"); - route->mutable_route()->set_cluster(kNewClusterName); + auto* mismatched_route = + new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + mismatched_route->mutable_match()->set_prefix( + "/grpc.testing.EchoTestService0"); + mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); + auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTestService"); + matched_route->mutable_route()->set_cluster(kNewCluster2Name); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - // Wait for all new backends to be used. - std::tuple counts = WaitForAllBackends(2, 4); - // Make sure no RPCs failed in the transition. - EXPECT_EQ(0, std::get<1>(counts)); + // Wait for the new backend to come up. 
+ WaitForAllBackends(3, 4); + CheckRpcSendOk(kNumRpcs); + // Make sure RPCs all go to the correct backend. + for (size_t i = 0; i < 4; ++i) { + if (i == 3) { + EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->request_count()); + } else { + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + } + } +} + +// Tests that LDS client should choose the default route (with no matching +// specified) after unable to find a match with previous routes. +TEST_P(LdsTest, XdsRoutingDefaultRoute) { + const char* kNewCluster1Name = "new_cluster_1"; + const char* kNewCluster2Name = "new_cluster_2"; + const size_t kNumRpcs = 10; + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); + // We need to wait for all backends to come online. + WaitForAllBackends(0, 2); + // Populate new EDS resources. + AdsServiceImpl::EdsResourceArgs args1({ + {"locality0", GetBackendPorts(2, 3)}, + }); + AdsServiceImpl::EdsResourceArgs args2({ + {"locality0", GetBackendPorts(3, 4)}, + }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), + kNewCluster1Name); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args2, kNewCluster2Name), + kNewCluster2Name); + // Populate new CDS resources. + Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); + new_cluster1.set_name(kNewCluster1Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster1, kNewCluster1Name); + Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); + new_cluster2.set_name(kNewCluster2Name); + balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); + // Change RDS resource to set up prefix matching to direct traffic to the + // second new cluster. + RouteConfiguration new_route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* mismatched_route1 = + new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + mismatched_route1->mutable_match()->set_prefix( + "/grpc.testing.EchoTestService0"); + mismatched_route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* mismatched_route2 = + new_route_config.mutable_virtual_hosts(0)->add_routes(); + mismatched_route2->mutable_match()->set_path( + "/grpc.testing.EchoTestService/EchoMismatch"); + mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_match()->set_path(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); + Listener listener = + balancers_[0]->ads_service()->BuildListener(new_route_config); + balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); + // Wait for the new backend to come up. + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumRpcs); + // Make sure RPCs all go to the correct backend. 
+ for (size_t i = 0; i < 4; ++i) { + if (i < 2) { + EXPECT_EQ(kNumRpcs / 2, backends_[i]->backend_service()->request_count()); + } else { + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + } + } } using RdsTest = BasicTest; @@ -2254,14 +2372,12 @@ TEST_P(RdsTest, ChooseMatchedDomain) { AdsServiceImpl::ACKED); } -// Tests that RDS client should choose the last route in the virtual host if -// multiple routes exist in the RDS response. -TEST_P(RdsTest, ChooseLastRoute) { +// Tests that the RDS client should NACK when the last route is not a default +// route. +TEST_P(RdsTest, DefaultRouteInvalid) { balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); - *(route_config.mutable_virtual_hosts(0)->add_routes()) = - route_config.virtual_hosts(0).routes(0); route_config.mutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_route() @@ -2272,10 +2388,10 @@ TEST_P(RdsTest, ChooseLastRoute) { SetNextResolutionForLbChannelAllBalancers(); (void)SendRpc(); EXPECT_EQ(balancers_[0]->ads_service()->rds_response_state(), - AdsServiceImpl::ACKED); + AdsServiceImpl::NACKED); } -// Tests that RDS client should send a NACK if route match has non-empty prefix +// Tests that RDS client should send a ACK if route match has non-empty prefix // in the RDS response. TEST_P(RdsTest, RouteMatchHasNonemptyPrefix) { balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); @@ -2291,7 +2407,7 @@ TEST_P(RdsTest, RouteMatchHasNonemptyPrefix) { SetNextResolutionForLbChannelAllBalancers(); CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->rds_response_state(), - AdsServiceImpl::NACKED); + AdsServiceImpl::ACKED); } // Tests that RDS client should send a NACK if route has an action other than diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index edbe236dc17..31d46c7f4fc 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -1111,6 +1111,7 @@ src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.h \ +src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc \ src/core/ext/filters/client_channel/lb_policy_factory.h \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/lb_policy_registry.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 5c03ac61231..e58f2c53ebe 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -908,6 +908,7 @@ src/core/ext/filters/client_channel/lb_policy/subchannel_list.h \ src/core/ext/filters/client_channel/lb_policy/xds/cds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.cc \ src/core/ext/filters/client_channel/lb_policy/xds/xds.h \ +src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc \ src/core/ext/filters/client_channel/lb_policy_factory.h \ src/core/ext/filters/client_channel/lb_policy_registry.cc \ src/core/ext/filters/client_channel/lb_policy_registry.h \ From f3f11cc21cd3145f45acc037d36721def3cc4770 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 6 Apr 2020 23:21:02 -0700 Subject: [PATCH 08/37] Added new RPC methods to test routing different RPCs to different backends. 
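
These methods let the routing tests tell which route an RPC actually took:
each backend counts Echo1/Echo2 requests separately, and new
SendEcho1Rpc/SendEcho2Rpc plus CheckEcho1RpcSendOk/CheckEcho2RpcSendOk
helpers drive traffic per method. As an illustration only (a sketch of the
assertion pattern used in xds_end2end_test.cc below, not additional code in
this patch), a path-matching test can now verify per-method routing like so:

    // Send kNumRpcs RPCs on EchoTestService/Echo1 (wait_for_ready=true,
    // 1000 ms deadline) and check that only the backend behind the cluster
    // matched by the Echo1 route served them.
    CheckEcho1RpcSendOk(kNumRpcs, 1000, true);
    EXPECT_EQ(kNumRpcs, backends_[2]->backend_service()->Echo1RequestCount());
    EXPECT_EQ(0, backends_[3]->backend_service()->Echo1RequestCount());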
--- src/proto/grpc/testing/echo.proto | 2 + test/cpp/end2end/test_service_impl.cc | 12 ++ test/cpp/end2end/test_service_impl.h | 6 + test/cpp/end2end/xds_end2end_test.cc | 158 +++++++++++++++++++------- 4 files changed, 139 insertions(+), 39 deletions(-) diff --git a/src/proto/grpc/testing/echo.proto b/src/proto/grpc/testing/echo.proto index 977858f6bc5..6ea9873928e 100644 --- a/src/proto/grpc/testing/echo.proto +++ b/src/proto/grpc/testing/echo.proto @@ -22,6 +22,8 @@ package grpc.testing; service EchoTestService { rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); // A service which checks that the initial metadata sent over contains some // expected key value pair rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc index 94628195b06..ad1592bf7d8 100644 --- a/test/cpp/end2end/test_service_impl.cc +++ b/test/cpp/end2end/test_service_impl.cc @@ -234,6 +234,18 @@ Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request, return Status::OK; } +Status TestServiceImpl::Echo1(ServerContext* context, + const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); +} + +Status TestServiceImpl::Echo2(ServerContext* context, + const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); +} + Status TestServiceImpl::CheckClientInitialMetadata( ServerContext* context, const SimpleRequest* /*request*/, SimpleResponse* /*response*/) { diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 0978d5f19b7..e41359f9655 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -84,6 +84,12 @@ class TestServiceImpl : public ::grpc::testing::EchoTestService::Service { Status Echo(ServerContext* context, const EchoRequest* request, EchoResponse* response) override; + Status Echo1(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override; + + Status Echo2(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override; + Status CheckClientInitialMetadata(ServerContext* context, const SimpleRequest* request, SimpleResponse* response) override; diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index c2b998bdf58..f7772ba0376 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -259,6 +259,36 @@ class BackendServiceImpl : public BackendService { return status; } + Status Echo1(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + // Backend should receive the call credentials metadata. + auto call_credentials_entry = + context->client_metadata().find(g_kCallCredsMdKey); + EXPECT_NE(call_credentials_entry, context->client_metadata().end()); + if (call_credentials_entry != context->client_metadata().end()) { + EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); + } + echo1_request_count_++; + const auto status = TestServiceImpl::Echo1(context, request, response); + AddClient(context->peer()); + return status; + } + + Status Echo2(ServerContext* context, const EchoRequest* request, + EchoResponse* response) override { + // Backend should receive the call credentials metadata. 
+ auto call_credentials_entry = + context->client_metadata().find(g_kCallCredsMdKey); + EXPECT_NE(call_credentials_entry, context->client_metadata().end()); + if (call_credentials_entry != context->client_metadata().end()) { + EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); + } + echo2_request_count_++; + const auto status = TestServiceImpl::Echo2(context, request, response); + AddClient(context->peer()); + return status; + } + void Start() {} void Shutdown() {} @@ -267,6 +297,10 @@ class BackendServiceImpl : public BackendService { return clients_; } + size_t Echo1RequestCount() { return echo1_request_count_; } + + size_t Echo2RequestCount() { return echo2_request_count_; } + private: void AddClient(const grpc::string& client) { grpc_core::MutexLock lock(&clients_mu_); @@ -276,6 +310,8 @@ class BackendServiceImpl : public BackendService { grpc_core::Mutex mu_; grpc_core::Mutex clients_mu_; std::set clients_; + size_t echo1_request_count_ = 0; + size_t echo2_request_count_ = 0; }; class ClientStats { @@ -1356,6 +1392,34 @@ class XdsEnd2endTest : public ::testing::TestWithParam { return status; } + Status SendEcho1Rpc(EchoResponse* response = nullptr, int timeout_ms = 1000, + bool wait_for_ready = false) { + const bool local_response = (response == nullptr); + if (local_response) response = new EchoResponse; + EchoRequest request; + request.set_message(kRequestMessage_); + ClientContext context; + context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); + if (wait_for_ready) context.set_wait_for_ready(true); + Status status = stub_->Echo1(&context, request, response); + if (local_response) delete response; + return status; + } + + Status SendEcho2Rpc(EchoResponse* response = nullptr, int timeout_ms = 1000, + bool wait_for_ready = false) { + const bool local_response = (response == nullptr); + if (local_response) response = new EchoResponse; + EchoRequest request; + request.set_message(kRequestMessage_); + ClientContext context; + context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); + if (wait_for_ready) context.set_wait_for_ready(true); + Status status = stub_->Echo2(&context, request, response); + if (local_response) delete response; + return status; + } + void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000, bool wait_for_ready = false) { for (size_t i = 0; i < times; ++i) { @@ -1372,6 +1436,28 @@ class XdsEnd2endTest : public ::testing::TestWithParam { EXPECT_FALSE(status.ok()); } + void CheckEcho1RpcSendOk(const size_t times = 1, const int timeout_ms = 1000, + bool wait_for_ready = false) { + for (size_t i = 0; i < times; ++i) { + EchoResponse response; + const Status status = SendEcho1Rpc(&response, timeout_ms, wait_for_ready); + EXPECT_TRUE(status.ok()) << "code=" << status.error_code() + << " message=" << status.error_message(); + EXPECT_EQ(response.message(), kRequestMessage_); + } + } + + void CheckEcho2RpcSendOk(const size_t times = 1, const int timeout_ms = 1000, + bool wait_for_ready = false) { + for (size_t i = 0; i < times; ++i) { + EchoResponse response; + const Status status = SendEcho2Rpc(&response, timeout_ms, wait_for_ready); + EXPECT_TRUE(status.ok()) << "code=" << status.error_code() + << " message=" << status.error_message(); + EXPECT_EQ(response.message(), kRequestMessage_); + } + } + public: // This method could benefit test subclasses; to make it accessible // via bind with a qualified name, it needs to be public. 
@@ -2129,26 +2215,26 @@ TEST_P(LdsTest, Timeout) { CheckRpcSendFailure(); } +// Tests that LDS client should choose the default route (with no matching +// specified) after unable to find a match with previous routes. TEST_P(LdsTest, XdsRoutingPathMatching) { const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; const size_t kNumRpcs = 10; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 2)}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); - // We need to wait for all backends to come online. - WaitForAllBackends(0, 2); - // Populate new EDS resources. AdsServiceImpl::EdsResourceArgs args1({ {"locality0", GetBackendPorts(2, 3)}, }); AdsServiceImpl::EdsResourceArgs args2({ {"locality0", GetBackendPorts(3, 4)}, }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); balancers_[0]->ads_service()->SetEdsResource( AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), kNewCluster1Name); @@ -2163,28 +2249,34 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { new_cluster2.set_name(kNewCluster2Name); balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); // Change RDS resource to set up prefix matching to direct traffic to the - // first new cluster. + // second new cluster. RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - auto* mismatched_route = + auto* mismatched_route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - mismatched_route->mutable_match()->set_path( - "/grpc.testing.EchoTestService/Echo"); - mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); - auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - matched_route->mutable_match()->set_path( - "/grpc.testing.EchoTestService/NewMethod"); - matched_route->mutable_route()->set_cluster(kNewCluster2Name); + mismatched_route1->mutable_match()->set_path( + "/grpc.testing.EchoTestService/Echo1"); + mismatched_route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* mismatched_route2 = + new_route_config.mutable_virtual_hosts(0)->add_routes(); + mismatched_route2->mutable_match()->set_path( + "/grpc.testing.EchoTestService/Echo2"); + mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_match()->set_path(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - // Wait for the new backend to come up. - WaitForAllBackends(2, 3); - CheckRpcSendOk(kNumRpcs); + CheckEcho1RpcSendOk(kNumRpcs, 1000, true); + CheckEcho2RpcSendOk(kNumRpcs, 1000, true); // Make sure RPCs all go to the correct backend. 
for (size_t i = 0; i < 4; ++i) { if (i == 2) { - EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo1RequestCount()); + } else if (i == 3) { + EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo2RequestCount()); } else { EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); } @@ -2197,13 +2289,6 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { const size_t kNumRpcs = 10; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); - // We need to wait for all backends to come online. - WaitForAllBackends(0, 2); // Populate new EDS resources. AdsServiceImpl::EdsResourceArgs args1({ {"locality0", GetBackendPorts(2, 3)}, @@ -2239,15 +2324,13 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - // Wait for the new backend to come up. - WaitForAllBackends(3, 4); - CheckRpcSendOk(kNumRpcs); + CheckEcho1RpcSendOk(kNumRpcs, 1000, true); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 4; ++i) { if (i == 3) { - EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo1RequestCount()); } else { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service()->Echo1RequestCount()); } } } @@ -2260,20 +2343,18 @@ TEST_P(LdsTest, XdsRoutingDefaultRoute) { const size_t kNumRpcs = 10; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); + // Populate new EDS resources. AdsServiceImpl::EdsResourceArgs args({ {"locality0", GetBackendPorts(0, 2)}, }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); - // We need to wait for all backends to come online. - WaitForAllBackends(0, 2); - // Populate new EDS resources. AdsServiceImpl::EdsResourceArgs args1({ {"locality0", GetBackendPorts(2, 3)}, }); AdsServiceImpl::EdsResourceArgs args2({ {"locality0", GetBackendPorts(3, 4)}, }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); balancers_[0]->ads_service()->SetEdsResource( AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), kNewCluster1Name); @@ -2287,8 +2368,8 @@ TEST_P(LdsTest, XdsRoutingDefaultRoute) { Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); new_cluster2.set_name(kNewCluster2Name); balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); - // Change RDS resource to set up prefix matching to direct traffic to the - // second new cluster. + // Change RDS resource to set up prefix matching and path matching that do + // match the traffic, so traffic goes to the default cluster. 
RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); auto* mismatched_route1 = @@ -2299,7 +2380,7 @@ TEST_P(LdsTest, XdsRoutingDefaultRoute) { auto* mismatched_route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); mismatched_route2->mutable_match()->set_path( - "/grpc.testing.EchoTestService/EchoMismatch"); + "/grpc.testing.EchoTestService/Echo1"); mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2308,7 +2389,6 @@ TEST_P(LdsTest, XdsRoutingDefaultRoute) { Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - // Wait for the new backend to come up. WaitForAllBackends(0, 2); CheckRpcSendOk(kNumRpcs); // Make sure RPCs all go to the correct backend. From 0c2f9565f421816a81472065e649ef398f9fda86 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 7 Apr 2020 22:08:54 -0700 Subject: [PATCH 09/37] Fixing code review comments --- .../lb_policy/xds/xds_routing.cc | 198 +++++++++--------- .../ext/filters/client_channel/xds/xds_api.cc | 6 + .../filters/client_channel/xds/xds_client.cc | 65 +++--- test/cpp/end2end/xds_end2end_test.cc | 16 +- 4 files changed, 153 insertions(+), 132 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 53e6b26767a..7098b9ae9db 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -228,8 +228,8 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { for (const Route& route : route_table_) { if ((path_elements[0] == route.matcher.service && (path_elements[1] == route.matcher.method || - "" == route.matcher.method)) || - ("" == route.matcher.service && "" == route.matcher.method)) { + route.matcher.method.empty())) || + (route.matcher.service.empty() && route.matcher.method.empty())) { return route.picker.get()->Pick(args); } } @@ -237,8 +237,7 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { result.type = PickResult::PICK_FAILED; result.error = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "xds routing picker not given any picker; default " - "route not configured"), + "xds routing picker: no matching route"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); return result; } @@ -302,7 +301,7 @@ void XdsRoutingLb::UpdateLocked(UpdateArgs args) { // Add or update the actions in the new config. 
for (const auto& p : config_->action_map()) { const std::string& name = p.first; - RefCountedPtr config = p.second; + const RefCountedPtr& config = p.second; auto it = actions_.find(name); if (it == actions_.end()) { it = actions_.emplace(std::make_pair(name, nullptr)).first; @@ -367,21 +366,20 @@ void XdsRoutingLb::UpdateStateLocked() { switch (connectivity_state) { case GRPC_CHANNEL_READY: { RoutePicker::RouteTable route_table; - for (int i = 0; i < config_->route_table().size(); ++i) { + for (const auto& config_route : config_->route_table()) { RoutePicker::Route route; - route.matcher = config_->route_table()[i].matcher; - auto it = actions_.find(config_->route_table()[i].action); - if (it != actions_.end()) { - route.picker = it->second->picker_wrapper(); - } else { - gpr_log(GPR_INFO, - "[xds_routing_lb %p] child policy may have mis-behaved and " - "did not return a picker, creating a QueuePicker for %s", - this, config_->route_table()[i].action.c_str()); + route.matcher = config_route.matcher; + route.picker = actions_[config_route.action]->picker_wrapper(); + if (route.picker == nullptr) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { + gpr_log(GPR_INFO, + "[xds_routing_lb %p] child %s has not yet returned a " + "picker; creating a QueuePicker.", + this, config_route.action.c_str()); + } route.picker = MakeRefCounted( - config_->route_table()[i].action, - absl::make_unique( - Ref(DEBUG_LOCATION, "QueuePicker"))); + config_route.action, absl::make_unique( + Ref(DEBUG_LOCATION, "QueuePicker"))); } route_table.push_back(std::move(route)); } @@ -625,7 +623,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { std::vector error_list; // action map. XdsRoutingLbConfig::ActionMap action_map; - std::set action_in_use_set; + std::set action_in_use; auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -649,6 +647,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { error_list.push_back(error); } else { action_map[p.first] = std::move(child_config); + action_in_use.insert(p.first); } } } @@ -667,63 +666,26 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { } else { const Json::Array& array = it->second.array_value(); for (size_t i = 0; i < array.size(); ++i) { - const Json& element = array[i]; - if (element.type() != Json::Type::OBJECT) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("filed: routes element: ", i, - " should be of type object") - .c_str())); - } else { - XdsRoutingLbConfig::Route route; - // Parse MethodName. - auto it = element.object_value().find("methodName"); - if (it == json.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field:routes element: ", i, - " methodName is required") - .c_str())); - } else if (it->second.type() != Json::Type::OBJECT) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field:routes element: ", i, - " methodName type should be object") - .c_str())); - } else { - std::vector route_errors = - ParseRouteConfig(it->second, &route.matcher); - if (!route_errors.empty()) { - grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( - absl::StrCat("field:routes element: ", i, " error").c_str()); - for (grpc_error* route_error : route_errors) { - error = grpc_error_add_child(error, route_error); - } - error_list.push_back(error); - } - } - // Parse action. 
- it = element.object_value().find("action"); - if (it == json.object_value().end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field:routes element: ", i, " action is required") - .c_str())); - } else if (it->second.type() != Json::Type::STRING) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field:routes element: ", i, - " action type should be string") - .c_str())); - } else { - route.action = it->second.string_value(); - } - // Validate action exists and mark it as used. - if (action_map.find(route.action) == action_map.end()) { - grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("action ", route.action, " does not exist") - .c_str()); - error_list.push_back(error); - } else { - action_in_use_set.insert(route.action); + XdsRoutingLbConfig::Route route; + std::vector route_errors = ParseRoute(array[i], &route); + if (!route_errors.empty()) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( + absl::StrCat("field:routes element: ", i, " error").c_str()); + for (grpc_error* route_error : route_errors) { + error = grpc_error_add_child(error, route_error); } - route_table.emplace_back(std::move(route)); + error_list.push_back(error); + } + // Validate action exists and mark it as used. + if (action_map.find(route.action) == action_map.end()) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field: routes element: ", i, " error: action ", + route.action, " does not exist") + .c_str()); + error_list.push_back(error); } + action_in_use.erase(route.action); + route_table.emplace_back(std::move(route)); } } if (route_table.size() == 0) { @@ -731,11 +693,15 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { GRPC_ERROR_CREATE_FROM_STATIC_STRING("no valid routes configured"); error_list.push_back(error); } - for (const auto& action : action_map) { - if (action_in_use_set.find(action.first) == action_in_use_set.end()) { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("action ", action.first, " is never used").c_str())); - } + if (!(route_table[route_table.size() - 1].matcher.service.empty() && + route_table[route_table.size() - 1].matcher.method.empty())) { + grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "default route must not contain service or method"); + error_list.push_back(error); + } + if (!action_in_use.empty()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "some actions were not referenced by any route")); } if (!error_list.empty()) { *error = GRPC_ERROR_CREATE_FROM_VECTOR( @@ -775,35 +741,77 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { return error_list; } - static std::vector ParseRouteConfig( + static std::vector ParseMethodName( const Json& json, XdsRoutingLbConfig::Matcher* route_config) { std::vector error_list; + if (json.type() != Json::Type::OBJECT) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:methodName should be of type object")); + return error_list; + } // Parse service auto it = json.object_value().find("service"); - if (it == json.object_value().end()) { + if (it != json.object_value().end()) { + if (it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:service error: should be string")); + } else { + route_config->service = it->second.string_value(); + } + } + // Parse method + it = json.object_value().find("method"); + if (it != json.object_value().end()) { + if 
(it->second.type() != Json::Type::STRING) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:method error: should be string")); + } else { + route_config->method = it->second.string_value(); + } + } + if (route_config->service.empty() && !route_config->method.empty()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:service error: required field not present")); - } else if (it->second.type() != Json::Type::STRING) { + "field:methodName error: service is empty when method is " + "not")); + } + return error_list; + } + + static std::vector ParseRoute(const Json& json, + XdsRoutingLbConfig::Route* route) { + std::vector error_list; + if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:service error: should be string")); + "field:route element should be of type object")); + return error_list; + } + // Parse MethodName. + auto it = json.object_value().find("methodName"); + if (it == json.object_value().end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:routes element: methodName is required")); } else { - route_config->service = it->second.string_value(); + std::vector route_errors = + ParseMethodName(it->second, &route->matcher); + if (!route_errors.empty()) { + grpc_error* error = + GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:route element error"); + for (grpc_error* route_error : route_errors) { + error = grpc_error_add_child(error, route_error); + } + error_list.push_back(error); + } } - // Parse method - it = json.object_value().find("method"); + // Parse action. + it = json.object_value().find("action"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:method error: required field not present")); + "field:route element: action is required")); } else if (it->second.type() != Json::Type::STRING) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:method error: should be string")); + "field:route element error action should be of type string")); } else { - route_config->method = it->second.string_value(); - } - if (route_config->service == "" && route_config->method != "") { - error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName error: service is empty when method is " - "not")); + route->action = it->second.string_value(); } return error_list; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 2c18af46a16..71e9c07dba7 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1045,6 +1045,12 @@ grpc_error* RouteConfigParse( "Path is not empty string, prefix cannot also be non-empty."); } } + if (i == (size - 1)) { + if (!(rds_route.service.empty() && rds_route.method.empty())) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Default route must have empty service and method"); + } + } if (!envoy_api_v2_route_Route_has_route(route)) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No RouteAction found in route."); diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 29ca7e78901..568b437fac5 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -903,6 +903,10 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( ? 
lds_update->route_config_name.c_str() : "")); if (lds_update->rds_update.has_value()) { + gpr_log(GPR_INFO, + " [xds_client %p] LDS update received; LDS RouteConfiguration " + "contains %lu routes", + this, lds_update->rds_update.value().routes.size()); for (const auto& route : lds_update->rds_update.value().routes) { gpr_log(GPR_INFO, " route: { service=\"%s\", " @@ -960,15 +964,18 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdate( GRPC_ERROR_CREATE_FROM_STATIC_STRING( "RDS update does not include requested resource")); return; - } else { - if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { - for (const auto& route : rds_update.value().routes) { - gpr_log(GPR_INFO, - " route: { service=\"%s\", " - "method=\"%s\" }, cluster=\"%s\" }", - route.service.c_str(), route.method.c_str(), - route.cluster_name.c_str()); - } + } + if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) { + gpr_log(GPR_INFO, + "[xds_client %p] RDS update received; RouteConfiguration contains " + "%lu routes", + this, rds_update.value().routes.size()); + for (const auto& route : rds_update.value().routes) { + gpr_log(GPR_INFO, + " route: { service=\"%s\", " + "method=\"%s\" }, cluster=\"%s\" }", + route.service.c_str(), route.method.c_str(), + route.cluster_name.c_str()); } } auto& rds_state = state_map_[XdsApi::kRdsTypeUrl]; @@ -2048,31 +2055,29 @@ void XdsClient::ResetBackoff() { namespace { std::string CreateServiceConfigActionCluster(const std::string& cluster_name) { - std::string json = absl::StrFormat( - " \"cds:%s\":{\n" - " \"child_policy\":[ {\n" - " \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " }\n" - " } ]\n" - " }", - cluster_name.c_str(), cluster_name.c_str()); - return json; + return ( + absl::StrFormat(" \"cds:%s\":{\n" + " \"child_policy\":[ {\n" + " \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " }\n" + " } ]\n" + " }", + cluster_name.c_str(), cluster_name.c_str())); } std::string CreateServiceConfigRoute(const std::string& cluster_name, const std::string& service, const std::string& method) { - std::string json = absl::StrFormat( - " { \n" - " \"methodName\": {\n" - " \"service\": \"%s\",\n" - " \"method\": \"%s\"\n" - " },\n" - " \"action\": \"cds:%s\"\n" - " }", - service.c_str(), method.c_str(), cluster_name.c_str()); - return json; + return ( + absl::StrFormat(" { \n" + " \"methodName\": {\n" + " \"service\": \"%s\",\n" + " \"method\": \"%s\"\n" + " },\n" + " \"action\": \"cds:%s\"\n" + " }", + service.c_str(), method.c_str(), cluster_name.c_str())); } } // namespace @@ -2111,8 +2116,6 @@ grpc_error* XdsClient::CreateServiceConfig( std::string json = absl::StrJoin(config_parts, ""); grpc_error* error = GRPC_ERROR_NONE; *service_config = ServiceConfig::Create(json.c_str(), &error); - gpr_log(GPR_INFO, "Built service config: \"%s\"", - service_config->get()->json_string().c_str()); return error; } diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index f7772ba0376..5b86667ca67 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -2155,8 +2155,8 @@ TEST_P(LdsTest, DefaultRouteInvalid) { AdsServiceImpl::NACKED); } -// Tests that LDS client should send a ACK if route match has non-empty prefix -// in the LDS response. +// Tests that LDS client should send a NACK if route match has non-empty prefix +// as the only route (default) in the LDS response. 
TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); @@ -2170,7 +2170,7 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { SetNextResolutionForLbChannelAllBalancers(); CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), - AdsServiceImpl::ACKED); + AdsServiceImpl::NACKED); } // Tests that LDS client should send a NACK if route has an action other than @@ -2321,6 +2321,10 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTestService"); matched_route->mutable_route()->set_cluster(kNewCluster2Name); + auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_match()->set_path(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); @@ -2471,8 +2475,8 @@ TEST_P(RdsTest, DefaultRouteInvalid) { AdsServiceImpl::NACKED); } -// Tests that RDS client should send a ACK if route match has non-empty prefix -// in the RDS response. +// Tests that RDS client should send a NACK if route match has non-empty prefix +// as the only route (default) in the RDS response. TEST_P(RdsTest, RouteMatchHasNonemptyPrefix) { balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); RouteConfiguration route_config = @@ -2487,7 +2491,7 @@ TEST_P(RdsTest, RouteMatchHasNonemptyPrefix) { SetNextResolutionForLbChannelAllBalancers(); CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->rds_response_state(), - AdsServiceImpl::ACKED); + AdsServiceImpl::NACKED); } // Tests that RDS client should send a NACK if route has an action other than From d9611cf1ca60e3d9bb1c5d37657fe9aef22c42c6 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Wed, 8 Apr 2020 16:20:16 -0700 Subject: [PATCH 10/37] Fixed AllServerUnavailableFailFast test to return UNAVAILBLE instead UNKNOWN. --- .../ext/filters/client_channel/lb_policy/xds/xds_routing.cc | 4 ++-- test/cpp/end2end/xds_end2end_test.cc | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 7098b9ae9db..19542fc8b3a 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -393,8 +393,8 @@ void XdsRoutingLb::UpdateStateLocked() { break; default: picker = absl::make_unique( - GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "xds_routing: all children report state TRANSIENT_FAILURE")); + grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("TRANSIENT_FAILURE from XdsRoutingLb"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); } channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); } diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 5b86667ca67..cb86de8aa86 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1748,9 +1748,7 @@ TEST_P(BasicTest, AllServersUnreachableFailFast) { AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); const Status status = SendRpc(); // The error shouldn't be DEADLINE_EXCEEDED. 
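// (Illustrative note, not from the original patch.) The assertion below can
// be tightened to UNAVAILABLE because the xds_routing change earlier in this
// commit attaches that status to the TRANSIENT_FAILURE picker's error,
// roughly:
//
//   grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
//                          "TRANSIENT_FAILURE from XdsRoutingLb"),
//                      GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)
//
// so a fail-fast RPC now surfaces StatusCode::UNAVAILABLE rather than
// UNKNOWN.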
- gpr_log(GPR_INFO, "error code %d message received %s", status.error_code(), - status.error_message().c_str()); - EXPECT_NE(StatusCode::DEADLINE_EXCEEDED, status.error_code()); + EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code()); } // Tests that RPCs fail when the backends are down, and will succeed again after From c4d4541af565924ffa0986968a832646af948e3f Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 9 Apr 2020 22:49:57 -0700 Subject: [PATCH 11/37] Added TestMultipleServiceImpl.h: a templated test service to allow multiple RPC services to be used in test. --- .../lb_policy/xds/xds_routing.cc | 10 +- src/proto/grpc/testing/echo.proto | 26 + test/cpp/end2end/BUILD | 15 +- test/cpp/end2end/test_multiple_service_impl.h | 502 ++++++++++++++++++ test/cpp/end2end/xds_end2end_test.cc | 111 ++-- 5 files changed, 625 insertions(+), 39 deletions(-) create mode 100644 test/cpp/end2end/test_multiple_service_impl.h diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 20f34487d22..698463bcadc 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -244,8 +244,7 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { // XdsRoutingLb // -XdsRoutingLb::XdsRoutingLb(Args args) - : LoadBalancingPolicy(std::move(args)) {} +XdsRoutingLb::XdsRoutingLb(Args args) : LoadBalancingPolicy(std::move(args)) {} XdsRoutingLb::~XdsRoutingLb() { if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_routing_lb_trace)) { @@ -379,9 +378,10 @@ void XdsRoutingLb::UpdateStateLocked() { absl::make_unique(Ref(DEBUG_LOCATION, "QueuePicker")); break; default: - picker = absl::make_unique( - grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("TRANSIENT_FAILURE from XdsRoutingLb"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); + picker = absl::make_unique(grpc_error_set_int( + GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "TRANSIENT_FAILURE from XdsRoutingLb"), + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE)); } channel_control_helper()->UpdateState(connectivity_state, std::move(picker)); } diff --git a/src/proto/grpc/testing/echo.proto b/src/proto/grpc/testing/echo.proto index 6ea9873928e..db583a1305b 100644 --- a/src/proto/grpc/testing/echo.proto +++ b/src/proto/grpc/testing/echo.proto @@ -33,6 +33,32 @@ service EchoTestService { rpc Unimplemented(EchoRequest) returns (EchoResponse); } +service EchoTest1Service { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); + // A service which checks that the initial metadata sent over contains some + // expected key value pair + rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); + rpc RequestStream(stream EchoRequest) returns (EchoResponse); + rpc ResponseStream(EchoRequest) returns (stream EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); + rpc Unimplemented(EchoRequest) returns (EchoResponse); +} + +service EchoTest2Service { + rpc Echo(EchoRequest) returns (EchoResponse); + rpc Echo1(EchoRequest) returns (EchoResponse); + rpc Echo2(EchoRequest) returns (EchoResponse); + // A service which checks that the initial metadata sent over contains some + // expected key value pair + rpc CheckClientInitialMetadata(SimpleRequest) returns (SimpleResponse); + rpc RequestStream(stream EchoRequest) returns (EchoResponse); + rpc 
ResponseStream(EchoRequest) returns (stream EchoResponse); + rpc BidiStream(stream EchoRequest) returns (stream EchoResponse); + rpc Unimplemented(EchoRequest) returns (EchoResponse); +} + service UnimplementedEchoService { rpc Unimplemented(EchoRequest) returns (EchoResponse); } diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index a20128dcc2d..b8dbf5b4db5 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -35,6 +35,19 @@ grpc_cc_library( ], ) +grpc_cc_library( + name = "test_multiple_service_impl", + testonly = True, + hdrs = ["test_multiple_service_impl.h"], + external_deps = [ + "gtest", + ], + deps = [ + "//src/proto/grpc/testing:echo_proto", + "//test/cpp/util:test_util", + ], +) + grpc_cc_library( name = "test_health_check_service_impl", testonly = True, @@ -512,7 +525,7 @@ grpc_cc_test( "no_windows", ], # TODO(jtattermusch): fix test on windows deps = [ - ":test_service_impl", + ":test_multiple_service_impl", "//:gpr", "//:grpc", "//:grpc++", diff --git a/test/cpp/end2end/test_multiple_service_impl.h b/test/cpp/end2end/test_multiple_service_impl.h new file mode 100644 index 00000000000..f5ec71b79f6 --- /dev/null +++ b/test/cpp/end2end/test_multiple_service_impl.h @@ -0,0 +1,502 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H +#define GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/cpp/util/string_ref_helper.h" + +using std::chrono::system_clock; + +namespace grpc { +namespace testing { + +const int kServerDefaultResponseStreamsToSend = 3; +const char* const kServerResponseStreamsToSend = "server_responses_to_send"; +const char* const kServerTryCancelRequest = "server_try_cancel"; +const char* const kDebugInfoTrailerKey = "debug-info-bin"; +const char* const kServerFinishAfterNReads = "server_finish_after_n_reads"; +const char* const kServerUseCoalescingApi = "server_use_coalescing_api"; +const char* const kCheckClientInitialMetadataKey = "custom_client_metadata"; +const char* const kCheckClientInitialMetadataVal = "Value for client metadata"; + +typedef enum { + DO_NOT_CANCEL = 0, + CANCEL_BEFORE_PROCESSING, + CANCEL_DURING_PROCESSING, + CANCEL_AFTER_PROCESSING +} ServerTryCancelRequestPhase; + +namespace { + +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds. 
+void MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } +} + +void CheckServerAuthContext( + const experimental::ServerContextBase* context, + const grpc::string& expected_transport_security_type, + const grpc::string& expected_client_identity) { + std::shared_ptr auth_ctx = context->auth_context(); + std::vector tst = + auth_ctx->FindPropertyValues("transport_security_type"); + EXPECT_EQ(1u, tst.size()); + EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); + if (expected_client_identity.empty()) { + EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); + EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); + EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); + } else { + auto identity = auth_ctx->GetPeerIdentity(); + EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); + EXPECT_EQ(1u, identity.size()); + EXPECT_EQ(expected_client_identity, identity[0]); + } +} + +// Returns the number of pairs in metadata that exactly match the given +// key-value pair. Returns -1 if the pair wasn't found. +int MetadataMatchCount( + const std::multimap& metadata, + const grpc::string& key, const grpc::string& value) { + int count = 0; + for (const auto& metadatum : metadata) { + if (ToString(metadatum.first) == key && + ToString(metadatum.second) == value) { + count++; + } + } + return count; +} +} // namespace + +namespace { +int GetIntValueFromMetadataHelper( + const char* key, + const std::multimap& metadata, + int default_value) { + if (metadata.find(key) != metadata.end()) { + std::istringstream iss(ToString(metadata.find(key)->second)); + iss >> default_value; + gpr_log(GPR_INFO, "%s : %d", key, default_value); + } + + return default_value; +} + +int GetIntValueFromMetadata( + const char* key, + const std::multimap& metadata, + int default_value) { + return GetIntValueFromMetadataHelper(key, metadata, default_value); +} + +void ServerTryCancel(ServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); + // Now wait until it's really canceled + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); + } +} + +void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, + "Server called TryCancelNonblocking() to cancel the request"); +} + +} // namespace + +class TestMultipleServiceSignaller { + public: + void ClientWaitUntilRpcStarted() { + std::unique_lock lock(mu_); + cv_rpc_started_.wait(lock, [this] { return rpc_started_; }); + } + void ServerWaitToContinue() { + std::unique_lock lock(mu_); + cv_server_continue_.wait(lock, [this] { return server_should_continue_; }); + } + void SignalClientThatRpcStarted() { + std::unique_lock lock(mu_); + rpc_started_ = true; + cv_rpc_started_.notify_one(); + } + void SignalServerToContinue() { + std::unique_lock lock(mu_); + server_should_continue_ = true; + cv_server_continue_.notify_one(); + } + + private: + std::mutex mu_; + std::condition_variable cv_rpc_started_; + bool rpc_started_ /* 
GUARDED_BY(mu_) */ = false; + std::condition_variable cv_server_continue_; + bool server_should_continue_ /* GUARDED_BY(mu_) */ = false; +}; + +template +class TestMultipleServiceImpl : public RpcService { + public: + TestMultipleServiceImpl() : signal_client_(false), host_() {} + explicit TestMultipleServiceImpl(const grpc::string& host) + : signal_client_(false), host_(new grpc::string(host)) {} + + Status Echo(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + if (request->has_param() && + request->param().server_notify_client_when_started()) { + signaller_.SignalClientThatRpcStarted(); + signaller_.ServerWaitToContinue(); + } + + // A bit of sleep to make sure that short deadline tests fail + if (request->has_param() && request->param().server_sleep_us() > 0) { + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_micros(request->param().server_sleep_us(), + GPR_TIMESPAN))); + } + + if (request->has_param() && request->param().server_die()) { + gpr_log(GPR_ERROR, "The request should not reach application handler."); + GPR_ASSERT(0); + } + if (request->has_param() && request->param().has_expected_error()) { + const auto& error = request->param().expected_error(); + return Status(static_cast(error.code()), + error.error_message(), error.binary_error_details()); + } + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + if (server_try_cancel > DO_NOT_CANCEL) { + // Since this is a unary RPC, by the time this server handler is called, + // the 'request' message is already read from the client. So the scenarios + // in server_try_cancel don't make much sense. Just cancel the RPC as long + // as server_try_cancel is not DO_NOT_CANCEL + ServerTryCancel(context); + return Status::CANCELLED; + } + + response->set_message(request->message()); + MaybeEchoDeadline(context, request, response); + if (host_) { + response->mutable_param()->set_host(*host_); + } + if (request->has_param() && request->param().client_cancel_after_us()) { + { + std::unique_lock lock(mu_); + signal_client_ = true; + } + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().client_cancel_after_us(), + GPR_TIMESPAN))); + } + return Status::CANCELLED; + } else if (request->has_param() && + request->param().server_cancel_after_us()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().server_cancel_after_us(), + GPR_TIMESPAN))); + return Status::CANCELLED; + } else if (!request->has_param() || + !request->param().skip_cancelled_check()) { + EXPECT_FALSE(context->IsCancelled()); + } + + if (request->has_param() && request->param().echo_metadata_initially()) { + const std::multimap& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddInitialMetadata(ToString(metadatum.first), + ToString(metadatum.second)); + } + } + + if (request->has_param() && request->param().echo_metadata()) { + const std::multimap& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddTrailingMetadata(ToString(metadatum.first), + ToString(metadatum.second)); + } + // Terminate rpc with error and debug info in trailer. 
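// (Illustrative note, not part of this header.) On the client side a test can
// recover this trailer by looking up kDebugInfoTrailerKey in
// ClientContext::GetServerTrailingMetadata() and parsing the value with
// ParseFromString(); this assumes the request's param().debug_info() field is
// a DebugInfo proto message, as suggested by the SerializeAsString() call
// below.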
+ if (request->param().debug_info().stack_entries_size() || + !request->param().debug_info().detail().empty()) { + grpc::string serialized_debug_info = + request->param().debug_info().SerializeAsString(); + context->AddTrailingMetadata(kDebugInfoTrailerKey, + serialized_debug_info); + return Status::CANCELLED; + } + } + if (request->has_param() && + (request->param().expected_client_identity().length() > 0 || + request->param().check_auth_context())) { + CheckServerAuthContext( + context, request->param().expected_transport_security_type(), + request->param().expected_client_identity()); + } + if (request->has_param() && + request->param().response_message_length() > 0) { + response->set_message( + grpc::string(request->param().response_message_length(), '\0')); + } + if (request->has_param() && request->param().echo_peer()) { + response->mutable_param()->set_peer(context->peer()); + } + return Status::OK; + } + + Status Echo1(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); + } + + Status Echo2(ServerContext* context, const EchoRequest* request, + EchoResponse* response) { + return Echo(context, request, response); + } + + Status CheckClientInitialMetadata(ServerContext* context, + const SimpleRequest* /*request*/, + SimpleResponse* /*response*/) { + EXPECT_EQ(MetadataMatchCount(context->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), + 1); + EXPECT_EQ(1u, + context->client_metadata().count(kCheckClientInitialMetadataKey)); + return Status::OK; + } + + // Unimplemented is left unimplemented to test the returned error. + Status RequestStream(ServerContext* context, + ServerReader* reader, + EchoResponse* response) { + // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by + // the server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads + // any message from the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading messages from the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads + // all the messages from the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + response->set_message(""); + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + int num_msgs_read = 0; + while (reader->Read(&request)) { + response->mutable_message()->append(request.message()); + } + gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Return 'kNumResponseStreamMsgs' messages. 
+ // TODO(yangg) make it generic by adding a parameter into EchoRequest + Status ResponseStream(ServerContext* context, const EchoRequest* request, + ServerWriter* writer) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes + // any messages to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // writing messages to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes + // all the messages to the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + int server_coalescing_api = GetIntValueFromMetadata( + kServerUseCoalescingApi, context->client_metadata(), 0); + + int server_responses_to_send = GetIntValueFromMetadata( + kServerResponseStreamsToSend, context->client_metadata(), + kServerDefaultResponseStreamsToSend); + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + EchoResponse response; + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + for (int i = 0; i < server_responses_to_send; i++) { + response.set_message(request->message() + grpc::to_string(i)); + if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { + writer->WriteLast(response, WriteOptions()); + } else { + writer->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + Status BidiStream(ServerContext* context, + ServerReaderWriter* stream) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ + // writes any messages from/to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading/writing messages from/to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server + // reads/writes all messages from/to the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + EchoResponse response; + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + // kServerFinishAfterNReads suggests after how many reads, the server should + // write the last message and send status (coalesced using WriteLast) + int server_write_last = GetIntValueFromMetadata( + kServerFinishAfterNReads, context->client_metadata(), 0); + + int read_counts = 0; + while (stream->Read(&request)) { + read_counts++; + gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); + response.set_message(request.message()); + if (read_counts == server_write_last) { + stream->WriteLast(response, 
WriteOptions()); + } else { + stream->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Unimplemented is left unimplemented to test the returned error. + bool signal_client() { + std::unique_lock lock(mu_); + return signal_client_; + } + void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } + void SignalServerToContinue() { signaller_.SignalServerToContinue(); } + + private: + bool signal_client_; + std::mutex mu_; + TestMultipleServiceSignaller signaller_; + std::unique_ptr host_; +}; + +} // namespace testing +} // namespace grpc + +#endif // GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 5f97ba401cb..43f9e91a216 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -57,7 +57,7 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" -#include "test/cpp/end2end/test_service_impl.h" +#include "test/cpp/end2end/test_multiple_service_impl.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h" @@ -233,13 +233,14 @@ class CountedService : public ServiceType { size_t response_count_ = 0; }; -using BackendService = CountedService; using LrsService = CountedService; const char g_kCallCredsMdKey[] = "Balancer should not ..."; const char g_kCallCredsMdValue[] = "... receive me"; -class BackendServiceImpl : public BackendService { +template +class BackendServiceImpl + : public CountedService> { public: BackendServiceImpl() {} @@ -252,9 +253,11 @@ class BackendServiceImpl : public BackendService { if (call_credentials_entry != context->client_metadata().end()) { EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); } - IncreaseRequestCount(); - const auto status = TestServiceImpl::Echo(context, request, response); - IncreaseResponseCount(); + CountedService>::IncreaseRequestCount(); + const auto status = + TestMultipleServiceImpl::Echo(context, request, response); + CountedService< + TestMultipleServiceImpl>::IncreaseResponseCount(); AddClient(context->peer()); return status; } @@ -268,8 +271,12 @@ class BackendServiceImpl : public BackendService { if (call_credentials_entry != context->client_metadata().end()) { EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); } - echo1_request_count_++; - const auto status = TestServiceImpl::Echo1(context, request, response); + CountedService< + TestMultipleServiceImpl>::IncreaseResponseCount(); + const auto status = + TestMultipleServiceImpl::Echo1(context, request, response); + CountedService< + TestMultipleServiceImpl>::IncreaseResponseCount(); AddClient(context->peer()); return status; } @@ -283,8 +290,12 @@ class BackendServiceImpl : public BackendService { if (call_credentials_entry != context->client_metadata().end()) { EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); } - echo2_request_count_++; - const auto status = TestServiceImpl::Echo2(context, request, response); + CountedService< + TestMultipleServiceImpl>::IncreaseResponseCount(); + const auto status = + TestMultipleServiceImpl::Echo2(context, request, response); + CountedService< + TestMultipleServiceImpl>::IncreaseResponseCount(); AddClient(context->peer()); 
return status; } @@ -297,10 +308,6 @@ class BackendServiceImpl : public BackendService { return clients_; } - size_t Echo1RequestCount() { return echo1_request_count_; } - - size_t Echo2RequestCount() { return echo2_request_count_; } - private: void AddClient(const grpc::string& client) { grpc_core::MutexLock lock(&clients_mu_); @@ -310,8 +317,6 @@ class BackendServiceImpl : public BackendService { grpc_core::Mutex mu_; grpc_core::Mutex clients_mu_; std::set clients_; - size_t echo1_request_count_ = 0; - size_t echo2_request_count_ = 0; }; class ClientStats { @@ -1227,6 +1232,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { channel_creds->Unref(); channel_ = ::grpc::CreateCustomChannel(uri.str(), creds, args); stub_ = grpc::testing::EchoTestService::NewStub(channel_); + stub1_ = grpc::testing::EchoTest1Service::NewStub(channel_); + stub2_ = grpc::testing::EchoTest2Service::NewStub(channel_); } void ResetBackendCounters() { @@ -1407,7 +1414,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); - Status status = stub_->Echo1(&context, request, response); + Status status = stub1_->Echo1(&context, request, response); if (local_response) delete response; return status; } @@ -1421,7 +1428,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); - Status status = stub_->Echo2(&context, request, response); + Status status = stub2_->Echo2(&context, request, response); if (local_response) delete response; return status; } @@ -1541,20 +1548,46 @@ class XdsEnd2endTest : public ::testing::TestWithParam { class BackendServerThread : public ServerThread { public: - BackendServiceImpl* backend_service() { return &backend_service_; } + BackendServiceImpl<::grpc::testing::EchoTestService::Service>* + backend_service() { + return &backend_service_; + } + BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>* + backend1_service() { + return &backend1_service_; + } + BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>* + backend2_service() { + return &backend2_service_; + } private: void RegisterAllServices(ServerBuilder* builder) override { builder->RegisterService(&backend_service_); + builder->RegisterService(&backend1_service_); + builder->RegisterService(&backend2_service_); } - void StartAllServices() override { backend_service_.Start(); } + void StartAllServices() override { + backend_service_.Start(); + backend1_service_.Start(); + backend2_service_.Start(); + } - void ShutdownAllServices() override { backend_service_.Shutdown(); } + void ShutdownAllServices() override { + backend_service_.Shutdown(); + backend1_service_.Shutdown(); + backend2_service_.Shutdown(); + } const char* Type() override { return "Backend"; } - BackendServiceImpl backend_service_; + BackendServiceImpl<::grpc::testing::EchoTestService::Service> + backend_service_; + BackendServiceImpl<::grpc::testing::EchoTest1Service::Service> + backend1_service_; + BackendServiceImpl<::grpc::testing::EchoTest2Service::Service> + backend2_service_; }; class BalancerServerThread : public ServerThread { @@ -1593,6 +1626,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { const int client_load_reporting_interval_seconds_; std::shared_ptr channel_; std::unique_ptr stub_; + std::unique_ptr stub1_; + 
std::unique_ptr stub2_; std::vector> backends_; std::vector> balancers_; grpc_core::RefCountedPtr @@ -2261,12 +2296,12 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { auto* mismatched_route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); mismatched_route1->mutable_match()->set_path( - "/grpc.testing.EchoTestService/Echo1"); + "/grpc.testing.EchoTest1Service/Echo1"); mismatched_route1->mutable_route()->set_cluster(kNewCluster1Name); auto* mismatched_route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); mismatched_route2->mutable_match()->set_path( - "/grpc.testing.EchoTestService/Echo2"); + "/grpc.testing.EchoTest2Service/Echo2"); mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2280,11 +2315,17 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 4; ++i) { if (i == 2) { - EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo1RequestCount()); + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(kNumRpcs, backends_[i]->backend1_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); } else if (i == 3) { - EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo2RequestCount()); + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); + EXPECT_EQ(kNumRpcs, backends_[i]->backend2_service()->request_count()); } else { EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); } } } @@ -2322,10 +2363,10 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { auto* mismatched_route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); mismatched_route->mutable_match()->set_prefix( - "/grpc.testing.EchoTestService0"); + "/grpc.testing.EchoTestService"); mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTestService"); + matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); matched_route->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2338,9 +2379,13 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 4; ++i) { if (i == 3) { - EXPECT_EQ(kNumRpcs, backends_[i]->backend_service()->Echo1RequestCount()); + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(kNumRpcs, backends_[i]->backend1_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); } else { - EXPECT_EQ(0, backends_[i]->backend_service()->Echo1RequestCount()); + EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); } } } @@ -2727,7 +2772,7 @@ TEST_P(LocalityMapTest, NoLocalities) { // Tests that the locality map can work properly even when it contains a large // number of localities. 
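// (Illustrative note on the routing assertions above, not part of the patch.)
// backend_service(), backend1_service() and backend2_service() are three
// instances of the templated BackendServiceImpl registered on the same
// server, along the lines of:
//
//   BackendServiceImpl<::grpc::testing::EchoTest1Service::Service> backend1_service_;
//   builder->RegisterService(&backend1_service_);
//
// so the per-service request counts identify which route a given RPC matched.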
-TEST_P(LocalityMapTest, StressTest) { +/*TEST_P(LocalityMapTest, StressTest) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumLocalities = 100; @@ -2751,13 +2796,13 @@ TEST_P(LocalityMapTest, StressTest) { AdsServiceImpl::BuildEdsResource(args), 60 * 1000, kDefaultResourceName)); // Wait until backend 0 is ready, before which kNumLocalities localities are // received and handled by the xds policy. - WaitForBackend(0, /*reset_counters=*/false); + WaitForBackend(0, /*reset_counters=*false); EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); // Wait until backend 1 is ready, before which kNumLocalities localities are // removed by the xds policy. WaitForBackend(1); delayed_resource_setter.join(); -} +}*/ // Tests that the localities in a locality map are picked correctly after update // (addition, modification, deletion). From 8a8ca5436b4cee4bfec93c391323e43054991b3e Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Fri, 10 Apr 2020 01:26:05 -0700 Subject: [PATCH 12/37] Added grpc arg to enable xds routing and restore old tests. --- include/grpc/impl/codegen/grpc_types.h | 2 ++ .../ext/filters/client_channel/xds/xds_api.cc | 33 +++++++++++-------- .../ext/filters/client_channel/xds/xds_api.h | 1 + .../filters/client_channel/xds/xds_client.cc | 8 ++++- .../filters/client_channel/xds/xds_client.h | 2 ++ test/cpp/end2end/test_multiple_service_impl.h | 7 ---- test/cpp/end2end/xds_end2end_test.cc | 29 +++++++++++----- 7 files changed, 52 insertions(+), 30 deletions(-) diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index ff45450f3a3..1e4c9fd0a23 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -358,6 +358,8 @@ typedef struct { * The default is 15 seconds. */ #define GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS \ "grpc.xds_resource_does_not_exist_timeout_ms" +/* if set, enable xds routing policy */ +#define GRPC_ARG_XDS_ROUTING_ENABLED "grpc.xds_routing_enabled" /** If non-zero, grpc server's cronet compression workaround will be enabled */ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ "grpc.workaround.cronet_compression" diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 81abb35ff6f..0ed3753bd91 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -952,7 +952,8 @@ MatchType DomainPatternMatchType(const std::string& domain_pattern) { grpc_error* RouteConfigParse( XdsClient* client, TraceFlag* tracer, const envoy_api_v2_RouteConfiguration* route_config, - const std::string& expected_server_name, XdsApi::RdsUpdate* rds_update) { + const std::string& expected_server_name, const bool xds_routing_enabled, + XdsApi::RdsUpdate* rds_update) { MaybeLogRouteConfiguration(client, tracer, route_config); // Get the virtual hosts. 
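// (Illustrative aside, not part of this patch.) The xds_routing code path
// added in this commit is opt-in: callers gate it through the new channel
// argument, e.g. in a C++ test:
//
//   ChannelArguments args;
//   args.SetInt(GRPC_ARG_XDS_ROUTING_ENABLED, 1);
//
// When the flag is unset, the parsing below keeps the old behavior and only
// consumes the last (default) route of the virtual host.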
size_t size; @@ -1012,7 +1013,12 @@ grpc_error* RouteConfigParse( return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No route found in the virtual host."); } - for (size_t i = 0; i < size; ++i) { + + // If xds_routing is not configured, only look at the last one in the route + // list (the default route) + size_t start_index = size - 1; + if (xds_routing_enabled) start_index = 0; + for (size_t i = start_index; i < size; ++i) { const envoy_api_v2_route_Route* route = routes[i]; const envoy_api_v2_route_RouteMatch* match = envoy_api_v2_route_Route_match(route); @@ -1066,10 +1072,6 @@ grpc_error* RouteConfigParse( envoy_api_v2_route_RouteAction_cluster(route_action); rds_route.cluster_name = std::string(action.data, action.size); rds_update->routes.emplace_back(std::move(rds_route)); - gpr_log(GPR_INFO, "RouteConfigParse a route %s %s %s", - rds_update->routes[i].service.c_str(), - rds_update->routes[i].method.c_str(), - rds_update->routes[i].cluster_name.c_str()); } return GRPC_ERROR_NONE; } @@ -1077,6 +1079,7 @@ grpc_error* RouteConfigParse( grpc_error* LdsResponseParse(XdsClient* client, TraceFlag* tracer, const envoy_api_v2_DiscoveryResponse* response, const std::string& expected_server_name, + const bool xds_routing_enabled, absl::optional* lds_update, upb_arena* arena) { // Get the resources from the response. @@ -1122,8 +1125,9 @@ grpc_error* LdsResponseParse(XdsClient* client, TraceFlag* tracer, envoy_config_filter_network_http_connection_manager_v2_HttpConnectionManager_route_config( http_connection_manager); XdsApi::RdsUpdate rds_update; - grpc_error* error = RouteConfigParse(client, tracer, route_config, - expected_server_name, &rds_update); + grpc_error* error = + RouteConfigParse(client, tracer, route_config, expected_server_name, + xds_routing_enabled, &rds_update); if (error != GRPC_ERROR_NONE) return error; lds_update->emplace(); (*lds_update)->rds_update.emplace(std::move(rds_update)); @@ -1154,6 +1158,7 @@ grpc_error* RdsResponseParse(XdsClient* client, TraceFlag* tracer, const envoy_api_v2_DiscoveryResponse* response, const std::string& expected_server_name, const std::string& expected_route_config_name, + const bool xds_routing_enabled, absl::optional* rds_update, upb_arena* arena) { // Get the resources from the response. @@ -1182,8 +1187,9 @@ grpc_error* RdsResponseParse(XdsClient* client, TraceFlag* tracer, if (!upb_strview_eql(name, expected_name)) continue; // Parse the route_config. XdsApi::RdsUpdate local_rds_update; - grpc_error* error = RouteConfigParse( - client, tracer, route_config, expected_server_name, &local_rds_update); + grpc_error* error = + RouteConfigParse(client, tracer, route_config, expected_server_name, + xds_routing_enabled, &local_rds_update); if (error != GRPC_ERROR_NONE) return error; rds_update->emplace(std::move(local_rds_update)); return GRPC_ERROR_NONE; @@ -1463,6 +1469,7 @@ grpc_error* EdsResponseParse( grpc_error* XdsApi::ParseAdsResponse( const grpc_slice& encoded_response, const std::string& expected_server_name, const std::string& expected_route_config_name, + const bool xds_routing_enabled, const std::set& expected_cluster_names, const std::set& expected_eds_service_names, absl::optional* lds_update, @@ -1494,11 +1501,11 @@ grpc_error* XdsApi::ParseAdsResponse( // Parse the response according to the resource type. 
if (*type_url == kLdsTypeUrl) { return LdsResponseParse(client_, tracer_, response, expected_server_name, - lds_update, arena.ptr()); + xds_routing_enabled, lds_update, arena.ptr()); } else if (*type_url == kRdsTypeUrl) { return RdsResponseParse(client_, tracer_, response, expected_server_name, - expected_route_config_name, rds_update, - arena.ptr()); + expected_route_config_name, xds_routing_enabled, + rds_update, arena.ptr()); } else if (*type_url == kCdsTypeUrl) { return CdsResponseParse(client_, tracer_, response, expected_cluster_names, cds_update_map, arena.ptr()); diff --git a/src/core/ext/filters/client_channel/xds/xds_api.h b/src/core/ext/filters/client_channel/xds/xds_api.h index 28b173f019e..9bd74c777f7 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.h +++ b/src/core/ext/filters/client_channel/xds/xds_api.h @@ -257,6 +257,7 @@ class XdsApi { const grpc_slice& encoded_response, const std::string& expected_server_name, const std::string& expected_route_config_name, + const bool xds_routing_enabled, const std::set& expected_cluster_names, const std::set& expected_eds_service_names, absl::optional* lds_update, diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 568b437fac5..78176ac865a 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -1245,7 +1245,7 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked( (xds_client->lds_result_.has_value() ? xds_client->lds_result_->route_config_name : ""), - ads_calld->ClusterNamesForRequest(), + xds_client->xds_routing_enabled_, ads_calld->ClusterNamesForRequest(), ads_calld->EdsServiceNamesForRequest(), &lds_update, &rds_update, &cds_update_map, &eds_update_map, &version, &nonce, &type_url); grpc_slice_unref_internal(response_slice); @@ -1820,6 +1820,11 @@ grpc_millis GetRequestTimeout(const grpc_channel_args& args) { {15000, 0, INT_MAX}); } +bool GetXdsRoutingEnabled(const grpc_channel_args& args) { + return grpc_channel_args_find_integer(&args, GRPC_ARG_XDS_ROUTING_ENABLED, + {0, 0, 1}); +} + } // namespace XdsClient::XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties, @@ -1828,6 +1833,7 @@ XdsClient::XdsClient(Combiner* combiner, grpc_pollset_set* interested_parties, const grpc_channel_args& channel_args, grpc_error** error) : InternallyRefCounted(&grpc_xds_client_trace), request_timeout_(GetRequestTimeout(channel_args)), + xds_routing_enabled_(GetXdsRoutingEnabled(channel_args)), combiner_(GRPC_COMBINER_REF(combiner, "xds_client")), interested_parties_(interested_parties), bootstrap_( diff --git a/src/core/ext/filters/client_channel/xds/xds_client.h b/src/core/ext/filters/client_channel/xds/xds_client.h index e958d016bce..c79c169eb2e 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.h +++ b/src/core/ext/filters/client_channel/xds/xds_client.h @@ -241,6 +241,8 @@ class XdsClient : public InternallyRefCounted { const grpc_millis request_timeout_; + const bool xds_routing_enabled_; + Combiner* combiner_; grpc_pollset_set* interested_parties_; diff --git a/test/cpp/end2end/test_multiple_service_impl.h b/test/cpp/end2end/test_multiple_service_impl.h index f5ec71b79f6..2b06117774c 100644 --- a/test/cpp/end2end/test_multiple_service_impl.h +++ b/test/cpp/end2end/test_multiple_service_impl.h @@ -141,13 +141,6 @@ void ServerTryCancel(ServerContext* context) { } } -void ServerTryCancelNonblocking(experimental::CallbackServerContext* 
context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, - "Server called TryCancelNonblocking() to cancel the request"); -} - } // namespace class TestMultipleServiceSignaller { diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 43f9e91a216..3a4831f7a26 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1187,7 +1187,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { void ResetStub(int fallback_timeout = 0, int failover_timeout = 0, const grpc::string& expected_targets = "", - int xds_resource_does_not_exist_timeout = 0) { + int xds_resource_does_not_exist_timeout = 0, + int xds_routing_enabled = false) { ChannelArguments args; // TODO(juanlishen): Add setter to ChannelArguments. if (fallback_timeout > 0) { @@ -1200,6 +1201,9 @@ class XdsEnd2endTest : public ::testing::TestWithParam { args.SetInt(GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS, xds_resource_does_not_exist_timeout); } + if (xds_routing_enabled) { + args.SetInt(GRPC_ARG_XDS_ROUTING_ENABLED, 1); + } // If the parent channel is using the fake resolver, we inject the // response generator for the parent here, and then SetNextResolution() // will inject the xds channel's response generator via the parent's @@ -2178,11 +2182,13 @@ TEST_P(LdsTest, ChooseMatchedDomain) { AdsServiceImpl::ACKED); } -// Tests that the LDS client should NACK when the last route is not a default -// route. -TEST_P(LdsTest, DefaultRouteInvalid) { +// Tests that LDS client should choose the last route in the virtual host if +// multiple routes exist in the LDS response. +TEST_P(LdsTest, ChooseLastRoute) { RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); + *(route_config.mutable_virtual_hosts(0)->add_routes()) = + route_config.virtual_hosts(0).routes(0); route_config.mutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_route() @@ -2193,7 +2199,7 @@ TEST_P(LdsTest, DefaultRouteInvalid) { SetNextResolutionForLbChannelAllBalancers(); (void)SendRpc(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), - AdsServiceImpl::NACKED); + AdsServiceImpl::ACKED); } // Tests that LDS client should send a NACK if route match has non-empty prefix @@ -2259,6 +2265,7 @@ TEST_P(LdsTest, Timeout) { // Tests that LDS client should choose the default route (with no matching // specified) after unable to find a match with previous routes. TEST_P(LdsTest, XdsRoutingPathMatching) { + ResetStub(0, 0, "", 0, 1); const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; const size_t kNumRpcs = 10; @@ -2331,6 +2338,7 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { } TEST_P(LdsTest, XdsRoutingPrefixMatching) { + ResetStub(0, 0, "", 0, 1); const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; const size_t kNumRpcs = 10; @@ -2393,6 +2401,7 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { // Tests that LDS client should choose the default route (with no matching // specified) after unable to find a match with previous routes. TEST_P(LdsTest, XdsRoutingDefaultRoute) { + ResetStub(0, 0, "", 0, 1); const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; const size_t kNumRpcs = 10; @@ -2507,12 +2516,14 @@ TEST_P(RdsTest, ChooseMatchedDomain) { AdsServiceImpl::ACKED); } -// Tests that the RDS client should NACK when the last route is not a default -// route. 
-TEST_P(RdsTest, DefaultRouteInvalid) { +// Tests that RDS client should choose the last route in the virtual host if +// multiple routes exist in the RDS response. +TEST_P(RdsTest, ChooseLastRoute) { balancers_[0]->ads_service()->SetLdsToUseDynamicRds(); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); + *(route_config.mutable_virtual_hosts(0)->add_routes()) = + route_config.virtual_hosts(0).routes(0); route_config.mutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_route() @@ -2523,7 +2534,7 @@ TEST_P(RdsTest, DefaultRouteInvalid) { SetNextResolutionForLbChannelAllBalancers(); (void)SendRpc(); EXPECT_EQ(balancers_[0]->ads_service()->rds_response_state(), - AdsServiceImpl::NACKED); + AdsServiceImpl::ACKED); } // Tests that RDS client should send a NACK if route match has non-empty prefix From 0a7b9dac06674d0c282ee7f20ff0bc5df2d59773 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Fri, 10 Apr 2020 03:15:09 -0700 Subject: [PATCH 13/37] Fixing code review comments. --- .../lb_policy/xds/xds_routing.cc | 72 ++++++++--------- .../ext/filters/client_channel/xds/xds_api.cc | 15 ++-- .../filters/client_channel/xds/xds_client.cc | 42 +++++----- test/cpp/end2end/xds_end2end_test.cc | 78 ++----------------- 4 files changed, 70 insertions(+), 137 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 698463bcadc..7bcc05cc018 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -236,7 +236,7 @@ XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { result.error = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "xds routing picker: no matching route"), - GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); + GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL); return result; } @@ -610,7 +610,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { std::vector error_list; // action map. XdsRoutingLbConfig::ActionMap action_map; - std::set action_in_use; + std::set action_to_be_used; auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -634,13 +634,13 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { error_list.push_back(error); } else { action_map[p.first] = std::move(child_config); - action_in_use.insert(p.first); + action_to_be_used.insert(p.first); } } } - if (action_map.size() == 0) { + if (action_map.empty()) { error_list.push_back( - GRPC_ERROR_CREATE_FROM_COPIED_STRING("no valid actions configured")); + GRPC_ERROR_CREATE_FROM_STATIC_STRING("no valid actions configured")); } XdsRoutingLbConfig::RouteTable route_table; it = json.object_value().find("routes"); @@ -654,8 +654,11 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { const Json::Array& array = it->second.array_value(); for (size_t i = 0; i < array.size(); ++i) { XdsRoutingLbConfig::Route route; - std::vector route_errors = ParseRoute(array[i], &route); + std::vector route_errors = + ParseRoute(array[i], action_map, &route, &action_to_be_used); if (!route_errors.empty()) { + // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error + // string is not static in this case. 
grpc_error* error = GRPC_ERROR_CREATE_FROM_COPIED_STRING( absl::StrCat("field:routes element: ", i, " error").c_str()); for (grpc_error* route_error : route_errors) { @@ -663,30 +666,21 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { } error_list.push_back(error); } - // Validate action exists and mark it as used. - if (action_map.find(route.action) == action_map.end()) { - grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field: routes element: ", i, " error: action ", - route.action, " does not exist") - .c_str()); - error_list.push_back(error); - } - action_in_use.erase(route.action); route_table.emplace_back(std::move(route)); } } - if (route_table.size() == 0) { + if (route_table.empty()) { grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("no valid routes configured"); error_list.push_back(error); } - if (!(route_table[route_table.size() - 1].matcher.service.empty() && - route_table[route_table.size() - 1].matcher.method.empty())) { + if (!route_table.back().matcher.service.empty() || + !route_table.back().matcher.method.empty()) { grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "default route must not contain service or method"); error_list.push_back(error); } - if (!action_in_use.empty()) { + if (!action_to_be_used.empty()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "some actions were not referenced by any route")); } @@ -733,7 +727,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { std::vector error_list; if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName should be of type object")); + "value should be of type object")); return error_list; } // Parse service @@ -758,47 +752,53 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { } if (route_config->service.empty() && !route_config->method.empty()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:methodName error: service is empty when method is " - "not")); + "service is empty when method is not")); } return error_list; } - static std::vector ParseRoute(const Json& json, - XdsRoutingLbConfig::Route* route) { + static std::vector ParseRoute( + const Json& json, const XdsRoutingLbConfig::ActionMap& action_map, + XdsRoutingLbConfig::Route* route, + std::set* action_to_be_used) { std::vector error_list; if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:route element should be of type object")); + "value should be of type object")); return error_list; } // Parse MethodName. auto it = json.object_value().find("methodName"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:routes element: methodName is required")); + "field:methodName error:required field missing")); } else { - std::vector route_errors = + std::vector method_name_errors = ParseMethodName(it->second, &route->matcher); - if (!route_errors.empty()) { - grpc_error* error = - GRPC_ERROR_CREATE_FROM_COPIED_STRING("field:route element error"); - for (grpc_error* route_error : route_errors) { - error = grpc_error_add_child(error, route_error); - } - error_list.push_back(error); + if (!method_name_errors.empty()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_VECTOR( + "field:methodName", &method_name_errors)); } } // Parse action. 
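// (Illustrative aside, not part of this patch.) A route entry accepted by
// this parser looks roughly like the following, where the action name must
// also appear in the policy's "actions" map and the last route must leave
// both "service" and "method" empty; the concrete values here are taken from
// the tests and are only examples:
//
//   { "methodName": { "service": "grpc.testing.EchoTest1Service",
//                     "method": "Echo1" },
//     "action": "cds:new_cluster_1" }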
it = json.object_value().find("action"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:route element: action is required")); + "field:action error:required field missing")); } else if (it->second.type() != Json::Type::STRING) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "field:route element error action should be of type string")); + "field:action error:should be of type string")); } else { route->action = it->second.string_value(); + // Validate action exists and mark it as used. + if (!route->action.empty() && + action_map.find(route->action) == action_map.end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:action error:", route->action, + " does not exist") + .c_str())); + } + action_to_be_used->erase(route->action); } return error_list; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 0ed3753bd91..d74b2ab694f 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1051,12 +1051,6 @@ grpc_error* RouteConfigParse( "Path is not empty string, prefix cannot also be non-empty."); } } - if (i == (size - 1)) { - if (!(rds_route.service.empty() && rds_route.method.empty())) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Default route must have empty service and method"); - } - } if (!envoy_api_v2_route_Route_has_route(route)) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No RouteAction found in route."); @@ -1073,6 +1067,15 @@ grpc_error* RouteConfigParse( rds_route.cluster_name = std::string(action.data, action.size); rds_update->routes.emplace_back(std::move(rds_route)); } + if (rds_update->routes.empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No valid routes specified."); + } else { + if (!rds_update->routes.back().service.empty() || + !rds_update->routes.back().method.empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Default route must have empty service and method"); + } + } return GRPC_ERROR_NONE; } diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 78176ac865a..33e9d17c519 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -903,10 +903,8 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( ? 
lds_update->route_config_name.c_str() : "")); if (lds_update->rds_update.has_value()) { - gpr_log(GPR_INFO, - " [xds_client %p] LDS update received; LDS RouteConfiguration " - "contains %lu routes", - this, lds_update->rds_update.value().routes.size()); + gpr_log(GPR_INFO, " RouteConfiguration contains %lu routes", this, + lds_update->rds_update.value().routes.size()); for (const auto& route : lds_update->rds_update.value().routes) { gpr_log(GPR_INFO, " route: { service=\"%s\", " @@ -2061,29 +2059,29 @@ void XdsClient::ResetBackoff() { namespace { std::string CreateServiceConfigActionCluster(const std::string& cluster_name) { - return ( - absl::StrFormat(" \"cds:%s\":{\n" - " \"child_policy\":[ {\n" - " \"cds_experimental\":{\n" - " \"cluster\": \"%s\"\n" - " }\n" - " } ]\n" - " }", - cluster_name.c_str(), cluster_name.c_str())); + return absl::StrFormat( + " \"cds:%s\":{\n" + " \"child_policy\":[ {\n" + " \"cds_experimental\":{\n" + " \"cluster\": \"%s\"\n" + " }\n" + " } ]\n" + " }", + cluster_name.c_str(), cluster_name.c_str()); } std::string CreateServiceConfigRoute(const std::string& cluster_name, const std::string& service, const std::string& method) { - return ( - absl::StrFormat(" { \n" - " \"methodName\": {\n" - " \"service\": \"%s\",\n" - " \"method\": \"%s\"\n" - " },\n" - " \"action\": \"cds:%s\"\n" - " }", - service.c_str(), method.c_str(), cluster_name.c_str())); + return absl::StrFormat( + " { \n" + " \"methodName\": {\n" + " \"service\": \"%s\",\n" + " \"method\": \"%s\"\n" + " },\n" + " \"action\": \"cds:%s\"\n" + " }", + service.c_str(), method.c_str(), cluster_name.c_str()); } } // namespace diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 3a4831f7a26..2665f9e9082 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -2312,11 +2312,11 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); - default_route->mutable_match()->set_path(""); default_route->mutable_route()->set_cluster(kDefaultResourceName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); + CheckRpcSendOk(kNumRpcs, 1000, true); CheckEcho1RpcSendOk(kNumRpcs, 1000, true); CheckEcho2RpcSendOk(kNumRpcs, 1000, true); // Make sure RPCs all go to the correct backend. 
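The CreateServiceConfigActionCluster and CreateServiceConfigRoute helpers reformatted earlier in this patch build the per-action and per-route JSON fragments of the generated routing service config. Roughly, the assembled pieces look like the sketch below; the enclosing "actions"/"routes" wrapper and the concrete cluster and service names are assumptions inferred from the fields that ParseConfig and ParseRoute in xds_routing.cc expect, not output captured from an actual configuration. Note that the last route plays the default-route role, so its service and method are left empty, and every action key is referenced by at least one route.

  {
    "actions": {
      "cds:cluster_1": {
        "child_policy": [
          { "cds_experimental": { "cluster": "cluster_1" } }
        ]
      }
    },
    "routes": [
      {
        "methodName": { "service": "grpc.testing.EchoTestService", "method": "Echo1" },
        "action": "cds:cluster_1"
      },
      {
        "methodName": { "service": "", "method": "" },
        "action": "cds:cluster_1"
      }
    ]
  }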
@@ -2330,7 +2330,7 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); EXPECT_EQ(kNumRpcs, backends_[i]->backend2_service()->request_count()); } else { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); + EXPECT_EQ(kNumRpcs / 2, backends_[i]->backend_service()->request_count()); EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); } @@ -2378,7 +2378,6 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { matched_route->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); - default_route->mutable_match()->set_path(""); default_route->mutable_route()->set_cluster(kDefaultResourceName); Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); @@ -2398,73 +2397,6 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { } } -// Tests that LDS client should choose the default route (with no matching -// specified) after unable to find a match with previous routes. -TEST_P(LdsTest, XdsRoutingDefaultRoute) { - ResetStub(0, 0, "", 0, 1); - const char* kNewCluster1Name = "new_cluster_1"; - const char* kNewCluster2Name = "new_cluster_2"; - const size_t kNumRpcs = 10; - SetNextResolution({}); - SetNextResolutionForLbChannelAllBalancers(); - // Populate new EDS resources. - AdsServiceImpl::EdsResourceArgs args({ - {"locality0", GetBackendPorts(0, 2)}, - }); - AdsServiceImpl::EdsResourceArgs args1({ - {"locality0", GetBackendPorts(2, 3)}, - }); - AdsServiceImpl::EdsResourceArgs args2({ - {"locality0", GetBackendPorts(3, 4)}, - }); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), - kNewCluster1Name); - balancers_[0]->ads_service()->SetEdsResource( - AdsServiceImpl::BuildEdsResource(args2, kNewCluster2Name), - kNewCluster2Name); - // Populate new CDS resources. - Cluster new_cluster1 = balancers_[0]->ads_service()->default_cluster(); - new_cluster1.set_name(kNewCluster1Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster1, kNewCluster1Name); - Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); - new_cluster2.set_name(kNewCluster2Name); - balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); - // Change RDS resource to set up prefix matching and path matching that do - // match the traffic, so traffic goes to the default cluster. 
- RouteConfiguration new_route_config = - balancers_[0]->ads_service()->default_route_config(); - auto* mismatched_route1 = - new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - mismatched_route1->mutable_match()->set_prefix( - "/grpc.testing.EchoTestService0"); - mismatched_route1->mutable_route()->set_cluster(kNewCluster1Name); - auto* mismatched_route2 = - new_route_config.mutable_virtual_hosts(0)->add_routes(); - mismatched_route2->mutable_match()->set_path( - "/grpc.testing.EchoTestService/Echo1"); - mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); - auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - default_route->mutable_match()->set_prefix(""); - default_route->mutable_match()->set_path(""); - default_route->mutable_route()->set_cluster(kDefaultResourceName); - Listener listener = - balancers_[0]->ads_service()->BuildListener(new_route_config); - balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumRpcs); - // Make sure RPCs all go to the correct backend. - for (size_t i = 0; i < 4; ++i) { - if (i < 2) { - EXPECT_EQ(kNumRpcs / 2, backends_[i]->backend_service()->request_count()); - } else { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - } - } -} - using RdsTest = BasicTest; // Tests that RDS client should send an ACK upon correct RDS response. @@ -2783,7 +2715,7 @@ TEST_P(LocalityMapTest, NoLocalities) { // Tests that the locality map can work properly even when it contains a large // number of localities. -/*TEST_P(LocalityMapTest, StressTest) { +TEST_P(LocalityMapTest, StressTest) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumLocalities = 100; @@ -2807,13 +2739,13 @@ TEST_P(LocalityMapTest, NoLocalities) { AdsServiceImpl::BuildEdsResource(args), 60 * 1000, kDefaultResourceName)); // Wait until backend 0 is ready, before which kNumLocalities localities are // received and handled by the xds policy. - WaitForBackend(0, /*reset_counters=*false); + WaitForBackend(0, /*reset_counters=*/false); EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); // Wait until backend 1 is ready, before which kNumLocalities localities are // removed by the xds policy. WaitForBackend(1); delayed_resource_setter.join(); -}*/ +} // Tests that the localities in a locality map are picked correctly after update // (addition, modification, deletion).
From 1ae804c8c01af46ec4df20b4d54dd46e6a9701e3 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Sat, 11 Apr 2020 21:39:40 -0700 Subject: [PATCH 14/37] Fixing code review comments: - Fixing comments in all files except the test file - Multi-purpose test_service_impl.h as opposed to creating new files.
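The multi-purpose test_service_impl.h mentioned above replaces the concrete TestServiceImpl with the class template TestMultipleServiceImpl<RpcService>, so one set of handler bodies can back more than one generated synchronous service. A minimal sketch of the reuse pattern, assuming the header from this patch: EchoTestService is the real generated service used here, while the commented-out second alias is a hypothetical illustration only.

  #include <grpcpp/grpcpp.h>
  #include "test/cpp/end2end/test_service_impl.h"

  // One implementation of Echo/Echo1/Echo2 etc., instantiated per generated service.
  using EchoServiceImpl =
      grpc::testing::TestMultipleServiceImpl<grpc::testing::EchoTestService::Service>;
  // Hypothetical second instantiation, shown only to illustrate the reuse:
  // using OtherServiceImpl =
  //     grpc::testing::TestMultipleServiceImpl<grpc::testing::SomeOtherService::Service>;

  // Registers the instantiated service with a server under construction.
  void RegisterEchoService(grpc::ServerBuilder* builder, EchoServiceImpl* service) {
    builder->RegisterService(service);
  }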
--- include/grpc/impl/codegen/grpc_types.h | 5 +- .../lb_policy/xds/xds_routing.cc | 33 +- .../ext/filters/client_channel/xds/xds_api.cc | 8 +- .../filters/client_channel/xds/xds_client.cc | 6 +- test/cpp/end2end/BUILD | 2 +- test/cpp/end2end/test_multiple_service_impl.h | 495 ------------------ test/cpp/end2end/test_service_impl.cc | 380 -------------- test/cpp/end2end/test_service_impl.h | 399 +++++++++++++- test/cpp/end2end/xds_end2end_test.cc | 3 +- 9 files changed, 421 insertions(+), 910 deletions(-) delete mode 100644 test/cpp/end2end/test_multiple_service_impl.h diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index 1e4c9fd0a23..5a73918c0d0 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -358,7 +358,10 @@ typedef struct { * The default is 15 seconds. */ #define GRPC_ARG_XDS_RESOURCE_DOES_NOT_EXIST_TIMEOUT_MS \ "grpc.xds_resource_does_not_exist_timeout_ms" -/* if set, enable xds routing policy */ +/* If set, enable xds routing policy. This boolean argument is currently + * disabled by default; however, it will be changed to enabled by default + * once the functionality proves stable. This arg will eventually + * be removed completely. */ #define GRPC_ARG_XDS_ROUTING_ENABLED "grpc.xds_routing_enabled" /** If non-zero, grpc server's cronet compression workaround will be enabled */ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 7bcc05cc018..40174672e96 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -610,7 +610,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { std::vector error_list; // action map. XdsRoutingLbConfig::ActionMap action_map; - std::set action_to_be_used; + std::set actions_to_be_used; auto it = json.object_value().find("actions"); if (it == json.object_value().end()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -620,6 +620,11 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { "field:actions error:type should be object")); } else { for (const auto& p : it->second.object_value()) { + if (p.first.empty()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "field:actions element error: name cannot be empty")); + continue; + } RefCountedPtr child_config; std::vector child_errors = ParseChildConfig(p.second, &child_config); @@ -634,7 +639,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { error_list.push_back(error); } else { action_map[p.first] = std::move(child_config); - action_to_be_used.insert(p.first); + actions_to_be_used.insert(p.first); } } } @@ -655,7 +660,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { for (size_t i = 0; i < array.size(); ++i) { XdsRoutingLbConfig::Route route; std::vector route_errors = - ParseRoute(array[i], action_map, &route, &action_to_be_used); + ParseRoute(array[i], action_map, &route, &actions_to_be_used); if (!route_errors.empty()) { // Can't use GRPC_ERROR_CREATE_FROM_VECTOR() here, because the error // string is not static in this case. 
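Related to the grpc_types.h change above: GRPC_ARG_XDS_ROUTING_ENABLED is a plain channel argument that the client reads as a boolean with a default of false, so a caller that wants the new routing behavior has to set it explicitly. A minimal sketch, assuming a channel created against a placeholder xds target; the target string and the insecure credentials are illustrative choices, not taken from this series.

  #include <memory>
  #include <grpcpp/grpcpp.h>

  // Sketch: opt in to the experimental xds routing policy for one channel.
  // "xds:///example-target" is a placeholder, not a real resource name.
  std::shared_ptr<grpc::Channel> MakeRoutingEnabledChannel() {
    grpc::ChannelArguments args;
    args.SetInt(GRPC_ARG_XDS_ROUTING_ENABLED, 1);  // disabled by default
    return grpc::CreateCustomChannel("xds:///example-target",
                                     grpc::InsecureChannelCredentials(), args);
  }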
@@ -680,7 +685,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { "default route must not contain service or method"); error_list.push_back(error); } - if (!action_to_be_used.empty()) { + if (!actions_to_be_used.empty()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "some actions were not referenced by any route")); } @@ -760,7 +765,7 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { static std::vector ParseRoute( const Json& json, const XdsRoutingLbConfig::ActionMap& action_map, XdsRoutingLbConfig::Route* route, - std::set* action_to_be_used) { + std::set* actions_to_be_used) { std::vector error_list; if (json.type() != Json::Type::OBJECT) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( @@ -790,15 +795,19 @@ class XdsRoutingLbFactory : public LoadBalancingPolicyFactory { "field:action error:should be of type string")); } else { route->action = it->second.string_value(); - // Validate action exists and mark it as used. - if (!route->action.empty() && - action_map.find(route->action) == action_map.end()) { + if (route->action.empty()) { error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( - absl::StrCat("field:action error:", route->action, - " does not exist") - .c_str())); + "field:action error:cannot be empty")); + } else { + // Validate action exists and mark it as used. + if (action_map.find(route->action) == action_map.end()) { + error_list.push_back(GRPC_ERROR_CREATE_FROM_STATIC_STRING( + absl::StrCat("field:action error:", route->action, + " does not exist") + .c_str())); + } + actions_to_be_used->erase(route->action); } - action_to_be_used->erase(route->action); } return error_list; } diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index d74b2ab694f..855d34a780d 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1013,11 +1013,9 @@ grpc_error* RouteConfigParse( return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "No route found in the virtual host."); } - // If xds_routing is not configured, only look at the last one in the route // list (the default route) - size_t start_index = size - 1; - if (xds_routing_enabled) start_index = 0; + size_t start_index = xds_routing_enabled ? 0 : size - 1; for (size_t i = start_index; i < size; ++i) { const envoy_api_v2_route_Route* route = routes[i]; const envoy_api_v2_route_RouteMatch* match = @@ -1064,6 +1062,10 @@ grpc_error* RouteConfigParse( } const upb_strview action = envoy_api_v2_route_RouteAction_cluster(route_action); + if (action.size == 0) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "RouteAction has empty cluster."); + } rds_route.cluster_name = std::string(action.data, action.size); rds_update->routes.emplace_back(std::move(rds_route)); } diff --git a/src/core/ext/filters/client_channel/xds/xds_client.cc b/src/core/ext/filters/client_channel/xds/xds_client.cc index 33e9d17c519..2f0657496da 100644 --- a/src/core/ext/filters/client_channel/xds/xds_client.cc +++ b/src/core/ext/filters/client_channel/xds/xds_client.cc @@ -903,7 +903,7 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdate( ? 
lds_update->route_config_name.c_str() : "")); if (lds_update->rds_update.has_value()) { - gpr_log(GPR_INFO, " RouteConfiguration contains %lu routes", this, + gpr_log(GPR_INFO, " RouteConfiguration contains %lu routes", lds_update->rds_update.value().routes.size()); for (const auto& route : lds_update->rds_update.value().routes) { gpr_log(GPR_INFO, @@ -1819,8 +1819,8 @@ grpc_millis GetRequestTimeout(const grpc_channel_args& args) { } bool GetXdsRoutingEnabled(const grpc_channel_args& args) { - return grpc_channel_args_find_integer(&args, GRPC_ARG_XDS_ROUTING_ENABLED, - {0, 0, 1}); + return grpc_channel_args_find_bool(&args, GRPC_ARG_XDS_ROUTING_ENABLED, + false); } } // namespace diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index b8dbf5b4db5..bb6be0c4f02 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -38,7 +38,7 @@ grpc_cc_library( grpc_cc_library( name = "test_multiple_service_impl", testonly = True, - hdrs = ["test_multiple_service_impl.h"], + hdrs = ["test_service_impl.h"], external_deps = [ "gtest", ], diff --git a/test/cpp/end2end/test_multiple_service_impl.h b/test/cpp/end2end/test_multiple_service_impl.h deleted file mode 100644 index 2b06117774c..00000000000 --- a/test/cpp/end2end/test_multiple_service_impl.h +++ /dev/null @@ -1,495 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#ifndef GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H -#define GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "src/proto/grpc/testing/echo.grpc.pb.h" -#include "test/cpp/util/string_ref_helper.h" - -using std::chrono::system_clock; - -namespace grpc { -namespace testing { - -const int kServerDefaultResponseStreamsToSend = 3; -const char* const kServerResponseStreamsToSend = "server_responses_to_send"; -const char* const kServerTryCancelRequest = "server_try_cancel"; -const char* const kDebugInfoTrailerKey = "debug-info-bin"; -const char* const kServerFinishAfterNReads = "server_finish_after_n_reads"; -const char* const kServerUseCoalescingApi = "server_use_coalescing_api"; -const char* const kCheckClientInitialMetadataKey = "custom_client_metadata"; -const char* const kCheckClientInitialMetadataVal = "Value for client metadata"; - -typedef enum { - DO_NOT_CANCEL = 0, - CANCEL_BEFORE_PROCESSING, - CANCEL_DURING_PROCESSING, - CANCEL_AFTER_PROCESSING -} ServerTryCancelRequestPhase; - -namespace { - -// When echo_deadline is requested, deadline seen in the ServerContext is set in -// the response in seconds. 
-void MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, EchoResponse* response) { - if (request->has_param() && request->param().echo_deadline()) { - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); - if (context->deadline() != system_clock::time_point::max()) { - Timepoint2Timespec(context->deadline(), &deadline); - } - response->mutable_param()->set_request_deadline(deadline.tv_sec); - } -} - -void CheckServerAuthContext( - const experimental::ServerContextBase* context, - const grpc::string& expected_transport_security_type, - const grpc::string& expected_client_identity) { - std::shared_ptr auth_ctx = context->auth_context(); - std::vector tst = - auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); - EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); - if (expected_client_identity.empty()) { - EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); - EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); - EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); - } else { - auto identity = auth_ctx->GetPeerIdentity(); - EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); - EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity, identity[0]); - } -} - -// Returns the number of pairs in metadata that exactly match the given -// key-value pair. Returns -1 if the pair wasn't found. -int MetadataMatchCount( - const std::multimap& metadata, - const grpc::string& key, const grpc::string& value) { - int count = 0; - for (const auto& metadatum : metadata) { - if (ToString(metadatum.first) == key && - ToString(metadatum.second) == value) { - count++; - } - } - return count; -} -} // namespace - -namespace { -int GetIntValueFromMetadataHelper( - const char* key, - const std::multimap& metadata, - int default_value) { - if (metadata.find(key) != metadata.end()) { - std::istringstream iss(ToString(metadata.find(key)->second)); - iss >> default_value; - gpr_log(GPR_INFO, "%s : %d", key, default_value); - } - - return default_value; -} - -int GetIntValueFromMetadata( - const char* key, - const std::multimap& metadata, - int default_value) { - return GetIntValueFromMetadataHelper(key, metadata, default_value); -} - -void ServerTryCancel(ServerContext* context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - // Now wait until it's really canceled - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000, GPR_TIMESPAN))); - } -} - -} // namespace - -class TestMultipleServiceSignaller { - public: - void ClientWaitUntilRpcStarted() { - std::unique_lock lock(mu_); - cv_rpc_started_.wait(lock, [this] { return rpc_started_; }); - } - void ServerWaitToContinue() { - std::unique_lock lock(mu_); - cv_server_continue_.wait(lock, [this] { return server_should_continue_; }); - } - void SignalClientThatRpcStarted() { - std::unique_lock lock(mu_); - rpc_started_ = true; - cv_rpc_started_.notify_one(); - } - void SignalServerToContinue() { - std::unique_lock lock(mu_); - server_should_continue_ = true; - cv_server_continue_.notify_one(); - } - - private: - std::mutex mu_; - std::condition_variable cv_rpc_started_; - bool rpc_started_ /* GUARDED_BY(mu_) */ = false; - std::condition_variable cv_server_continue_; - bool server_should_continue_ /* GUARDED_BY(mu_) */ = false; -}; - -template -class TestMultipleServiceImpl : public RpcService { - public: - 
TestMultipleServiceImpl() : signal_client_(false), host_() {} - explicit TestMultipleServiceImpl(const grpc::string& host) - : signal_client_(false), host_(new grpc::string(host)) {} - - Status Echo(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - if (request->has_param() && - request->param().server_notify_client_when_started()) { - signaller_.SignalClientThatRpcStarted(); - signaller_.ServerWaitToContinue(); - } - - // A bit of sleep to make sure that short deadline tests fail - if (request->has_param() && request->param().server_sleep_us() > 0) { - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_micros(request->param().server_sleep_us(), - GPR_TIMESPAN))); - } - - if (request->has_param() && request->param().server_die()) { - gpr_log(GPR_ERROR, "The request should not reach application handler."); - GPR_ASSERT(0); - } - if (request->has_param() && request->param().has_expected_error()) { - const auto& error = request->param().expected_error(); - return Status(static_cast(error.code()), - error.error_message(), error.binary_error_details()); - } - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - if (server_try_cancel > DO_NOT_CANCEL) { - // Since this is a unary RPC, by the time this server handler is called, - // the 'request' message is already read from the client. So the scenarios - // in server_try_cancel don't make much sense. Just cancel the RPC as long - // as server_try_cancel is not DO_NOT_CANCEL - ServerTryCancel(context); - return Status::CANCELLED; - } - - response->set_message(request->message()); - MaybeEchoDeadline(context, request, response); - if (host_) { - response->mutable_param()->set_host(*host_); - } - if (request->has_param() && request->param().client_cancel_after_us()) { - { - std::unique_lock lock(mu_); - signal_client_ = true; - } - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().client_cancel_after_us(), - GPR_TIMESPAN))); - } - return Status::CANCELLED; - } else if (request->has_param() && - request->param().server_cancel_after_us()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().server_cancel_after_us(), - GPR_TIMESPAN))); - return Status::CANCELLED; - } else if (!request->has_param() || - !request->param().skip_cancelled_check()) { - EXPECT_FALSE(context->IsCancelled()); - } - - if (request->has_param() && request->param().echo_metadata_initially()) { - const std::multimap& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddInitialMetadata(ToString(metadatum.first), - ToString(metadatum.second)); - } - } - - if (request->has_param() && request->param().echo_metadata()) { - const std::multimap& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddTrailingMetadata(ToString(metadatum.first), - ToString(metadatum.second)); - } - // Terminate rpc with error and debug info in trailer. 
- if (request->param().debug_info().stack_entries_size() || - !request->param().debug_info().detail().empty()) { - grpc::string serialized_debug_info = - request->param().debug_info().SerializeAsString(); - context->AddTrailingMetadata(kDebugInfoTrailerKey, - serialized_debug_info); - return Status::CANCELLED; - } - } - if (request->has_param() && - (request->param().expected_client_identity().length() > 0 || - request->param().check_auth_context())) { - CheckServerAuthContext( - context, request->param().expected_transport_security_type(), - request->param().expected_client_identity()); - } - if (request->has_param() && - request->param().response_message_length() > 0) { - response->set_message( - grpc::string(request->param().response_message_length(), '\0')); - } - if (request->has_param() && request->param().echo_peer()) { - response->mutable_param()->set_peer(context->peer()); - } - return Status::OK; - } - - Status Echo1(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); - } - - Status Echo2(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); - } - - Status CheckClientInitialMetadata(ServerContext* context, - const SimpleRequest* /*request*/, - SimpleResponse* /*response*/) { - EXPECT_EQ(MetadataMatchCount(context->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), - 1); - EXPECT_EQ(1u, - context->client_metadata().count(kCheckClientInitialMetadataKey)); - return Status::OK; - } - - // Unimplemented is left unimplemented to test the returned error. - Status RequestStream(ServerContext* context, - ServerReader* reader, - EchoResponse* response) { - // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by - // the server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads - // any message from the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading messages from the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads - // all the messages from the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - response->set_message(""); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - int num_msgs_read = 0; - while (reader->Read(&request)) { - response->mutable_message()->append(request.message()); - } - gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - // Return 'kNumResponseStreamMsgs' messages. 
- // TODO(yangg) make it generic by adding a parameter into EchoRequest - Status ResponseStream(ServerContext* context, const EchoRequest* request, - ServerWriter* writer) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes - // any messages to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // writing messages to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes - // all the messages to the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - int server_coalescing_api = GetIntValueFromMetadata( - kServerUseCoalescingApi, context->client_metadata(), 0); - - int server_responses_to_send = GetIntValueFromMetadata( - kServerResponseStreamsToSend, context->client_metadata(), - kServerDefaultResponseStreamsToSend); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - EchoResponse response; - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - for (int i = 0; i < server_responses_to_send; i++) { - response.set_message(request->message() + grpc::to_string(i)); - if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { - writer->WriteLast(response, WriteOptions()); - } else { - writer->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - Status BidiStream(ServerContext* context, - ServerReaderWriter* stream) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ - // writes any messages from/to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading/writing messages from/to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server - // reads/writes all messages from/to the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - EchoResponse response; - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - // kServerFinishAfterNReads suggests after how many reads, the server should - // write the last message and send status (coalesced using WriteLast) - int server_write_last = GetIntValueFromMetadata( - kServerFinishAfterNReads, context->client_metadata(), 0); - - int read_counts = 0; - while (stream->Read(&request)) { - read_counts++; - gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); - response.set_message(request.message()); - if (read_counts == server_write_last) { - stream->WriteLast(response, 
WriteOptions()); - } else { - stream->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; - } - - // Unimplemented is left unimplemented to test the returned error. - bool signal_client() { - std::unique_lock lock(mu_); - return signal_client_; - } - void ClientWaitUntilRpcStarted() { signaller_.ClientWaitUntilRpcStarted(); } - void SignalServerToContinue() { signaller_.SignalServerToContinue(); } - - private: - bool signal_client_; - std::mutex mu_; - TestMultipleServiceSignaller signaller_; - std::unique_ptr host_; -}; - -} // namespace testing -} // namespace grpc - -#endif // GRPC_TEST_CPP_END2END_TEST_MULTIPLE_SERVICE_IMPL_H diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc index ad1592bf7d8..6517c5d6cc1 100644 --- a/test/cpp/end2end/test_service_impl.cc +++ b/test/cpp/end2end/test_service_impl.cc @@ -36,88 +36,6 @@ namespace grpc { namespace testing { namespace { -// When echo_deadline is requested, deadline seen in the ServerContext is set in -// the response in seconds. -void MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, EchoResponse* response) { - if (request->has_param() && request->param().echo_deadline()) { - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); - if (context->deadline() != system_clock::time_point::max()) { - Timepoint2Timespec(context->deadline(), &deadline); - } - response->mutable_param()->set_request_deadline(deadline.tv_sec); - } -} - -void CheckServerAuthContext( - const experimental::ServerContextBase* context, - const grpc::string& expected_transport_security_type, - const grpc::string& expected_client_identity) { - std::shared_ptr auth_ctx = context->auth_context(); - std::vector tst = - auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); - EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); - if (expected_client_identity.empty()) { - EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); - EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); - EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); - } else { - auto identity = auth_ctx->GetPeerIdentity(); - EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); - EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity, identity[0]); - } -} - -// Returns the number of pairs in metadata that exactly match the given -// key-value pair. Returns -1 if the pair wasn't found. 
-int MetadataMatchCount( - const std::multimap& metadata, - const grpc::string& key, const grpc::string& value) { - int count = 0; - for (const auto& metadatum : metadata) { - if (ToString(metadatum.first) == key && - ToString(metadatum.second) == value) { - count++; - } - } - return count; -} -} // namespace - -namespace { -int GetIntValueFromMetadataHelper( - const char* key, - const std::multimap& metadata, - int default_value) { - if (metadata.find(key) != metadata.end()) { - std::istringstream iss(ToString(metadata.find(key)->second)); - iss >> default_value; - gpr_log(GPR_INFO, "%s : %d", key, default_value); - } - - return default_value; -} - -int GetIntValueFromMetadata( - const char* key, - const std::multimap& metadata, - int default_value) { - return GetIntValueFromMetadataHelper(key, metadata, default_value); -} - -void ServerTryCancel(ServerContext* context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - // Now wait until it's really canceled - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000, GPR_TIMESPAN))); - } -} - void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { EXPECT_FALSE(context->IsCancelled()); context->TryCancel(); @@ -127,304 +45,6 @@ void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { } // namespace -Status TestServiceImpl::Echo(ServerContext* context, const EchoRequest* request, - EchoResponse* response) { - if (request->has_param() && - request->param().server_notify_client_when_started()) { - signaller_.SignalClientThatRpcStarted(); - signaller_.ServerWaitToContinue(); - } - - // A bit of sleep to make sure that short deadline tests fail - if (request->has_param() && request->param().server_sleep_us() > 0) { - gpr_sleep_until( - gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), - gpr_time_from_micros(request->param().server_sleep_us(), - GPR_TIMESPAN))); - } - - if (request->has_param() && request->param().server_die()) { - gpr_log(GPR_ERROR, "The request should not reach application handler."); - GPR_ASSERT(0); - } - if (request->has_param() && request->param().has_expected_error()) { - const auto& error = request->param().expected_error(); - return Status(static_cast(error.code()), error.error_message(), - error.binary_error_details()); - } - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - if (server_try_cancel > DO_NOT_CANCEL) { - // Since this is a unary RPC, by the time this server handler is called, - // the 'request' message is already read from the client. So the scenarios - // in server_try_cancel don't make much sense. 
Just cancel the RPC as long - // as server_try_cancel is not DO_NOT_CANCEL - ServerTryCancel(context); - return Status::CANCELLED; - } - - response->set_message(request->message()); - MaybeEchoDeadline(context, request, response); - if (host_) { - response->mutable_param()->set_host(*host_); - } - if (request->has_param() && request->param().client_cancel_after_us()) { - { - std::unique_lock lock(mu_); - signal_client_ = true; - } - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().client_cancel_after_us(), - GPR_TIMESPAN))); - } - return Status::CANCELLED; - } else if (request->has_param() && - request->param().server_cancel_after_us()) { - gpr_sleep_until(gpr_time_add( - gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(request->param().server_cancel_after_us(), - GPR_TIMESPAN))); - return Status::CANCELLED; - } else if (!request->has_param() || - !request->param().skip_cancelled_check()) { - EXPECT_FALSE(context->IsCancelled()); - } - - if (request->has_param() && request->param().echo_metadata_initially()) { - const std::multimap& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddInitialMetadata(ToString(metadatum.first), - ToString(metadatum.second)); - } - } - - if (request->has_param() && request->param().echo_metadata()) { - const std::multimap& client_metadata = - context->client_metadata(); - for (const auto& metadatum : client_metadata) { - context->AddTrailingMetadata(ToString(metadatum.first), - ToString(metadatum.second)); - } - // Terminate rpc with error and debug info in trailer. - if (request->param().debug_info().stack_entries_size() || - !request->param().debug_info().detail().empty()) { - grpc::string serialized_debug_info = - request->param().debug_info().SerializeAsString(); - context->AddTrailingMetadata(kDebugInfoTrailerKey, serialized_debug_info); - return Status::CANCELLED; - } - } - if (request->has_param() && - (request->param().expected_client_identity().length() > 0 || - request->param().check_auth_context())) { - CheckServerAuthContext(context, - request->param().expected_transport_security_type(), - request->param().expected_client_identity()); - } - if (request->has_param() && request->param().response_message_length() > 0) { - response->set_message( - grpc::string(request->param().response_message_length(), '\0')); - } - if (request->has_param() && request->param().echo_peer()) { - response->mutable_param()->set_peer(context->peer()); - } - return Status::OK; -} - -Status TestServiceImpl::Echo1(ServerContext* context, - const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); -} - -Status TestServiceImpl::Echo2(ServerContext* context, - const EchoRequest* request, - EchoResponse* response) { - return Echo(context, request, response); -} - -Status TestServiceImpl::CheckClientInitialMetadata( - ServerContext* context, const SimpleRequest* /*request*/, - SimpleResponse* /*response*/) { - EXPECT_EQ(MetadataMatchCount(context->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), - 1); - EXPECT_EQ(1u, - context->client_metadata().count(kCheckClientInitialMetadataKey)); - return Status::OK; -} - -// Unimplemented is left unimplemented to test the returned error. 
- -Status TestServiceImpl::RequestStream(ServerContext* context, - ServerReader* reader, - EchoResponse* response) { - // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by - // the server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads - // any message from the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading messages from the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads - // all the messages from the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - response->set_message(""); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - int num_msgs_read = 0; - while (reader->Read(&request)) { - response->mutable_message()->append(request.message()); - } - gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; -} - -// Return 'kNumResponseStreamMsgs' messages. -// TODO(yangg) make it generic by adding a parameter into EchoRequest -Status TestServiceImpl::ResponseStream(ServerContext* context, - const EchoRequest* request, - ServerWriter* writer) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes - // any messages to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // writing messages to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes - // all the messages to the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - int server_coalescing_api = GetIntValueFromMetadata( - kServerUseCoalescingApi, context->client_metadata(), 0); - - int server_responses_to_send = GetIntValueFromMetadata( - kServerResponseStreamsToSend, context->client_metadata(), - kServerDefaultResponseStreamsToSend); - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - EchoResponse response; - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - for (int i = 0; i < server_responses_to_send; i++) { - response.set_message(request->message() + grpc::to_string(i)); - if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { - writer->WriteLast(response, WriteOptions()); - } else { - writer->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - 
- return Status::OK; -} - -Status TestServiceImpl::BidiStream( - ServerContext* context, - ServerReaderWriter* stream) { - // If server_try_cancel is set in the metadata, the RPC is cancelled by the - // server by calling ServerContext::TryCancel() depending on the value: - // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ - // writes any messages from/to the client - // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is - // reading/writing messages from/to the client - // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server - // reads/writes all messages from/to the client - int server_try_cancel = GetIntValueFromMetadata( - kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - - EchoRequest request; - EchoResponse response; - - if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - std::thread* server_try_cancel_thd = nullptr; - if (server_try_cancel == CANCEL_DURING_PROCESSING) { - server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); - } - - // kServerFinishAfterNReads suggests after how many reads, the server should - // write the last message and send status (coalesced using WriteLast) - int server_write_last = GetIntValueFromMetadata( - kServerFinishAfterNReads, context->client_metadata(), 0); - - int read_counts = 0; - while (stream->Read(&request)) { - read_counts++; - gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); - response.set_message(request.message()); - if (read_counts == server_write_last) { - stream->WriteLast(response, WriteOptions()); - } else { - stream->Write(response); - } - } - - if (server_try_cancel_thd != nullptr) { - server_try_cancel_thd->join(); - delete server_try_cancel_thd; - return Status::CANCELLED; - } - - if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); - return Status::CANCELLED; - } - - return Status::OK; -} - experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( experimental::CallbackServerContext* context, const EchoRequest* request, EchoResponse* response) { diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index e41359f9655..905f2cbea89 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -15,6 +15,7 @@ * limitations under the License. * */ + #ifndef GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H #define GRPC_TEST_CPP_END2END_TEST_SERVICE_IMPL_H @@ -23,9 +24,19 @@ #include #include +#include +#include +#include #include +#include + +#include +#include #include "src/proto/grpc/testing/echo.grpc.pb.h" +#include "test/cpp/util/string_ref_helper.h" + +using std::chrono::system_clock; namespace grpc { namespace testing { @@ -46,6 +57,92 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; +namespace { + +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds. 
+void MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } +} + +void CheckServerAuthContext( + const experimental::ServerContextBase* context, + const grpc::string& expected_transport_security_type, + const grpc::string& expected_client_identity) { + std::shared_ptr auth_ctx = context->auth_context(); + std::vector tst = + auth_ctx->FindPropertyValues("transport_security_type"); + EXPECT_EQ(1u, tst.size()); + EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); + if (expected_client_identity.empty()) { + EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); + EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); + EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); + } else { + auto identity = auth_ctx->GetPeerIdentity(); + EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); + EXPECT_EQ(1u, identity.size()); + EXPECT_EQ(expected_client_identity, identity[0]); + } +} + +// Returns the number of pairs in metadata that exactly match the given +// key-value pair. Returns -1 if the pair wasn't found. +int MetadataMatchCount( + const std::multimap& metadata, + const grpc::string& key, const grpc::string& value) { + int count = 0; + for (const auto& metadatum : metadata) { + if (ToString(metadatum.first) == key && + ToString(metadatum.second) == value) { + count++; + } + } + return count; +} +} // namespace + +namespace { +int GetIntValueFromMetadataHelper( + const char* key, + const std::multimap& metadata, + int default_value) { + if (metadata.find(key) != metadata.end()) { + std::istringstream iss(ToString(metadata.find(key)->second)); + iss >> default_value; + gpr_log(GPR_INFO, "%s : %d", key, default_value); + } + + return default_value; +} + +int GetIntValueFromMetadata( + const char* key, + const std::multimap& metadata, + int default_value) { + return GetIntValueFromMetadataHelper(key, metadata, default_value); +} + +void ServerTryCancel(ServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); + // Now wait until it's really canceled + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); + } +} + +} // namespace + class TestServiceSignaller { public: void ClientWaitUntilRpcStarted() { @@ -75,38 +172,309 @@ class TestServiceSignaller { bool server_should_continue_ /* GUARDED_BY(mu_) */ = false; }; -class TestServiceImpl : public ::grpc::testing::EchoTestService::Service { +template +class TestMultipleServiceImpl : public RpcService { public: - TestServiceImpl() : signal_client_(false), host_() {} - explicit TestServiceImpl(const grpc::string& host) + TestMultipleServiceImpl() : signal_client_(false), host_() {} + explicit TestMultipleServiceImpl(const grpc::string& host) : signal_client_(false), host_(new grpc::string(host)) {} Status Echo(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override; + EchoResponse* response) { + if (request->has_param() && + request->param().server_notify_client_when_started()) { + signaller_.SignalClientThatRpcStarted(); + signaller_.ServerWaitToContinue(); + 
} + + // A bit of sleep to make sure that short deadline tests fail + if (request->has_param() && request->param().server_sleep_us() > 0) { + gpr_sleep_until( + gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), + gpr_time_from_micros(request->param().server_sleep_us(), + GPR_TIMESPAN))); + } + + if (request->has_param() && request->param().server_die()) { + gpr_log(GPR_ERROR, "The request should not reach application handler."); + GPR_ASSERT(0); + } + if (request->has_param() && request->param().has_expected_error()) { + const auto& error = request->param().expected_error(); + return Status(static_cast(error.code()), + error.error_message(), error.binary_error_details()); + } + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + if (server_try_cancel > DO_NOT_CANCEL) { + // Since this is a unary RPC, by the time this server handler is called, + // the 'request' message is already read from the client. So the scenarios + // in server_try_cancel don't make much sense. Just cancel the RPC as long + // as server_try_cancel is not DO_NOT_CANCEL + ServerTryCancel(context); + return Status::CANCELLED; + } + + response->set_message(request->message()); + MaybeEchoDeadline(context, request, response); + if (host_) { + response->mutable_param()->set_host(*host_); + } + if (request->has_param() && request->param().client_cancel_after_us()) { + { + std::unique_lock lock(mu_); + signal_client_ = true; + } + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().client_cancel_after_us(), + GPR_TIMESPAN))); + } + return Status::CANCELLED; + } else if (request->has_param() && + request->param().server_cancel_after_us()) { + gpr_sleep_until(gpr_time_add( + gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(request->param().server_cancel_after_us(), + GPR_TIMESPAN))); + return Status::CANCELLED; + } else if (!request->has_param() || + !request->param().skip_cancelled_check()) { + EXPECT_FALSE(context->IsCancelled()); + } + + if (request->has_param() && request->param().echo_metadata_initially()) { + const std::multimap& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddInitialMetadata(ToString(metadatum.first), + ToString(metadatum.second)); + } + } + + if (request->has_param() && request->param().echo_metadata()) { + const std::multimap& client_metadata = + context->client_metadata(); + for (const auto& metadatum : client_metadata) { + context->AddTrailingMetadata(ToString(metadatum.first), + ToString(metadatum.second)); + } + // Terminate rpc with error and debug info in trailer. 
+ if (request->param().debug_info().stack_entries_size() || + !request->param().debug_info().detail().empty()) { + grpc::string serialized_debug_info = + request->param().debug_info().SerializeAsString(); + context->AddTrailingMetadata(kDebugInfoTrailerKey, + serialized_debug_info); + return Status::CANCELLED; + } + } + if (request->has_param() && + (request->param().expected_client_identity().length() > 0 || + request->param().check_auth_context())) { + CheckServerAuthContext( + context, request->param().expected_transport_security_type(), + request->param().expected_client_identity()); + } + if (request->has_param() && + request->param().response_message_length() > 0) { + response->set_message( + grpc::string(request->param().response_message_length(), '\0')); + } + if (request->has_param() && request->param().echo_peer()) { + response->mutable_param()->set_peer(context->peer()); + } + return Status::OK; + } Status Echo1(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override; + EchoResponse* response) { + return Echo(context, request, response); + } Status Echo2(ServerContext* context, const EchoRequest* request, - EchoResponse* response) override; + EchoResponse* response) { + return Echo(context, request, response); + } Status CheckClientInitialMetadata(ServerContext* context, - const SimpleRequest* request, - SimpleResponse* response) override; + const SimpleRequest* /*request*/, + SimpleResponse* /*response*/) { + EXPECT_EQ(MetadataMatchCount(context->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), + 1); + EXPECT_EQ(1u, + context->client_metadata().count(kCheckClientInitialMetadataKey)); + return Status::OK; + } // Unimplemented is left unimplemented to test the returned error. - Status RequestStream(ServerContext* context, ServerReader* reader, - EchoResponse* response) override; + EchoResponse* response) { + // If 'server_try_cancel' is set in the metadata, the RPC is cancelled by + // the server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads + // any message from the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading messages from the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads + // all the messages from the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + response->set_message(""); + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + int num_msgs_read = 0; + while (reader->Read(&request)) { + response->mutable_message()->append(request.message()); + } + gpr_log(GPR_INFO, "Read: %d messages", num_msgs_read); + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Return 'kNumResponseStreamMsgs' messages. 
+ // TODO(yangg) make it generic by adding a parameter into EchoRequest Status ResponseStream(ServerContext* context, const EchoRequest* request, - ServerWriter* writer) override; + ServerWriter* writer) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server writes + // any messages to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // writing messages to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes + // all the messages to the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - Status BidiStream( - ServerContext* context, - ServerReaderWriter* stream) override; + int server_coalescing_api = GetIntValueFromMetadata( + kServerUseCoalescingApi, context->client_metadata(), 0); + int server_responses_to_send = GetIntValueFromMetadata( + kServerResponseStreamsToSend, context->client_metadata(), + kServerDefaultResponseStreamsToSend); + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + EchoResponse response; + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + for (int i = 0; i < server_responses_to_send; i++) { + response.set_message(request->message() + grpc::to_string(i)); + if (i == server_responses_to_send - 1 && server_coalescing_api != 0) { + writer->WriteLast(response, WriteOptions()); + } else { + writer->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + Status BidiStream(ServerContext* context, + ServerReaderWriter* stream) { + // If server_try_cancel is set in the metadata, the RPC is cancelled by the + // server by calling ServerContext::TryCancel() depending on the value: + // CANCEL_BEFORE_PROCESSING: The RPC is cancelled before the server reads/ + // writes any messages from/to the client + // CANCEL_DURING_PROCESSING: The RPC is cancelled while the server is + // reading/writing messages from/to the client + // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server + // reads/writes all messages from/to the client + int server_try_cancel = GetIntValueFromMetadata( + kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); + + EchoRequest request; + EchoResponse response; + + if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + std::thread* server_try_cancel_thd = nullptr; + if (server_try_cancel == CANCEL_DURING_PROCESSING) { + server_try_cancel_thd = + new std::thread([context] { ServerTryCancel(context); }); + } + + // kServerFinishAfterNReads suggests after how many reads, the server should + // write the last message and send status (coalesced using WriteLast) + int server_write_last = GetIntValueFromMetadata( + kServerFinishAfterNReads, context->client_metadata(), 0); + + int read_counts = 0; + while (stream->Read(&request)) { + read_counts++; + gpr_log(GPR_INFO, "recv msg %s", request.message().c_str()); 
+ response.set_message(request.message()); + if (read_counts == server_write_last) { + stream->WriteLast(response, WriteOptions()); + } else { + stream->Write(response); + } + } + + if (server_try_cancel_thd != nullptr) { + server_try_cancel_thd->join(); + delete server_try_cancel_thd; + return Status::CANCELLED; + } + + if (server_try_cancel == CANCEL_AFTER_PROCESSING) { + ServerTryCancel(context); + return Status::CANCELLED; + } + + return Status::OK; + } + + // Unimplemented is left unimplemented to test the returned error. bool signal_client() { std::unique_lock lock(mu_); return signal_client_; @@ -162,6 +530,9 @@ class CallbackTestServiceImpl std::unique_ptr host_; }; +using TestServiceImpl = + TestMultipleServiceImpl<::grpc::testing::EchoTestService::Service>; + } // namespace testing } // namespace grpc diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 2665f9e9082..2e9d2ae501a 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -57,7 +57,7 @@ #include "test/core/util/port.h" #include "test/core/util/test_config.h" -#include "test/cpp/end2end/test_multiple_service_impl.h" +#include "test/cpp/end2end/test_service_impl.h" #include "src/proto/grpc/testing/echo.grpc.pb.h" #include "src/proto/grpc/testing/xds/ads_for_test.grpc.pb.h" @@ -2316,6 +2316,7 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); + WaitForAllBackends(0, 2); CheckRpcSendOk(kNumRpcs, 1000, true); CheckEcho1RpcSendOk(kNumRpcs, 1000, true); CheckEcho2RpcSendOk(kNumRpcs, 1000, true); From ddb98d6b52b6ad74674186b8a7b2b1bb9e4a02a6 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Sun, 12 Apr 2020 16:28:39 -0700 Subject: [PATCH 15/37] Fixing code review comments: - added more tests and improved structuring of backend service - fixing grpc_tool tests (due to adding of new test serices/methods --- .../ext/filters/client_channel/xds/xds_api.cc | 32 ++-- test/cpp/end2end/xds_end2end_test.cc | 162 +++++++++--------- test/cpp/util/grpc_tool_test.cc | 10 +- 3 files changed, 108 insertions(+), 96 deletions(-) diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 855d34a780d..ec3238682e4 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1021,21 +1021,25 @@ grpc_error* RouteConfigParse( const envoy_api_v2_route_RouteMatch* match = envoy_api_v2_route_Route_match(route); XdsApi::RdsRoute rds_route; - const upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); - const upb_strview path = envoy_api_v2_route_RouteMatch_path(match); - if (prefix.size > 0) { - std::vector prefix_elements = absl::StrSplit( - absl::string_view(prefix.data, prefix.size).substr(1), '/'); - if (prefix_elements.size() != 1) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix not in the required format of /service/"); + upb_strview prefix; + upb_strview path; + if (envoy_api_v2_route_RouteMatch_has_prefix(match)) { + upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); + if (prefix.size > 0) { + std::vector prefix_elements = absl::StrSplit( + absl::string_view(prefix.data, prefix.size).substr(1), '/'); + if (prefix_elements.size() != 1) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix not in the required format of /service/"); + } + 
rds_route.service = std::string(prefix_elements[0]); } - rds_route.service = std::string(prefix_elements[0]); - if (path.size > 0) { + } else if (envoy_api_v2_route_RouteMatch_has_path(match)) { + upb_strview path = envoy_api_v2_route_RouteMatch_path(match); + if (path.size == 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix is not empty string, path cannot also be non-empty."); + "Path if set cannot be empty"); } - } else if (path.size > 0) { std::vector path_elements = absl::StrSplit( absl::string_view(path.data, path.size).substr(1), '/'); if (path_elements.size() != 2) { @@ -1044,10 +1048,6 @@ grpc_error* RouteConfigParse( } rds_route.service = std::string(path_elements[0]); rds_route.method = std::string(path_elements[1]); - if (prefix.size > 0) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Path is not empty string, prefix cannot also be non-empty."); - } } if (!envoy_api_v2_route_Route_has_route(route)) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 2e9d2ae501a..f08702f623e 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -264,40 +264,12 @@ class BackendServiceImpl Status Echo1(ServerContext* context, const EchoRequest* request, EchoResponse* response) override { - // Backend should receive the call credentials metadata. - auto call_credentials_entry = - context->client_metadata().find(g_kCallCredsMdKey); - EXPECT_NE(call_credentials_entry, context->client_metadata().end()); - if (call_credentials_entry != context->client_metadata().end()) { - EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); - } - CountedService< - TestMultipleServiceImpl>::IncreaseResponseCount(); - const auto status = - TestMultipleServiceImpl::Echo1(context, request, response); - CountedService< - TestMultipleServiceImpl>::IncreaseResponseCount(); - AddClient(context->peer()); - return status; + return (Echo(context, request, response)); } Status Echo2(ServerContext* context, const EchoRequest* request, EchoResponse* response) override { - // Backend should receive the call credentials metadata. - auto call_credentials_entry = - context->client_metadata().find(g_kCallCredsMdKey); - EXPECT_NE(call_credentials_entry, context->client_metadata().end()); - if (call_credentials_entry != context->client_metadata().end()) { - EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue); - } - CountedService< - TestMultipleServiceImpl>::IncreaseResponseCount(); - const auto status = - TestMultipleServiceImpl::Echo2(context, request, response); - CountedService< - TestMultipleServiceImpl>::IncreaseResponseCount(); - AddClient(context->peer()); - return status; + return (Echo(context, request, response)); } void Start() {} @@ -1188,7 +1160,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { void ResetStub(int fallback_timeout = 0, int failover_timeout = 0, const grpc::string& expected_targets = "", int xds_resource_does_not_exist_timeout = 0, - int xds_routing_enabled = false) { + bool xds_routing_enabled = false) { ChannelArguments args; // TODO(juanlishen): Add setter to ChannelArguments. 
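
// A rough standalone sketch (illustrative only, not part of this patch) of the
// matching rules enforced by the RouteConfigParse() changes above: a
// RouteMatch.prefix is expected to name a whole service, while a
// RouteMatch.path names a single method, and both are split on '/' after
// dropping the leading slash. The names below (RdsRouteSketch, SplitGrpcPath)
// are made up for illustration and do not exist in the tree.
#include <iostream>
#include <string>
#include <vector>

struct RdsRouteSketch {
  std::string service;
  std::string method;  // empty when only a service (prefix) was matched
};

static std::vector<std::string> SplitGrpcPath(const std::string& s) {
  std::vector<std::string> parts;
  std::string cur;
  for (size_t i = 1; i < s.size(); ++i) {  // skip the leading '/'
    if (s[i] == '/') {
      parts.push_back(cur);
      cur.clear();
    } else {
      cur.push_back(s[i]);
    }
  }
  if (!cur.empty()) parts.push_back(cur);
  return parts;
}

int main() {
  // Prefix match: routes every method of EchoTest1Service.
  RdsRouteSketch by_service{
      SplitGrpcPath("/grpc.testing.EchoTest1Service/")[0], ""};
  // Path match: routes exactly one method of EchoTest2Service.
  std::vector<std::string> parts =
      SplitGrpcPath("/grpc.testing.EchoTest2Service/Echo2");
  RdsRouteSketch by_method{parts[0], parts[1]};
  std::cout << by_service.service << "\n"
            << by_method.service << "/" << by_method.method << "\n";
  return 0;
}
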
if (fallback_timeout > 0) { @@ -1557,31 +1529,31 @@ class XdsEnd2endTest : public ::testing::TestWithParam { return &backend_service_; } BackendServiceImpl<::grpc::testing::EchoTest1Service::Service>* - backend1_service() { - return &backend1_service_; + backend_service1() { + return &backend_service1_; } BackendServiceImpl<::grpc::testing::EchoTest2Service::Service>* - backend2_service() { - return &backend2_service_; + backend_service2() { + return &backend_service2_; } private: void RegisterAllServices(ServerBuilder* builder) override { builder->RegisterService(&backend_service_); - builder->RegisterService(&backend1_service_); - builder->RegisterService(&backend2_service_); + builder->RegisterService(&backend_service1_); + builder->RegisterService(&backend_service2_); } void StartAllServices() override { backend_service_.Start(); - backend1_service_.Start(); - backend2_service_.Start(); + backend_service1_.Start(); + backend_service2_.Start(); } void ShutdownAllServices() override { backend_service_.Shutdown(); - backend1_service_.Shutdown(); - backend2_service_.Shutdown(); + backend_service1_.Shutdown(); + backend_service2_.Shutdown(); } const char* Type() override { return "Backend"; } @@ -1589,9 +1561,9 @@ class XdsEnd2endTest : public ::testing::TestWithParam { BackendServiceImpl<::grpc::testing::EchoTestService::Service> backend_service_; BackendServiceImpl<::grpc::testing::EchoTest1Service::Service> - backend1_service_; + backend_service1_; BackendServiceImpl<::grpc::testing::EchoTest2Service::Service> - backend2_service_; + backend_service2_; }; class BalancerServerThread : public ServerThread { @@ -2220,6 +2192,24 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { AdsServiceImpl::NACKED); } +// Tests that LDS client should send a NACK if route match has empty path +// as the only route (default) in the LDS response. +TEST_P(LdsTest, RouteMatchHasEmptyPath) { + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + route_config.mutable_virtual_hosts(0) + ->mutable_routes(0) + ->mutable_match() + ->set_path(""); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); +} + // Tests that LDS client should send a NACK if route has an action other than // RouteAction in the LDS response. TEST_P(LdsTest, RouteHasNoRouteAction) { @@ -2265,10 +2255,15 @@ TEST_P(LdsTest, Timeout) { // Tests that LDS client should choose the default route (with no matching // specified) after unable to find a match with previous routes. TEST_P(LdsTest, XdsRoutingPathMatching) { - ResetStub(0, 0, "", 0, 1); + ResetStub(/*fallback_timeout=*/0, /*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; - const size_t kNumRpcs = 10; + const size_t kNumEcho1Rpcs = 10; + const size_t kNumEcho2Rpcs = 20; + const size_t kNumEchoRpcs = 30; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); // Populate new EDS resources. 
@@ -2317,41 +2312,48 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumRpcs, 1000, true); - CheckEcho1RpcSendOk(kNumRpcs, 1000, true); - CheckEcho2RpcSendOk(kNumRpcs, 1000, true); + CheckRpcSendOk(kNumEchoRpcs, 1000, true); + CheckEcho1RpcSendOk(kNumEcho1Rpcs, 1000, true); + CheckEcho2RpcSendOk(kNumEcho2Rpcs, 1000, true); // Make sure RPCs all go to the correct backend. - for (size_t i = 0; i < 4; ++i) { - if (i == 2) { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(kNumRpcs, backends_[i]->backend1_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); - } else if (i == 3) { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); - EXPECT_EQ(kNumRpcs, backends_[i]->backend2_service()->request_count()); - } else { - EXPECT_EQ(kNumRpcs / 2, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); - } + for (size_t i = 0; i < 2; ++i) { + EXPECT_EQ(kNumEchoRpcs / 2, + backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); } + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); } TEST_P(LdsTest, XdsRoutingPrefixMatching) { - ResetStub(0, 0, "", 0, 1); + ResetStub(/*fallback_timeout=*/0, /*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); const char* kNewCluster1Name = "new_cluster_1"; const char* kNewCluster2Name = "new_cluster_2"; - const size_t kNumRpcs = 10; + const size_t kNumEcho1Rpcs = 10; + const size_t kNumEcho2Rpcs = 20; + const size_t kNumEchoRpcs = 30; SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); // Populate new EDS resources. 
+ AdsServiceImpl::EdsResourceArgs args({ + {"locality0", GetBackendPorts(0, 2)}, + }); AdsServiceImpl::EdsResourceArgs args1({ {"locality0", GetBackendPorts(2, 3)}, }); AdsServiceImpl::EdsResourceArgs args2({ {"locality0", GetBackendPorts(3, 4)}, }); + balancers_[0]->ads_service()->SetEdsResource( + AdsServiceImpl::BuildEdsResource(args), kDefaultResourceName); balancers_[0]->ads_service()->SetEdsResource( AdsServiceImpl::BuildEdsResource(args1, kNewCluster1Name), kNewCluster1Name); @@ -2372,10 +2374,10 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { auto* mismatched_route = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); mismatched_route->mutable_match()->set_prefix( - "/grpc.testing.EchoTestService"); + "/grpc.testing.EchoTest1Service"); mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); + matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service"); matched_route->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2383,19 +2385,23 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { Listener listener = balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); - CheckEcho1RpcSendOk(kNumRpcs, 1000, true); + WaitForAllBackends(0, 2); + CheckRpcSendOk(kNumEchoRpcs, 1000, true); + CheckEcho1RpcSendOk(kNumEcho1Rpcs, 1000, true); + CheckEcho2RpcSendOk(kNumEcho2Rpcs, 1000, true); // Make sure RPCs all go to the correct backend. - for (size_t i = 0; i < 4; ++i) { - if (i == 3) { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(kNumRpcs, backends_[i]->backend1_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); - } else { - EXPECT_EQ(0, backends_[i]->backend_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend1_service()->request_count()); - EXPECT_EQ(0, backends_[i]->backend2_service()->request_count()); - } - } + for (size_t i = 0; i < 2; ++i) { + EXPECT_EQ(kNumEchoRpcs / 2, + backends_[i]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[i]->backend_service2()->request_count()); + } + EXPECT_EQ(0, backends_[2]->backend_service()->request_count()); + EXPECT_EQ(kNumEcho1Rpcs, backends_[2]->backend_service1()->request_count()); + EXPECT_EQ(0, backends_[2]->backend_service2()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service()->request_count()); + EXPECT_EQ(0, backends_[3]->backend_service1()->request_count()); + EXPECT_EQ(kNumEcho2Rpcs, backends_[3]->backend_service2()->request_count()); } using RdsTest = BasicTest; diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index e44ada46c24..e138a3d9e16 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -48,6 +48,8 @@ using grpc::testing::EchoResponse; #define ECHO_TEST_SERVICE_SUMMARY \ "Echo\n" \ + "Echo1\n" \ + "Echo2\n" \ "CheckClientInitialMetadata\n" \ "RequestStream\n" \ "ResponseStream\n" \ @@ -60,6 +62,10 @@ using grpc::testing::EchoResponse; "service EchoTestService {\n" \ " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ "{}\n" \ + " rpc Echo1(grpc.testing.EchoRequest) returns 
(grpc.testing.EchoResponse) " \ + "{}\n" \ + " rpc Echo2(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ + "{}\n" \ " rpc CheckClientInitialMetadata(grpc.testing.SimpleRequest) returns " \ "(grpc.testing.SimpleResponse) {}\n" \ " rpc RequestStream(stream grpc.testing.EchoRequest) returns " \ @@ -1101,7 +1107,7 @@ TEST_F(GrpcToolTest, CallCommandWithMetadata) { ShutdownServer(); } -TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { +/*TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { // Test input "grpc_cli call localhost:10000 Echo "message: 'Hello'" const char* argv[] = {"grpc_cli", "call", "localhost:10000", "Echo", "message: 'Hello'"}; @@ -1137,7 +1143,7 @@ TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { FLAGS_protofiles = ""; gpr_free(test_srcdir); -} +}*/ TEST_F(GrpcToolTest, ListCommand_OverrideSslHostName) { const grpc::string server_address = SetUpServer(true); From d76fc080c401247f1b49e2749f80d005edfc9936 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 10:32:23 -0700 Subject: [PATCH 16/37] Fixing build warnings getting ready for merge. --- BUILD | 2 +- test/cpp/util/grpc_tool_test.cc | 42 ++++++++++++++++----------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/BUILD b/BUILD index 908c7ebb814..e0379fc1a2b 100644 --- a/BUILD +++ b/BUILD @@ -1458,7 +1458,7 @@ grpc_cc_library( name = "grpc_lb_policy_xds_routing", srcs = [ "src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc", - ], + ], language = "c++", deps = [ "grpc_base", diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index e138a3d9e16..d8ce3304560 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -48,35 +48,35 @@ using grpc::testing::EchoResponse; #define ECHO_TEST_SERVICE_SUMMARY \ "Echo\n" \ - "Echo1\n" \ - "Echo2\n" \ + "Echo1\n" \ + "Echo2\n" \ "CheckClientInitialMetadata\n" \ "RequestStream\n" \ "ResponseStream\n" \ "BidiStream\n" \ "Unimplemented\n" -#define ECHO_TEST_SERVICE_DESCRIPTION \ - "filename: src/proto/grpc/testing/echo.proto\n" \ - "package: grpc.testing;\n" \ - "service EchoTestService {\n" \ - " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ +#define ECHO_TEST_SERVICE_DESCRIPTION \ + "filename: src/proto/grpc/testing/echo.proto\n" \ + "package: grpc.testing;\n" \ + "service EchoTestService {\n" \ + " rpc Echo(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ + "{}\n" \ " rpc Echo1(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ + "{}\n" \ " rpc Echo2(grpc.testing.EchoRequest) returns (grpc.testing.EchoResponse) " \ - "{}\n" \ - " rpc CheckClientInitialMetadata(grpc.testing.SimpleRequest) returns " \ - "(grpc.testing.SimpleResponse) {}\n" \ - " rpc RequestStream(stream grpc.testing.EchoRequest) returns " \ - "(grpc.testing.EchoResponse) {}\n" \ - " rpc ResponseStream(grpc.testing.EchoRequest) returns (stream " \ - "grpc.testing.EchoResponse) {}\n" \ - " rpc BidiStream(stream grpc.testing.EchoRequest) returns (stream " \ - "grpc.testing.EchoResponse) {}\n" \ - " rpc Unimplemented(grpc.testing.EchoRequest) returns " \ - "(grpc.testing.EchoResponse) {}\n" \ - "}\n" \ + "{}\n" \ + " rpc CheckClientInitialMetadata(grpc.testing.SimpleRequest) returns " \ + "(grpc.testing.SimpleResponse) {}\n" \ + " rpc RequestStream(stream grpc.testing.EchoRequest) returns " \ + "(grpc.testing.EchoResponse) {}\n" \ + " rpc ResponseStream(grpc.testing.EchoRequest) returns (stream " \ + 
"grpc.testing.EchoResponse) {}\n" \ + " rpc BidiStream(stream grpc.testing.EchoRequest) returns (stream " \ + "grpc.testing.EchoResponse) {}\n" \ + " rpc Unimplemented(grpc.testing.EchoRequest) returns " \ + "(grpc.testing.EchoResponse) {}\n" \ + "}\n" \ "\n" #define ECHO_METHOD_DESCRIPTION \ From 4fae974ffae4bf31dc9d6d629c42aa8e6b73f1e6 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 14:41:42 -0700 Subject: [PATCH 17/37] A merge error was missed! --- build_autogenerated.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 56b0b288ad6..9d7005254e2 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -1592,13 +1592,10 @@ libs: - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc - src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc - src/core/ext/filters/client_channel/lb_policy/xds/cds.cc -<<<<<<< HEAD - - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc -======= - src/core/ext/filters/client_channel/lb_policy/xds/eds.cc - src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc ->>>>>>> upstream/master + - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc - src/core/ext/filters/client_channel/lb_policy_registry.cc - src/core/ext/filters/client_channel/local_subchannel_pool.cc - src/core/ext/filters/client_channel/parse_address.cc From 6a1ea0bacc4151c8e38c62b5cb040cd827b96ee8 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 14:52:46 -0700 Subject: [PATCH 18/37] Another Merge error --- CMakeLists.txt | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d23e3425c02..23fe27d0942 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1988,13 +1988,10 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc -<<<<<<< HEAD - src/core/ext/filters/client_channel/lb_policy/xds/xds.cc src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc -======= src/core/ext/filters/client_channel/lb_policy/xds/eds.cc src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc ->>>>>>> upstream/master + src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc src/core/ext/filters/client_channel/lb_policy_registry.cc src/core/ext/filters/client_channel/local_subchannel_pool.cc src/core/ext/filters/client_channel/parse_address.cc From 6864760acbdf106aa2236c1e70da6c1fad16cd0a Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 15:40:57 -0700 Subject: [PATCH 19/37] Remove unused variables. 
--- src/core/ext/filters/client_channel/xds/xds_api.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index ec3238682e4..8a19d2b7dae 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1021,8 +1021,6 @@ grpc_error* RouteConfigParse( const envoy_api_v2_route_RouteMatch* match = envoy_api_v2_route_Route_match(route); XdsApi::RdsRoute rds_route; - upb_strview prefix; - upb_strview path; if (envoy_api_v2_route_RouteMatch_has_prefix(match)) { upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); if (prefix.size > 0) { From 73fc26d273823256316c743a0a2366fc572bfbdb Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 16:02:13 -0700 Subject: [PATCH 20/37] Checking in result of sudo ./tools/buildgen/generate_projects.sh --- CMakeLists.txt | 2 -- Makefile | 4 ---- build_autogenerated.yaml | 2 -- 3 files changed, 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 23fe27d0942..95b67085e6a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1988,7 +1988,6 @@ add_library(grpc_unsecure src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc src/core/ext/filters/client_channel/lb_policy/xds/eds.cc src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -14453,7 +14452,6 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.pb.h ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h - test/cpp/end2end/test_service_impl.cc test/cpp/end2end/xds_end2end_test.cc third_party/googletest/googletest/src/gtest-all.cc third_party/googletest/googlemock/src/gmock-all.cc diff --git a/Makefile b/Makefile index 788d076b89e..5c143303bda 100644 --- a/Makefile +++ b/Makefile @@ -19036,7 +19036,6 @@ XDS_END2END_TEST_SRC = \ $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc \ $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc \ $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc \ - test/cpp/end2end/test_service_impl.cc \ test/cpp/end2end/xds_end2end_test.cc \ XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC)))) @@ -19086,8 +19085,6 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lds_rds_for_test.o: $(LIBDIR)/$( $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lrs_for_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a 
$(LIBDIR)/$(CONFIG)/libupb.a - $(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep) @@ -19097,7 +19094,6 @@ ifneq ($(NO_DEPS),true) -include $(XDS_END2END_TEST_OBJS:.o=.dep) endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc $(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 9d7005254e2..662af21ae8d 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -1592,7 +1592,6 @@ libs: - src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc - src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc - src/core/ext/filters/client_channel/lb_policy/xds/cds.cc - - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc - src/core/ext/filters/client_channel/lb_policy/xds/eds.cc - src/core/ext/filters/client_channel/lb_policy/xds/lrs.cc - src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -7560,7 +7559,6 @@ targets: - src/proto/grpc/testing/xds/eds_for_test.proto - src/proto/grpc/testing/xds/lds_rds_for_test.proto - src/proto/grpc/testing/xds/lrs_for_test.proto - - 
test/cpp/end2end/test_service_impl.cc - test/cpp/end2end/xds_end2end_test.cc deps: - grpc++_test_util From f54891aff8841b7ccd4ad02bc720079a42b0ba3b Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 18:11:10 -0700 Subject: [PATCH 21/37] Fixing a build error. --- .../ext/filters/client_channel/lb_policy/xds/xds_routing.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 40174672e96..473870eacac 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -215,7 +215,7 @@ class XdsRoutingLb : public LoadBalancingPolicy { XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { absl::string_view path; - for (const auto& p : *(args.initial_metadata)) { + for (const auto p : *(args.initial_metadata)) { if (p.first == ":path") { path = p.second; break; From 6ec6c24dc13bc0a87a76853475b19e380565a45e Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Mon, 13 Apr 2020 22:16:42 -0700 Subject: [PATCH 22/37] Fixing code review comments. --- .../ext/filters/client_channel/xds/xds_api.cc | 4 + test/cpp/end2end/test_service_impl.h | 168 +++++++++--------- test/cpp/end2end/xds_end2end_test.cc | 48 +++-- test/cpp/util/grpc_tool_test.cc | 7 +- 4 files changed, 110 insertions(+), 117 deletions(-) diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 8a19d2b7dae..13d5b0bd4d2 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1046,6 +1046,10 @@ grpc_error* RouteConfigParse( } rds_route.service = std::string(path_elements[0]); rds_route.method = std::string(path_elements[1]); + } else { + // TODO(donnadionne): We may change this behavior once we decide how to + // handle unsupported fields. + continue; } if (!envoy_api_v2_route_Route_has_route(route)) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 905f2cbea89..1fed3e55d61 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -57,92 +57,6 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; -namespace { - -// When echo_deadline is requested, deadline seen in the ServerContext is set in -// the response in seconds. 
-void MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, EchoResponse* response) { - if (request->has_param() && request->param().echo_deadline()) { - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); - if (context->deadline() != system_clock::time_point::max()) { - Timepoint2Timespec(context->deadline(), &deadline); - } - response->mutable_param()->set_request_deadline(deadline.tv_sec); - } -} - -void CheckServerAuthContext( - const experimental::ServerContextBase* context, - const grpc::string& expected_transport_security_type, - const grpc::string& expected_client_identity) { - std::shared_ptr auth_ctx = context->auth_context(); - std::vector tst = - auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); - EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); - if (expected_client_identity.empty()) { - EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); - EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); - EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); - } else { - auto identity = auth_ctx->GetPeerIdentity(); - EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); - EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity, identity[0]); - } -} - -// Returns the number of pairs in metadata that exactly match the given -// key-value pair. Returns -1 if the pair wasn't found. -int MetadataMatchCount( - const std::multimap& metadata, - const grpc::string& key, const grpc::string& value) { - int count = 0; - for (const auto& metadatum : metadata) { - if (ToString(metadatum.first) == key && - ToString(metadatum.second) == value) { - count++; - } - } - return count; -} -} // namespace - -namespace { -int GetIntValueFromMetadataHelper( - const char* key, - const std::multimap& metadata, - int default_value) { - if (metadata.find(key) != metadata.end()) { - std::istringstream iss(ToString(metadata.find(key)->second)); - iss >> default_value; - gpr_log(GPR_INFO, "%s : %d", key, default_value); - } - - return default_value; -} - -int GetIntValueFromMetadata( - const char* key, - const std::multimap& metadata, - int default_value) { - return GetIntValueFromMetadataHelper(key, metadata, default_value); -} - -void ServerTryCancel(ServerContext* context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - // Now wait until it's really canceled - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000, GPR_TIMESPAN))); - } -} - -} // namespace - class TestServiceSignaller { public: void ClientWaitUntilRpcStarted() { @@ -179,6 +93,87 @@ class TestMultipleServiceImpl : public RpcService { explicit TestMultipleServiceImpl(const grpc::string& host) : signal_client_(false), host_(new grpc::string(host)) {} + // When echo_deadline is requested, deadline seen in the ServerContext is set + // in the response in seconds. 
+ void static MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, + EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } + } + + void static CheckServerAuthContext( + const experimental::ServerContextBase* context, + const grpc::string& expected_transport_security_type, + const grpc::string& expected_client_identity) { + std::shared_ptr auth_ctx = context->auth_context(); + std::vector tst = + auth_ctx->FindPropertyValues("transport_security_type"); + EXPECT_EQ(1u, tst.size()); + EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); + if (expected_client_identity.empty()) { + EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); + EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); + EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); + } else { + auto identity = auth_ctx->GetPeerIdentity(); + EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); + EXPECT_EQ(1u, identity.size()); + EXPECT_EQ(expected_client_identity, identity[0]); + } + } + + // Returns the number of pairs in metadata that exactly match the given + // key-value pair. Returns -1 if the pair wasn't found. + int static MetadataMatchCount( + const std::multimap& metadata, + const grpc::string& key, const grpc::string& value) { + int count = 0; + for (const auto& metadatum : metadata) { + if (ToString(metadatum.first) == key && + ToString(metadatum.second) == value) { + count++; + } + } + return count; + } + + int static GetIntValueFromMetadataHelper( + const char* key, + const std::multimap& metadata, + int default_value) { + if (metadata.find(key) != metadata.end()) { + std::istringstream iss(ToString(metadata.find(key)->second)); + iss >> default_value; + gpr_log(GPR_INFO, "%s : %d", key, default_value); + } + + return default_value; + } + + int static GetIntValueFromMetadata( + const char* key, + const std::multimap& metadata, + int default_value) { + return GetIntValueFromMetadataHelper(key, metadata, default_value); + } + + void static ServerTryCancel(ServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); + // Now wait until it's really canceled + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); + } + } + Status Echo(ServerContext* context, const EchoRequest* request, EchoResponse* response) { if (request->has_param() && @@ -311,6 +306,7 @@ class TestMultipleServiceImpl : public RpcService { } // Unimplemented is left unimplemented to test the returned error. 
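
// Hypothetical client-side sketch (not part of this patch) showing how a test
// drives the server_try_cancel handling in these streaming handlers: the
// desired cancellation phase is sent as an integer under the
// kServerTryCancelRequest metadata key, and the handler reads it back through
// GetIntValueFromMetadata(). The stub variable here is an assumed
// EchoTestService::Stub created by the test.
ClientContext context;
context.AddMetadata(kServerTryCancelRequest,
                    std::to_string(CANCEL_DURING_PROCESSING));
EchoRequest request;
request.set_message("hello");
auto reader = stub->ResponseStream(&context, request);
EchoResponse response;
while (reader->Read(&response)) {
  // Messages received before the server calls TryCancel().
}
Status status = reader->Finish();
EXPECT_EQ(status.error_code(), StatusCode::CANCELLED);
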
+ Status RequestStream(ServerContext* context, ServerReader* reader, EchoResponse* response) { diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index f08702f623e..0b1a474d265 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -264,12 +264,12 @@ class BackendServiceImpl Status Echo1(ServerContext* context, const EchoRequest* request, EchoResponse* response) override { - return (Echo(context, request, response)); + return Echo(context, request, response); } Status Echo2(ServerContext* context, const EchoRequest* request, EchoResponse* response) override { - return (Echo(context, request, response)); + return Echo(context, request, response); } void Start() {} @@ -2197,10 +2197,10 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { TEST_P(LdsTest, RouteMatchHasEmptyPath) { RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); - route_config.mutable_virtual_hosts(0) - ->mutable_routes(0) - ->mutable_match() - ->set_path(""); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_path(""); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); SetNextResolution({}); @@ -2291,20 +2291,15 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); new_cluster2.set_name(kNewCluster2Name); balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); - // Change RDS resource to set up prefix matching to direct traffic to the - // second new cluster. + // Populating Route Configurations for LDS. RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - auto* mismatched_route1 = - new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - mismatched_route1->mutable_match()->set_path( - "/grpc.testing.EchoTest1Service/Echo1"); - mismatched_route1->mutable_route()->set_cluster(kNewCluster1Name); - auto* mismatched_route2 = - new_route_config.mutable_virtual_hosts(0)->add_routes(); - mismatched_route2->mutable_match()->set_path( - "/grpc.testing.EchoTest2Service/Echo2"); - mismatched_route2->mutable_route()->set_cluster(kNewCluster2Name); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + route2->mutable_match()->set_path("/grpc.testing.EchoTest2Service/Echo2"); + route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); default_route->mutable_route()->set_cluster(kDefaultResourceName); @@ -2367,18 +2362,15 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { Cluster new_cluster2 = balancers_[0]->ads_service()->default_cluster(); new_cluster2.set_name(kNewCluster2Name); balancers_[0]->ads_service()->SetCdsResource(new_cluster2, kNewCluster2Name); - // Change RDS resource to set up prefix matching to direct traffic to the - // second new cluster. + // Populating Route Configurations for LDS. 
RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); - auto* mismatched_route = - new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - mismatched_route->mutable_match()->set_prefix( - "/grpc.testing.EchoTest1Service"); - mismatched_route->mutable_route()->set_cluster(kNewCluster1Name); - auto* matched_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); - matched_route->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service"); - matched_route->mutable_route()->set_cluster(kNewCluster2Name); + auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); + route1->mutable_route()->set_cluster(kNewCluster1Name); + auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); + route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service"); + route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); default_route->mutable_route()->set_cluster(kDefaultResourceName); diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index d8ce3304560..b353033f874 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -1107,9 +1107,10 @@ TEST_F(GrpcToolTest, CallCommandWithMetadata) { ShutdownServer(); } -/*TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { +TEST_F(GrpcToolTest, CallCommandWithBadMetadata) { // Test input "grpc_cli call localhost:10000 Echo "message: 'Hello'" - const char* argv[] = {"grpc_cli", "call", "localhost:10000", "Echo", + const char* argv[] = {"grpc_cli", "call", "localhost:10000", + "grpc.testing.EchoTestService.Echo", "message: 'Hello'"}; FLAGS_protofiles = "src/proto/grpc/testing/echo.proto"; char* test_srcdir = gpr_getenv("TEST_SRCDIR"); @@ -1143,7 +1144,7 @@ TEST_F(GrpcToolTest, CallCommandWithMetadata) { FLAGS_protofiles = ""; gpr_free(test_srcdir); -}*/ +} TEST_F(GrpcToolTest, ListCommand_OverrideSslHostName) { const grpc::string server_address = SetUpServer(true); From 019b2069268375bf386333ad7de2251ebd2f538f Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 00:04:31 -0700 Subject: [PATCH 23/37] Fixing CR comments and build errors. --- test/cpp/end2end/test_service_impl.h | 162 +++++++++++++-------------- test/cpp/end2end/xds_end2end_test.cc | 107 ++++++------------ 2 files changed, 114 insertions(+), 155 deletions(-) diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 1fed3e55d61..4e12201bacd 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -57,6 +57,86 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds. 
+void MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } +} + +void CheckServerAuthContext( + const experimental::ServerContextBase* context, + const grpc::string& expected_transport_security_type, + const grpc::string& expected_client_identity) { + std::shared_ptr auth_ctx = context->auth_context(); + std::vector tst = + auth_ctx->FindPropertyValues("transport_security_type"); + EXPECT_EQ(1u, tst.size()); + EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); + if (expected_client_identity.empty()) { + EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); + EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); + EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); + } else { + auto identity = auth_ctx->GetPeerIdentity(); + EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); + EXPECT_EQ(1u, identity.size()); + EXPECT_EQ(expected_client_identity, identity[0]); + } +} + +// Returns the number of pairs in metadata that exactly match the given +// key-value pair. Returns -1 if the pair wasn't found. +int MetadataMatchCount( + const std::multimap& metadata, + const grpc::string& key, const grpc::string& value) { + int count = 0; + for (const auto& metadatum : metadata) { + if (ToString(metadatum.first) == key && + ToString(metadatum.second) == value) { + count++; + } + } + return count; +} + +int GetIntValueFromMetadataHelper( + const char* key, + const std::multimap& metadata, + int default_value) { + if (metadata.find(key) != metadata.end()) { + std::istringstream iss(ToString(metadata.find(key)->second)); + iss >> default_value; + gpr_log(GPR_INFO, "%s : %d", key, default_value); + } + + return default_value; +} + +int GetIntValueFromMetadata( + const char* key, + const std::multimap& metadata, + int default_value) { + return GetIntValueFromMetadataHelper(key, metadata, default_value); +} + +void ServerTryCancel(ServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); + // Now wait until it's really canceled + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); + } +} + class TestServiceSignaller { public: void ClientWaitUntilRpcStarted() { @@ -93,87 +173,6 @@ class TestMultipleServiceImpl : public RpcService { explicit TestMultipleServiceImpl(const grpc::string& host) : signal_client_(false), host_(new grpc::string(host)) {} - // When echo_deadline is requested, deadline seen in the ServerContext is set - // in the response in seconds. 
- void static MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, - EchoResponse* response) { - if (request->has_param() && request->param().echo_deadline()) { - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); - if (context->deadline() != system_clock::time_point::max()) { - Timepoint2Timespec(context->deadline(), &deadline); - } - response->mutable_param()->set_request_deadline(deadline.tv_sec); - } - } - - void static CheckServerAuthContext( - const experimental::ServerContextBase* context, - const grpc::string& expected_transport_security_type, - const grpc::string& expected_client_identity) { - std::shared_ptr auth_ctx = context->auth_context(); - std::vector tst = - auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); - EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); - if (expected_client_identity.empty()) { - EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); - EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); - EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); - } else { - auto identity = auth_ctx->GetPeerIdentity(); - EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); - EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity, identity[0]); - } - } - - // Returns the number of pairs in metadata that exactly match the given - // key-value pair. Returns -1 if the pair wasn't found. - int static MetadataMatchCount( - const std::multimap& metadata, - const grpc::string& key, const grpc::string& value) { - int count = 0; - for (const auto& metadatum : metadata) { - if (ToString(metadatum.first) == key && - ToString(metadatum.second) == value) { - count++; - } - } - return count; - } - - int static GetIntValueFromMetadataHelper( - const char* key, - const std::multimap& metadata, - int default_value) { - if (metadata.find(key) != metadata.end()) { - std::istringstream iss(ToString(metadata.find(key)->second)); - iss >> default_value; - gpr_log(GPR_INFO, "%s : %d", key, default_value); - } - - return default_value; - } - - int static GetIntValueFromMetadata( - const char* key, - const std::multimap& metadata, - int default_value) { - return GetIntValueFromMetadataHelper(key, metadata, default_value); - } - - void static ServerTryCancel(ServerContext* context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - // Now wait until it's really canceled - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000, GPR_TIMESPAN))); - } - } - Status Echo(ServerContext* context, const EchoRequest* request, EchoResponse* response) { if (request->has_param() && @@ -306,7 +305,6 @@ class TestMultipleServiceImpl : public RpcService { } // Unimplemented is left unimplemented to test the returned error. 
- Status RequestStream(ServerContext* context, ServerReader* reader, EchoResponse* response) { diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 0b1a474d265..204df6f8687 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1363,7 +1363,8 @@ class XdsEnd2endTest : public ::testing::TestWithParam { return backend_ports; } - Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000, + Status SendRpc(const string& method_name = "Echo", + EchoResponse* response = nullptr, int timeout_ms = 1000, bool wait_for_ready = false, bool server_fail = false) { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; @@ -1376,44 +1377,26 @@ class XdsEnd2endTest : public ::testing::TestWithParam { ClientContext context; context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); if (wait_for_ready) context.set_wait_for_ready(true); - Status status = stub_->Echo(&context, request, response); - if (local_response) delete response; - return status; - } - - Status SendEcho1Rpc(EchoResponse* response = nullptr, int timeout_ms = 1000, - bool wait_for_ready = false) { - const bool local_response = (response == nullptr); - if (local_response) response = new EchoResponse; - EchoRequest request; - request.set_message(kRequestMessage_); - ClientContext context; - context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); - if (wait_for_ready) context.set_wait_for_ready(true); - Status status = stub1_->Echo1(&context, request, response); - if (local_response) delete response; - return status; - } - - Status SendEcho2Rpc(EchoResponse* response = nullptr, int timeout_ms = 1000, - bool wait_for_ready = false) { - const bool local_response = (response == nullptr); - if (local_response) response = new EchoResponse; - EchoRequest request; - request.set_message(kRequestMessage_); - ClientContext context; - context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); - if (wait_for_ready) context.set_wait_for_ready(true); - Status status = stub2_->Echo2(&context, request, response); + Status status; + if (method_name == "Echo") { + status = stub_->Echo(&context, request, response); + } else if (method_name == "Echo1") { + status = stub1_->Echo1(&context, request, response); + } else if (method_name == "Echo2") { + status = stub2_->Echo2(&context, request, response); + } if (local_response) delete response; return status; } - void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000, + void CheckRpcSendOk(const size_t times = 1, + const string& method_name = "Echo", + const int timeout_ms = 1000, bool wait_for_ready = false) { for (size_t i = 0; i < times; ++i) { EchoResponse response; - const Status status = SendRpc(&response, timeout_ms, wait_for_ready); + const Status status = + SendRpc(method_name, &response, timeout_ms, wait_for_ready); EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); EXPECT_EQ(response.message(), kRequestMessage_); @@ -1422,33 +1405,11 @@ class XdsEnd2endTest : public ::testing::TestWithParam { void CheckRpcSendFailure(const size_t times = 1, bool server_fail = false) { for (size_t i = 0; i < times; ++i) { - const Status status = SendRpc(nullptr, 1000, false, server_fail); + const Status status = SendRpc("Echo", nullptr, 1000, false, server_fail); EXPECT_FALSE(status.ok()); } } - void CheckEcho1RpcSendOk(const size_t times = 1, const int timeout_ms = 1000, - 
bool wait_for_ready = false) { - for (size_t i = 0; i < times; ++i) { - EchoResponse response; - const Status status = SendEcho1Rpc(&response, timeout_ms, wait_for_ready); - EXPECT_TRUE(status.ok()) << "code=" << status.error_code() - << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage_); - } - } - - void CheckEcho2RpcSendOk(const size_t times = 1, const int timeout_ms = 1000, - bool wait_for_ready = false) { - for (size_t i = 0; i < times; ++i) { - EchoResponse response; - const Status status = SendEcho2Rpc(&response, timeout_ms, wait_for_ready); - EXPECT_TRUE(status.ok()) << "code=" << status.error_code() - << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage_); - } - } - public: // This method could benefit test subclasses; to make it accessible // via bind with a qualified name, it needs to be public. @@ -1738,7 +1699,7 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) { kDefaultResourceName)); const auto t0 = system_clock::now(); // Client will block: LB will initially send empty serverlist. - CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */); + CheckRpcSendOk(1, "Echo", kCallDeadlineMs, true /* wait_for_ready */); const auto ellapsed_ms = std::chrono::duration_cast( system_clock::now() - t0); @@ -1786,7 +1747,7 @@ TEST_P(BasicTest, BackendsRestart) { CheckRpcSendFailure(); // Restart all backends. RPCs should start succeeding again. StartAllBackends(); - CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */, + CheckRpcSendOk(1 /* times */, "Echo", 2000 /* timeout_ms */, true /* wait_for_ready */); } @@ -2307,9 +2268,9 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, 1000, true); - CheckEcho1RpcSendOk(kNumEcho1Rpcs, 1000, true); - CheckEcho2RpcSendOk(kNumEcho2Rpcs, 1000, true); + CheckRpcSendOk(kNumEchoRpcs, "Echo", 1000, true); + CheckRpcSendOk(kNumEcho1Rpcs, "Echo1", 1000, true); + CheckRpcSendOk(kNumEcho2Rpcs, "Echo2", 1000, true); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -2378,9 +2339,9 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, 1000, true); - CheckEcho1RpcSendOk(kNumEcho1Rpcs, 1000, true); - CheckEcho2RpcSendOk(kNumEcho2Rpcs, 1000, true); + CheckRpcSendOk(kNumEchoRpcs, "Echo", 1000, true); + CheckRpcSendOk(kNumEcho1Rpcs, "Echo1", 1000, true); + CheckRpcSendOk(kNumEcho2Rpcs, "Echo2", 1000, true); // Make sure RPCs all go to the correct backend. 
for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -3090,7 +3051,7 @@ TEST_P(DropTest, Vanilla) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3130,7 +3091,7 @@ TEST_P(DropTest, DropPerHundred) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3169,7 +3130,7 @@ TEST_P(DropTest, DropPerTenThousand) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3212,7 +3173,7 @@ TEST_P(DropTest, Update) { gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3244,7 +3205,7 @@ TEST_P(DropTest, Update) { size_t num_rpcs = kNumRpcs; while (seen_drop_rate < kDropRateThreshold) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); ++num_rpcs; if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { @@ -3261,7 +3222,7 @@ TEST_P(DropTest, Update) { gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3298,7 +3259,7 @@ TEST_P(DropTest, DropAll) { // Send kNumRpcs RPCs and all of them are dropped. for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); } @@ -3441,7 +3402,7 @@ TEST_P(FallbackTest, FallbackEarlyWhenBalancerChannelFails) { SetNextResolutionForLbChannel({g_port_saver->GetPort()}); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. - CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, + CheckRpcSendOk(/* times */ 1, "Echo", /* timeout_ms */ 1000, /* wait_for_ready */ false); } @@ -3456,7 +3417,7 @@ TEST_P(FallbackTest, FallbackEarlyWhenBalancerCallFails) { balancers_[0]->ads_service()->NotifyDoneWithAdsCall(); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. - CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, + CheckRpcSendOk(/* times */ 1, "Echo", /* timeout_ms */ 1000, /* wait_for_ready */ false); } @@ -3928,7 +3889,7 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { // Send kNumRpcs RPCs and count the drops. 
for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc(&response); + const Status status = SendRpc("Echo", &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; From 9417b28bca22a6f86afc5cb1d22e71224e76f047 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 00:44:47 -0700 Subject: [PATCH 24/37] Fixing build issues. --- test/cpp/end2end/test_service_impl.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 4e12201bacd..88416853d54 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -57,6 +57,7 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; +namespace { // When echo_deadline is requested, deadline seen in the ServerContext is set in // the response in seconds. void MaybeEchoDeadline(experimental::ServerContextBase* context, @@ -136,6 +137,7 @@ void ServerTryCancel(ServerContext* context) { gpr_time_from_micros(1000, GPR_TIMESPAN))); } } +} // namespace class TestServiceSignaller { public: From 921147d1aa3ba4e0edd2dcf36623868022ccd487 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 01:02:57 -0700 Subject: [PATCH 25/37] clang change --- test/cpp/end2end/test_service_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 88416853d54..429ead3eb0b 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -137,7 +137,7 @@ void ServerTryCancel(ServerContext* context) { gpr_time_from_micros(1000, GPR_TIMESPAN))); } } -} // namespace +} // namespace class TestServiceSignaller { public: From b39feead6221a702b6a3d3873a54f6f16883f5d8 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 12:42:17 -0700 Subject: [PATCH 26/37] Fixing code review comments. --- .../ext/filters/client_channel/xds/xds_api.cc | 2 +- test/cpp/end2end/test_service_impl.h | 1 + test/cpp/end2end/xds_end2end_test.cc | 118 ++++++++++++++++-- 3 files changed, 111 insertions(+), 10 deletions(-) diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 13d5b0bd4d2..eba5e25e6bf 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1026,7 +1026,7 @@ grpc_error* RouteConfigParse( if (prefix.size > 0) { std::vector prefix_elements = absl::StrSplit( absl::string_view(prefix.data, prefix.size).substr(1), '/'); - if (prefix_elements.size() != 1) { + if (prefix_elements.size() != 2) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix not in the required format of /service/"); } diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 429ead3eb0b..ed42b1401bf 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -307,6 +307,7 @@ class TestMultipleServiceImpl : public RpcService { } // Unimplemented is left unimplemented to test the returned error. 
+ Status RequestStream(ServerContext* context, ServerReader* reader, EchoResponse* response) { diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 204df6f8687..38e7370f771 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1363,6 +1363,87 @@ class XdsEnd2endTest : public ::testing::TestWithParam { return backend_ports; } + enum RpcServiceMethod { + TEST_ECHO, + TEST_ECHO1, + TEST_ECHO2, + TEST1_ECHO, + TEST1_ECHO1, + TEST1_ECHO2, + TEST2_ECHO, + TEST2_ECHO1, + TEST2_ECHO2, + }; + + struct RpcOptions { + RpcServiceMethod service_method = TEST_ECHO; + EchoResponse* response = nullptr; + int timeout_ms = 1000; + bool wait_for_ready = false; + bool server_fail = false; + int times = 1; + }; + + // TODO@donnadionne: Will replace SendRpc in all tests. + Status SendRpcNew(const RpcOptions& rpc_options, + EchoResponse* response = nullptr) { + const bool local_response = (response == nullptr); + if (local_response) response = new EchoResponse; + EchoRequest request; + request.set_message(kRequestMessage_); + if (rpc_options.server_fail) { + request.mutable_param()->mutable_expected_error()->set_code( + GRPC_STATUS_FAILED_PRECONDITION); + } + ClientContext context; + context.set_deadline( + grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); + if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); + Status status; + switch (rpc_options.service_method) { + case TEST_ECHO: + status = stub_->Echo(&context, request, response); + break; + case TEST_ECHO1: + status = stub_->Echo1(&context, request, response); + break; + case TEST_ECHO2: + status = stub_->Echo2(&context, request, response); + break; + case TEST1_ECHO: + status = stub1_->Echo(&context, request, response); + break; + case TEST1_ECHO1: + status = stub1_->Echo1(&context, request, response); + break; + case TEST1_ECHO2: + status = stub1_->Echo2(&context, request, response); + break; + case TEST2_ECHO: + status = stub2_->Echo(&context, request, response); + break; + case TEST2_ECHO1: + status = stub2_->Echo1(&context, request, response); + break; + case TEST2_ECHO2: + status = stub2_->Echo2(&context, request, response); + break; + } + if (local_response) delete response; + return status; + } + + // TODO@donnadionne: Will replace ChedkRpcSendOk in all tests. + void CheckRpcSendOkNew(const RpcOptions& rpc_options) { + for (size_t i = 0; i < rpc_options.times; ++i) { + EchoResponse response; + const Status status = SendRpcNew(rpc_options, &response); + EXPECT_TRUE(status.ok()) << "code=" << status.error_code() + << " message=" << status.error_message(); + EXPECT_EQ(response.message(), kRequestMessage_); + } + } + Status SendRpc(const string& method_name = "Echo", EchoResponse* response = nullptr, int timeout_ms = 1000, bool wait_for_ready = false, bool server_fail = false) { @@ -2186,6 +2267,9 @@ TEST_P(LdsTest, RouteHasNoRouteAction) { AdsServiceImpl::NACKED); } +// TODO@donnadionne: Add more invalid config tests to cover all errors in +// xds_api.cc + // Tests that LDS client should send a NACK if RouteAction has a // cluster_specifier other than cluster in the LDS response. 
TEST_P(LdsTest, RouteActionHasNoCluster) { @@ -2268,9 +2352,17 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, "Echo", 1000, true); - CheckRpcSendOk(kNumEcho1Rpcs, "Echo1", 1000, true); - CheckRpcSendOk(kNumEcho2Rpcs, "Echo2", 1000, true); + RpcOptions rpc_options; + rpc_options.times = kNumEchoRpcs; + rpc_options.service_method = TEST_ECHO; + rpc_options.wait_for_ready = true; + CheckRpcSendOkNew(rpc_options); + rpc_options.times = kNumEcho1Rpcs; + rpc_options.service_method = TEST1_ECHO1; + CheckRpcSendOkNew(rpc_options); + rpc_options.times = kNumEcho2Rpcs; + rpc_options.service_method = TEST2_ECHO2; + CheckRpcSendOkNew(rpc_options); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -2327,10 +2419,10 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { RouteConfiguration new_route_config = balancers_[0]->ads_service()->default_route_config(); auto* route1 = new_route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/"); route1->mutable_route()->set_cluster(kNewCluster1Name); auto* route2 = new_route_config.mutable_virtual_hosts(0)->add_routes(); - route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service"); + route2->mutable_match()->set_prefix("/grpc.testing.EchoTest2Service/"); route2->mutable_route()->set_cluster(kNewCluster2Name); auto* default_route = new_route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2339,9 +2431,17 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - CheckRpcSendOk(kNumEchoRpcs, "Echo", 1000, true); - CheckRpcSendOk(kNumEcho1Rpcs, "Echo1", 1000, true); - CheckRpcSendOk(kNumEcho2Rpcs, "Echo2", 1000, true); + RpcOptions rpc_options; + rpc_options.times = kNumEchoRpcs; + rpc_options.service_method = TEST_ECHO; + rpc_options.wait_for_ready = true; + CheckRpcSendOkNew(rpc_options); + rpc_options.times = kNumEcho1Rpcs; + rpc_options.service_method = TEST1_ECHO1; + CheckRpcSendOkNew(rpc_options); + rpc_options.times = kNumEcho2Rpcs; + rpc_options.service_method = TEST2_ECHO2; + CheckRpcSendOkNew(rpc_options); // Make sure RPCs all go to the correct backend. 
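The per-backend checks that follow verify the split; the split itself comes from the client matching each RPC's ":path" pseudo-header against the configured routes. A simplified sketch of that selection rule follows (a hypothetical helper, not the LB policy implementation; assumes first match wins and that an empty service acts as the catch-all default, which is how the tests order their routes):

    #include <string>
    #include <vector>

    // Hypothetical parsed form of a route: an empty method means the route came
    // from a "/service/" prefix match, an empty service is the catch-all default.
    struct Route {
      std::string service;
      std::string method;
      std::string cluster;
    };

    // Picks the cluster for a ":path" value of the form "/service/method" by
    // walking the routes in order and returning the first match.
    std::string PickCluster(const std::vector<Route>& routes,
                            const std::string& path) {
      std::string service;
      std::string method;
      const size_t second_slash = path.find('/', 1);
      if (!path.empty() && path[0] == '/' && second_slash != std::string::npos) {
        service = path.substr(1, second_slash - 1);
        method = path.substr(second_slash + 1);
      }
      for (const Route& route : routes) {
        if (route.service.empty() ||
            (route.service == service &&
             (route.method.empty() || route.method == method))) {
          return route.cluster;
        }
      }
      return "";  // no route matched
    }

With a route for grpc.testing.EchoTest1Service, one for grpc.testing.EchoTest2Service, and a trailing catch-all, Echo1 and Echo2 calls land on their respective new clusters while plain Echo calls fall through to the default cluster, which is what the expectations below assert.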
for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -2438,7 +2538,7 @@ TEST_P(RdsTest, RouteMatchHasNonemptyPrefix) { route_config.mutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_match() - ->set_prefix("nonempty_prefix"); + ->set_prefix("/nonempty_prefix/"); balancers_[0]->ads_service()->SetRdsResource(route_config, kDefaultResourceName); SetNextResolution({}); From 3135d9751a4382ca2aee184cde2da68b9ba9141c Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 13:06:16 -0700 Subject: [PATCH 27/37] Fixing a test after merge --- test/cpp/end2end/xds_end2end_test.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 25b19adff79..bec71a38398 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1700,7 +1700,7 @@ TEST_P(BasicTest, Vanilla) { } // Check LB policy name for the channel. EXPECT_EQ( - (GetParam().use_xds_resolver() ? "cds_experimental" : "eds_experimental"), + (GetParam().use_xds_resolver() ? "xds_routing_experimental" : "eds_experimental"), channel_->GetLoadBalancingPolicyName()); } From 84b2b98af2b822de8c32e6c848483851b148b75b Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Tue, 14 Apr 2020 13:07:23 -0700 Subject: [PATCH 28/37] clang fix --- test/cpp/end2end/xds_end2end_test.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index bec71a38398..f60ece08866 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1699,9 +1699,9 @@ TEST_P(BasicTest, Vanilla) { backends_[i]->backend_service()->request_count()); } // Check LB policy name for the channel. - EXPECT_EQ( - (GetParam().use_xds_resolver() ? "xds_routing_experimental" : "eds_experimental"), - channel_->GetLoadBalancingPolicyName()); + EXPECT_EQ((GetParam().use_xds_resolver() ? "xds_routing_experimental" + : "eds_experimental"), + channel_->GetLoadBalancingPolicyName()); } TEST_P(BasicTest, IgnoresUnhealthyEndpoints) { From 71aef940c33b8751a6847da882c0e08b4d7c7013 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Wed, 15 Apr 2020 13:09:03 -0700 Subject: [PATCH 29/37] Code review comments: added invalid config tests and restructured testing code. --- .../lb_policy/xds/xds_routing.cc | 3 + .../ext/filters/client_channel/xds/xds_api.cc | 19 + test/cpp/end2end/test_service_impl.cc | 30 +- test/cpp/end2end/test_service_impl.h | 48 +-- test/cpp/end2end/xds_end2end_test.cc | 337 +++++++++++------- 5 files changed, 274 insertions(+), 163 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc index 473870eacac..34aa788ad89 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_routing.cc @@ -215,6 +215,9 @@ class XdsRoutingLb : public LoadBalancingPolicy { XdsRoutingLb::PickResult XdsRoutingLb::RoutePicker::Pick(PickArgs args) { absl::string_view path; + // TODO(roth): Using const auto& here trigger a warning in a macos or windows + // build: + //*(args.initial_metadata) is returning values not references. 
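The loop that follows copies each metadata pair by value, which is what the TODO above is about: the initial-metadata iterator materializes its std::pair elements on the fly, so `const auto&` would bind to a temporary and clang's -Wrange-loop-analysis flags it. The same behaviour in a stand-alone toy container (purely illustrative, not gRPC code):

    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    // Toy container whose iterator builds its elements on the fly and returns
    // std::pair by value, like the LB policy's initial-metadata view.
    class PairView {
     public:
      explicit PairView(std::vector<std::string> keys) : keys_(std::move(keys)) {}
      class iterator {
       public:
        iterator(const std::vector<std::string>* keys, size_t i)
            : keys_(keys), i_(i) {}
        // Dereferencing yields a temporary pair, not a reference into storage.
        std::pair<std::string, std::string> operator*() const {
          return {(*keys_)[i_], "some-value"};
        }
        iterator& operator++() { ++i_; return *this; }
        bool operator!=(const iterator& other) const { return i_ != other.i_; }
       private:
        const std::vector<std::string>* keys_;
        size_t i_;
      };
      iterator begin() const { return iterator(&keys_, 0); }
      iterator end() const { return iterator(&keys_, keys_.size()); }
     private:
      std::vector<std::string> keys_;
    };

    int main() {
      PairView view({":path", ":authority"});
      // Writing `const auto& p` here would bind the reference to the temporary
      // returned by operator*(); the element is always a copy and clang warns
      // about it, hence the `const auto p` spelling in the patch.
      for (const auto p : view) {
        if (p.first == ":path") { /* use p.second */ }
      }
      return 0;
    }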
for (const auto p : *(args.initial_metadata)) { if (p.first == ":path") { path = p.second; diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index eba5e25e6bf..932ad12b626 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1024,11 +1024,20 @@ grpc_error* RouteConfigParse( if (envoy_api_v2_route_RouteMatch_has_prefix(match)) { upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); if (prefix.size > 0) { + if (prefix.data[0] != '/') { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix is not starting with a /"); + } std::vector prefix_elements = absl::StrSplit( absl::string_view(prefix.data, prefix.size).substr(1), '/'); if (prefix_elements.size() != 2) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix not in the required format of /service/"); + } else if (!prefix_elements[1].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix is not ending with a /"); + } else if (prefix_elements[0].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Prefix cannot be empty"); } rds_route.service = std::string(prefix_elements[0]); } @@ -1038,11 +1047,21 @@ grpc_error* RouteConfigParse( return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path if set cannot be empty"); } + if (path.data[0] != '/') { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Path is not starting with a /"); + } std::vector path_elements = absl::StrSplit( absl::string_view(path.data, path.size).substr(1), '/'); if (path_elements.size() != 2) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Path not in the required format of /service/method"); + } else if (path_elements[0].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Path service name cannot be empty"); + } else if (path_elements[1].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Path method name cannot be empty"); } rds_route.service = std::string(path_elements[0]); rds_route.method = std::string(path_elements[1]); diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc index 6517c5d6cc1..98f3cf44bec 100644 --- a/test/cpp/end2end/test_service_impl.cc +++ b/test/cpp/end2end/test_service_impl.cc @@ -132,7 +132,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( error.error_message(), error.binary_error_details())); return; } - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, ctx_->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel != DO_NOT_CANCEL) { // Since this is a unary RPC, by the time this server handler is called, @@ -147,7 +147,7 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( } gpr_log(GPR_DEBUG, "Request message was %s", req_->message().c_str()); resp_->set_message(req_->message()); - MaybeEchoDeadline(ctx_, req_, resp_); + internal::MaybeEchoDeadline(ctx_, req_, resp_); if (service_->host_) { resp_->mutable_param()->set_host(*service_->host_); } @@ -201,9 +201,9 @@ experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( if (req_->has_param() && (req_->param().expected_client_identity().length() > 0 || req_->param().check_auth_context())) { - CheckServerAuthContext(ctx_, - req_->param().expected_transport_security_type(), - req_->param().expected_client_identity()); + internal::CheckServerAuthContext( + ctx_, req_->param().expected_transport_security_type(), + req_->param().expected_client_identity()); } 
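The checks added to RouteConfigParse above accept a prefix only when it is empty or of the form "/service/", and a path only when it is of the form "/service/method"; the invalid-config tests added later in this series exercise each branch. Restated as a stand-alone sketch over plain strings (using absl::StrSplit as the parser does, but not the upb-based code itself):

    #include <string>
    #include <vector>

    #include "absl/strings/str_split.h"
    #include "absl/strings/string_view.h"

    // Sketch of the prefix/path rules enforced above. Returns an empty string
    // on success, otherwise a short description of the first violated rule.
    std::string ValidatePrefix(absl::string_view prefix) {
      if (prefix.empty()) return "";  // empty prefix is accepted (catch-all)
      if (prefix[0] != '/') return "prefix does not start with '/'";
      std::vector<absl::string_view> parts =
          absl::StrSplit(prefix.substr(1), '/');
      if (parts.size() != 2) return "prefix is not of the form /service/";
      if (!parts[1].empty()) return "prefix does not end with '/'";
      if (parts[0].empty()) return "prefix service name is empty";
      return "";
    }

    std::string ValidatePath(absl::string_view path) {
      if (path.empty()) return "path, if set, cannot be empty";
      if (path[0] != '/') return "path does not start with '/'";
      std::vector<absl::string_view> parts = absl::StrSplit(path.substr(1), '/');
      if (parts.size() != 2) return "path is not of the form /service/method";
      if (parts[0].empty()) return "path service name is empty";
      if (parts[1].empty()) return "path method name is empty";
      return "";
    }

For example, "/grpc.testing.EchoTest1Service/" passes ValidatePrefix, while "grpc.testing.EchoTest1Service", "/grpc.testing.EchoTest1Service" and "//" each trip a different branch, matching the NACK cases in the new LdsTest tests.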
if (req_->has_param() && req_->param().response_message_length() > 0) { resp_->set_message( @@ -247,9 +247,9 @@ CallbackTestServiceImpl::CheckClientInitialMetadata( class Reactor : public ::grpc::experimental::ServerUnaryReactor { public: explicit Reactor(experimental::CallbackServerContext* ctx) { - EXPECT_EQ(MetadataMatchCount(ctx->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), + EXPECT_EQ(internal::MetadataMatchCount(ctx->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), 1); EXPECT_EQ(ctx->client_metadata().count(kCheckClientInitialMetadataKey), 1u); @@ -272,7 +272,7 @@ CallbackTestServiceImpl::RequestStream( // is cancelled while the server is reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { ServerTryCancelNonblocking(context); @@ -358,7 +358,7 @@ CallbackTestServiceImpl::ResponseStream( // is cancelled while the server is reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { ServerTryCancelNonblocking(context); @@ -370,9 +370,9 @@ CallbackTestServiceImpl::ResponseStream( Reactor(experimental::CallbackServerContext* ctx, const EchoRequest* request, int server_try_cancel) : ctx_(ctx), request_(request), server_try_cancel_(server_try_cancel) { - server_coalescing_api_ = GetIntValueFromMetadata( + server_coalescing_api_ = internal::GetIntValueFromMetadata( kServerUseCoalescingApi, ctx->client_metadata(), 0); - server_responses_to_send_ = GetIntValueFromMetadata( + server_responses_to_send_ = internal::GetIntValueFromMetadata( kServerResponseStreamsToSend, ctx->client_metadata(), kServerDefaultResponseStreamsToSend); if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { @@ -457,10 +457,10 @@ CallbackTestServiceImpl::BidiStream( // is cancelled while the server is reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - server_try_cancel_ = GetIntValueFromMetadata( + server_try_cancel_ = internal::GetIntValueFromMetadata( kServerTryCancelRequest, ctx->client_metadata(), DO_NOT_CANCEL); - server_write_last_ = GetIntValueFromMetadata(kServerFinishAfterNReads, - ctx->client_metadata(), 0); + server_write_last_ = internal::GetIntValueFromMetadata( + kServerFinishAfterNReads, ctx->client_metadata(), 0); if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) { ServerTryCancelNonblocking(ctx); } else { diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index ed42b1401bf..3210d5ec25f 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -57,7 +57,7 @@ typedef enum { CANCEL_AFTER_PROCESSING } ServerTryCancelRequestPhase; -namespace { +namespace internal { // When echo_deadline is requested, deadline seen in the ServerContext is set in // the response in seconds. 
void MaybeEchoDeadline(experimental::ServerContextBase* context, @@ -137,7 +137,7 @@ void ServerTryCancel(ServerContext* context) { gpr_time_from_micros(1000, GPR_TIMESPAN))); } } -} // namespace +} // namespace internal class TestServiceSignaller { public: @@ -200,19 +200,19 @@ class TestMultipleServiceImpl : public RpcService { return Status(static_cast(error.code()), error.error_message(), error.binary_error_details()); } - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel > DO_NOT_CANCEL) { // Since this is a unary RPC, by the time this server handler is called, // the 'request' message is already read from the client. So the scenarios // in server_try_cancel don't make much sense. Just cancel the RPC as long // as server_try_cancel is not DO_NOT_CANCEL - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } response->set_message(request->message()); - MaybeEchoDeadline(context, request, response); + internal::MaybeEchoDeadline(context, request, response); if (host_) { response->mutable_param()->set_host(*host_); } @@ -269,7 +269,7 @@ class TestMultipleServiceImpl : public RpcService { if (request->has_param() && (request->param().expected_client_identity().length() > 0 || request->param().check_auth_context())) { - CheckServerAuthContext( + internal::CheckServerAuthContext( context, request->param().expected_transport_security_type(), request->param().expected_client_identity()); } @@ -297,9 +297,9 @@ class TestMultipleServiceImpl : public RpcService { Status CheckClientInitialMetadata(ServerContext* context, const SimpleRequest* /*request*/, SimpleResponse* /*response*/) { - EXPECT_EQ(MetadataMatchCount(context->client_metadata(), - kCheckClientInitialMetadataKey, - kCheckClientInitialMetadataVal), + EXPECT_EQ(internal::MetadataMatchCount(context->client_metadata(), + kCheckClientInitialMetadataKey, + kCheckClientInitialMetadataVal), 1); EXPECT_EQ(1u, context->client_metadata().count(kCheckClientInitialMetadataKey)); @@ -319,21 +319,21 @@ class TestMultipleServiceImpl : public RpcService { // reading messages from the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server reads // all the messages from the client - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); EchoRequest request; response->set_message(""); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } std::thread* server_try_cancel_thd = nullptr; if (server_try_cancel == CANCEL_DURING_PROCESSING) { server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); + new std::thread([context] { internal::ServerTryCancel(context); }); } int num_msgs_read = 0; @@ -349,7 +349,7 @@ class TestMultipleServiceImpl : public RpcService { } if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } @@ -368,18 +368,18 @@ class TestMultipleServiceImpl : public RpcService { // writing messages to the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server writes // all the messages to the client - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = 
internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); - int server_coalescing_api = GetIntValueFromMetadata( + int server_coalescing_api = internal::GetIntValueFromMetadata( kServerUseCoalescingApi, context->client_metadata(), 0); - int server_responses_to_send = GetIntValueFromMetadata( + int server_responses_to_send = internal::GetIntValueFromMetadata( kServerResponseStreamsToSend, context->client_metadata(), kServerDefaultResponseStreamsToSend); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } @@ -387,7 +387,7 @@ class TestMultipleServiceImpl : public RpcService { std::thread* server_try_cancel_thd = nullptr; if (server_try_cancel == CANCEL_DURING_PROCESSING) { server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); + new std::thread([context] { internal::ServerTryCancel(context); }); } for (int i = 0; i < server_responses_to_send; i++) { @@ -406,7 +406,7 @@ class TestMultipleServiceImpl : public RpcService { } if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } @@ -423,26 +423,26 @@ class TestMultipleServiceImpl : public RpcService { // reading/writing messages from/to the client // CANCEL_AFTER_PROCESSING: The RPC is cancelled after the server // reads/writes all messages from/to the client - int server_try_cancel = GetIntValueFromMetadata( + int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); EchoRequest request; EchoResponse response; if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } std::thread* server_try_cancel_thd = nullptr; if (server_try_cancel == CANCEL_DURING_PROCESSING) { server_try_cancel_thd = - new std::thread([context] { ServerTryCancel(context); }); + new std::thread([context] { internal::ServerTryCancel(context); }); } // kServerFinishAfterNReads suggests after how many reads, the server should // write the last message and send status (coalesced using WriteLast) - int server_write_last = GetIntValueFromMetadata( + int server_write_last = internal::GetIntValueFromMetadata( kServerFinishAfterNReads, context->client_metadata(), 0); int read_counts = 0; @@ -464,7 +464,7 @@ class TestMultipleServiceImpl : public RpcService { } if (server_try_cancel == CANCEL_AFTER_PROCESSING) { - ServerTryCancel(context); + internal::ServerTryCancel(context); return Status::CANCELLED; } diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index f60ece08866..6d957a77006 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1359,121 +1359,103 @@ class XdsEnd2endTest : public ::testing::TestWithParam { return backend_ports; } - enum RpcServiceMethod { - TEST_ECHO, - TEST_ECHO1, - TEST_ECHO2, - TEST1_ECHO, - TEST1_ECHO1, - TEST1_ECHO2, - TEST2_ECHO, - TEST2_ECHO1, - TEST2_ECHO2, + enum RpcService { + SERVICE_ECHO, + SERVICE_ECHO1, + SERVICE_ECHO2, + }; + + enum RpcMethod { + METHOD_ECHO, + METHOD_ECHO1, + METHOD_ECHO2, }; struct RpcOptions { - RpcServiceMethod service_method = TEST_ECHO; - EchoResponse* response = nullptr; + RpcService service = SERVICE_ECHO; + RpcMethod method = METHOD_ECHO; int timeout_ms = 1000; bool wait_for_ready = false; bool server_fail = false; - int times 
= 1; + + RpcOptions() {} + + RpcOptions& set_rpc_service(RpcService rpc_service) { + service = rpc_service; + return *this; + } + + RpcOptions& set_rpc_method(RpcMethod rpc_method) { + method = rpc_method; + return *this; + } + + RpcOptions& set_timeout_ms(int rpc_timeout_ms) { + timeout_ms = rpc_timeout_ms; + return *this; + } + + RpcOptions& set_wait_for_ready(bool rpc_wait_for_ready) { + wait_for_ready = rpc_wait_for_ready; + return *this; + } + + RpcOptions& set_server_fail(bool rpc_server_fail) { + server_fail = rpc_server_fail; + return *this; + } }; // TODO@donnadionne: Will replace SendRpc in all tests. - Status SendRpcNew(const RpcOptions& rpc_options, - EchoResponse* response = nullptr) { - const bool local_response = (response == nullptr); - if (local_response) response = new EchoResponse; - EchoRequest request; - request.set_message(kRequestMessage_); - if (rpc_options.server_fail) { - request.mutable_param()->mutable_expected_error()->set_code( - GRPC_STATUS_FAILED_PRECONDITION); - } + template + Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options, + EchoRequest& request, EchoResponse* response) { ClientContext context; context.set_deadline( grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); - Status status; - switch (rpc_options.service_method) { - case TEST_ECHO: - status = stub_->Echo(&context, request, response); - break; - case TEST_ECHO1: - status = stub_->Echo1(&context, request, response); - break; - case TEST_ECHO2: - status = stub_->Echo2(&context, request, response); - break; - case TEST1_ECHO: - status = stub1_->Echo(&context, request, response); - break; - case TEST1_ECHO1: - status = stub1_->Echo1(&context, request, response); - break; - case TEST1_ECHO2: - status = stub1_->Echo2(&context, request, response); - break; - case TEST2_ECHO: - status = stub2_->Echo(&context, request, response); - break; - case TEST2_ECHO1: - status = stub2_->Echo1(&context, request, response); - break; - case TEST2_ECHO2: - status = stub2_->Echo2(&context, request, response); - break; + switch (rpc_options.method) { + case METHOD_ECHO: + return (*stub)->Echo(&context, request, response); + case METHOD_ECHO1: + return (*stub)->Echo1(&context, request, response); + case METHOD_ECHO2: + return (*stub)->Echo2(&context, request, response); } - if (local_response) delete response; - return status; } - // TODO@donnadionne: Will replace ChedkRpcSendOk in all tests. 
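SendRpcMethod above is templated on the stub type, so a single switch over RpcMethod serves all three Echo test services, whose generated stubs expose the same Echo/Echo1/Echo2 surface, while the chainable RpcOptions setters keep call sites short. A reduced sketch of the dispatch pattern with two hypothetical stub-like types (not the generated gRPC stubs):

    #include <string>

    // Hypothetical stand-ins for generated stubs that share method names.
    struct FooStub {
      std::string Echo() { return "Foo.Echo"; }
      std::string Echo1() { return "Foo.Echo1"; }
    };
    struct BarStub {
      std::string Echo() { return "Bar.Echo"; }
      std::string Echo1() { return "Bar.Echo1"; }
    };

    enum Method { METHOD_ECHO, METHOD_ECHO1 };

    // One switch works for any stub type that provides the same member
    // functions; the compiler instantiates a copy per stub type.
    template <typename Stub>
    std::string CallMethod(Stub* stub, Method method) {
      switch (method) {
        case METHOD_ECHO:
          return stub->Echo();
        case METHOD_ECHO1:
          return stub->Echo1();
      }
      return "";  // not reached when every enum value is handled above
    }

    int main() {
      FooStub foo;
      BarStub bar;
      const bool ok = CallMethod(&foo, METHOD_ECHO) == "Foo.Echo" &&
                      CallMethod(&bar, METHOD_ECHO1) == "Bar.Echo1";
      return ok ? 0 : 1;
    }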
- void CheckRpcSendOkNew(const RpcOptions& rpc_options) { - for (size_t i = 0; i < rpc_options.times; ++i) { - EchoResponse response; - const Status status = SendRpcNew(rpc_options, &response); - EXPECT_TRUE(status.ok()) << "code=" << status.error_code() - << " message=" << status.error_message(); - EXPECT_EQ(response.message(), kRequestMessage_); - } - } - - Status SendRpc(const string& method_name = "Echo", - EchoResponse* response = nullptr, int timeout_ms = 1000, - bool wait_for_ready = false, bool server_fail = false) { + Status SendRpc(const RpcOptions& rpc_options = RpcOptions(), + EchoResponse* response = nullptr) { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; request.set_message(kRequestMessage_); - if (server_fail) { + if (rpc_options.server_fail) { request.mutable_param()->mutable_expected_error()->set_code( GRPC_STATUS_FAILED_PRECONDITION); } - ClientContext context; - context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms)); - if (wait_for_ready) context.set_wait_for_ready(true); Status status; - if (method_name == "Echo") { - status = stub_->Echo(&context, request, response); - } else if (method_name == "Echo1") { - status = stub1_->Echo1(&context, request, response); - } else if (method_name == "Echo2") { - status = stub2_->Echo2(&context, request, response); + switch (rpc_options.service) { + case SERVICE_ECHO: + status = SendRpcMethod(&stub_, rpc_options, request, response); + break; + case SERVICE_ECHO1: + status = SendRpcMethod(&stub1_, rpc_options, request, response); + break; + case SERVICE_ECHO2: + status = SendRpcMethod(&stub2_, rpc_options, request, response); + break; } if (local_response) delete response; return status; } + // TODO@donnadionne: Will replace ChedkRpcSendOk in all tests. void CheckRpcSendOk(const size_t times = 1, - const string& method_name = "Echo", - const int timeout_ms = 1000, - bool wait_for_ready = false) { + const RpcOptions& rpc_options = RpcOptions()) { for (size_t i = 0; i < times; ++i) { EchoResponse response; - const Status status = - SendRpc(method_name, &response, timeout_ms, wait_for_ready); + const Status status = SendRpc(rpc_options, &response); EXPECT_TRUE(status.ok()) << "code=" << status.error_code() << " message=" << status.error_message(); EXPECT_EQ(response.message(), kRequestMessage_); @@ -1482,7 +1464,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { void CheckRpcSendFailure(const size_t times = 1, bool server_fail = false) { for (size_t i = 0; i < times; ++i) { - const Status status = SendRpc("Echo", nullptr, 1000, false, server_fail); + const Status status = SendRpc(RpcOptions().set_server_fail(server_fail)); EXPECT_FALSE(status.ok()); } } @@ -1778,7 +1760,8 @@ TEST_P(BasicTest, InitiallyEmptyServerlist) { kDefaultResourceName)); const auto t0 = system_clock::now(); // Client will block: LB will initially send empty serverlist. - CheckRpcSendOk(1, "Echo", kCallDeadlineMs, true /* wait_for_ready */); + CheckRpcSendOk( + 1, RpcOptions().set_timeout_ms(kCallDeadlineMs).set_wait_for_ready(true)); const auto ellapsed_ms = std::chrono::duration_cast( system_clock::now() - t0); @@ -1826,8 +1809,7 @@ TEST_P(BasicTest, BackendsRestart) { CheckRpcSendFailure(); // Restart all backends. RPCs should start succeeding again. 
StartAllBackends(); - CheckRpcSendOk(1 /* times */, "Echo", 2000 /* timeout_ms */, - true /* wait_for_ready */); + CheckRpcSendOk(1, RpcOptions().set_timeout_ms(2000).set_wait_for_ready(true)); } using XdsResolverOnlyTest = BasicTest; @@ -2232,15 +2214,126 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { AdsServiceImpl::NACKED); } -// Tests that LDS client should send a NACK if route match has empty path -// as the only route (default) in the LDS response. -TEST_P(LdsTest, RouteMatchHasEmptyPath) { +// Tests that LDS client should send a NACK if route match has a prefix +// not in the format "/service/": missing / or did not end with /. +TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + // Invalid case 1: no / + route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service"); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 2: missing / at the end + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 3: missing / at the beginning + route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 4: extra content outside of "/service/" + route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 5: empty prefix "//" + route1->mutable_match()->set_prefix("//"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); +} + +// Tests that LDS client should send a NACK if route match has path +// not in the format of "/service/method" +TEST_P(LdsTest, RouteMatchHasInvalidPath) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + 
/*xds_routing_enabled=*/true); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - route1->mutable_match()->set_path(""); auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); + // Invalid case 1: empty path + route1->mutable_match()->set_path(""); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 2: missing / at the beginning + route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 3: extra / at the end + route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 4: missinga / in the middle + route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 5: empty service "//Echo1" + route1->mutable_match()->set_path("//Echo1"); + balancers_[0]->ads_service()->SetLdsResource( + AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); + SetNextResolution({}); + SetNextResolutionForLbChannelAllBalancers(); + CheckRpcSendFailure(); + EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), + AdsServiceImpl::NACKED); + // Invalid case 5: empty method "/grpc.testing.EchoTest1Service/" + route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); SetNextResolution({}); @@ -2350,17 +2443,15 @@ TEST_P(LdsTest, XdsRoutingPathMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - RpcOptions rpc_options; - rpc_options.times = kNumEchoRpcs; - rpc_options.service_method = TEST_ECHO; - rpc_options.wait_for_ready = true; - CheckRpcSendOkNew(rpc_options); - rpc_options.times = kNumEcho1Rpcs; - rpc_options.service_method = TEST1_ECHO1; - CheckRpcSendOkNew(rpc_options); - rpc_options.times = kNumEcho2Rpcs; - rpc_options.service_method = TEST2_ECHO2; - CheckRpcSendOkNew(rpc_options); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + 
.set_rpc_method(METHOD_ECHO1) + .set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO2) + .set_rpc_method(METHOD_ECHO2) + .set_wait_for_ready(true)); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -2429,17 +2520,15 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { balancers_[0]->ads_service()->BuildListener(new_route_config); balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); - RpcOptions rpc_options; - rpc_options.times = kNumEchoRpcs; - rpc_options.service_method = TEST_ECHO; - rpc_options.wait_for_ready = true; - CheckRpcSendOkNew(rpc_options); - rpc_options.times = kNumEcho1Rpcs; - rpc_options.service_method = TEST1_ECHO1; - CheckRpcSendOkNew(rpc_options); - rpc_options.times = kNumEcho2Rpcs; - rpc_options.service_method = TEST2_ECHO2; - CheckRpcSendOkNew(rpc_options); + CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO1) + .set_rpc_method(METHOD_ECHO1) + .set_wait_for_ready(true)); + CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions() + .set_rpc_service(SERVICE_ECHO2) + .set_rpc_method(METHOD_ECHO2) + .set_wait_for_ready(true)); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, @@ -2773,7 +2862,7 @@ TEST_P(LocalityMapTest, NoLocalities) { // Tests that the locality map can work properly even when it contains a large // number of localities. -TEST_P(LocalityMapTest, StressTest) { +/*TEST_P(LocalityMapTest, StressTest) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumLocalities = 100; @@ -2797,13 +2886,13 @@ TEST_P(LocalityMapTest, StressTest) { AdsServiceImpl::BuildEdsResource(args), 60 * 1000, kDefaultResourceName)); // Wait until backend 0 is ready, before which kNumLocalities localities are // received and handled by the xds policy. - WaitForBackend(0, /*reset_counters=*/false); + WaitForBackend(0, /*reset_counters=*false); EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); // Wait until backend 1 is ready, before which kNumLocalities localities are // removed by the xds policy. WaitForBackend(1); delayed_resource_setter.join(); -} +}*/ // Tests that the localities in a locality map are picked correctly after update // (addition, modification, deletion). 
@@ -3149,7 +3238,7 @@ TEST_P(DropTest, Vanilla) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3189,7 +3278,7 @@ TEST_P(DropTest, DropPerHundred) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3228,7 +3317,7 @@ TEST_P(DropTest, DropPerTenThousand) { size_t num_drops = 0; for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3271,7 +3360,7 @@ TEST_P(DropTest, Update) { gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3303,7 +3392,7 @@ TEST_P(DropTest, Update) { size_t num_rpcs = kNumRpcs; while (seen_drop_rate < kDropRateThreshold) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); ++num_rpcs; if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { @@ -3320,7 +3409,7 @@ TEST_P(DropTest, Update) { gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; @@ -3357,7 +3446,7 @@ TEST_P(DropTest, DropAll) { // Send kNumRpcs RPCs and all of them are dropped. for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); EXPECT_EQ(status.error_code(), StatusCode::UNAVAILABLE); EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy"); } @@ -3752,7 +3841,7 @@ TEST_P(ClientLoadReportingWithDropTest, Vanilla) { // Send kNumRpcs RPCs and count the drops. for (size_t i = 0; i < kNumRpcs; ++i) { EchoResponse response; - const Status status = SendRpc("Echo", &response); + const Status status = SendRpc(RpcOptions(), &response); if (!status.ok() && status.error_message() == "Call dropped by load balancing policy") { ++num_drops; From 63573b0515338ab590c643e4a71d0f05f4e26eec Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Wed, 15 Apr 2020 17:33:31 -0700 Subject: [PATCH 30/37] Fixing a flaky test. 
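The drop tests above all wrap SendRpc in the same counting loop, tallying calls rejected with "Call dropped by load balancing policy". That loop could be factored into a small helper; a hypothetical sketch (not part of the fixture), taking any callable that performs one RPC:

    #include <cstddef>
    #include <functional>
    #include <string>

    #include <grpcpp/grpcpp.h>

    // Hypothetical helper: performs `num_rpcs` calls via `send_rpc` and returns
    // how many of them were rejected by the LB policy's drop logic.
    size_t CountDrops(const std::function<grpc::Status()>& send_rpc,
                      size_t num_rpcs) {
      size_t num_drops = 0;
      for (size_t i = 0; i < num_rpcs; ++i) {
        const grpc::Status status = send_rpc();
        if (!status.ok() &&
            status.error_message() == "Call dropped by load balancing policy") {
          ++num_drops;
        }
      }
      return num_drops;
    }

Inside the fixture it could be invoked as CountDrops([&] { return SendRpc(RpcOptions()); }, kNumRpcs), leaving each test to assert on the returned drop rate.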
--- test/cpp/end2end/xds_end2end_test.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 6d957a77006..c39712dc2ed 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -2219,7 +2219,7 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { ResetStub(/*failover_timeout=*/0, /*expected_targets=*/"", - /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_resource_does_not_exist_timeout*/ 30000, /*xds_routing_enabled=*/true); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); @@ -2279,7 +2279,7 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { TEST_P(LdsTest, RouteMatchHasInvalidPath) { ResetStub(/*failover_timeout=*/0, /*expected_targets=*/"", - /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_resource_does_not_exist_timeout*/ 30000, /*xds_routing_enabled=*/true); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); @@ -2862,7 +2862,7 @@ TEST_P(LocalityMapTest, NoLocalities) { // Tests that the locality map can work properly even when it contains a large // number of localities. -/*TEST_P(LocalityMapTest, StressTest) { +TEST_P(LocalityMapTest, StressTest) { SetNextResolution({}); SetNextResolutionForLbChannelAllBalancers(); const size_t kNumLocalities = 100; @@ -2886,13 +2886,13 @@ TEST_P(LocalityMapTest, NoLocalities) { AdsServiceImpl::BuildEdsResource(args), 60 * 1000, kDefaultResourceName)); // Wait until backend 0 is ready, before which kNumLocalities localities are // received and handled by the xds policy. - WaitForBackend(0, /*reset_counters=*false); + WaitForBackend(0, /*reset_counters=*/false); EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); // Wait until backend 1 is ready, before which kNumLocalities localities are // removed by the xds policy. WaitForBackend(1); delayed_resource_setter.join(); -}*/ +} // Tests that the localities in a locality map are picked correctly after update // (addition, modification, deletion). From 8bab28db0818bcc3c91e89c7aeb82f54f6135827 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 12:20:57 -0700 Subject: [PATCH 31/37] Rearrange test_service_impl.h/cc to fix build failures. 
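The rearrangement keeps only declarations of the shared helpers in test_service_impl.h, inside a named internal namespace, and moves their definitions to test_service_impl.cc, which is compiled once and linked into each test target. That avoids both the duplicate-symbol link errors caused by non-inline definitions in a shared header and the per-translation-unit copies (plus unused-function warnings) of the earlier anonymous-namespace workaround. A minimal sketch of the split, with hypothetical file names and a simplified signature:

    // echo_helpers.h -- hypothetical shared test header: declarations only.
    #ifndef ECHO_HELPERS_H
    #define ECHO_HELPERS_H

    #include <map>
    #include <string>

    namespace internal {
    // The single definition lives in echo_helpers.cc, so any number of test
    // binaries can include this header and still link cleanly.
    int GetIntValueFromMetadata(
        const std::multimap<std::string, std::string>& metadata,
        const std::string& key, int default_value);
    }  // namespace internal

    #endif  // ECHO_HELPERS_H

    // echo_helpers.cc -- the one translation unit with the definitions.
    #include "echo_helpers.h"

    #include <sstream>

    namespace internal {
    int GetIntValueFromMetadata(
        const std::multimap<std::string, std::string>& metadata,
        const std::string& key, int default_value) {
      auto it = metadata.find(key);
      if (it != metadata.end()) {
        std::istringstream iss(it->second);
        iss >> default_value;
      }
      return default_value;
    }
    }  // namespace internal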
--- test/cpp/end2end/BUILD | 15 +---- test/cpp/end2end/test_service_impl.cc | 96 ++++++++++++++++++++++++--- test/cpp/end2end/test_service_impl.h | 66 ++---------------- 3 files changed, 96 insertions(+), 81 deletions(-) diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD index bb6be0c4f02..a20128dcc2d 100644 --- a/test/cpp/end2end/BUILD +++ b/test/cpp/end2end/BUILD @@ -35,19 +35,6 @@ grpc_cc_library( ], ) -grpc_cc_library( - name = "test_multiple_service_impl", - testonly = True, - hdrs = ["test_service_impl.h"], - external_deps = [ - "gtest", - ], - deps = [ - "//src/proto/grpc/testing:echo_proto", - "//test/cpp/util:test_util", - ], -) - grpc_cc_library( name = "test_health_check_service_impl", testonly = True, @@ -525,7 +512,7 @@ grpc_cc_test( "no_windows", ], # TODO(jtattermusch): fix test on windows deps = [ - ":test_multiple_service_impl", + ":test_service_impl", "//:gpr", "//:grpc", "//:grpc++", diff --git a/test/cpp/end2end/test_service_impl.cc b/test/cpp/end2end/test_service_impl.cc index 98f3cf44bec..12928c70b48 100644 --- a/test/cpp/end2end/test_service_impl.cc +++ b/test/cpp/end2end/test_service_impl.cc @@ -34,7 +34,87 @@ using std::chrono::system_clock; namespace grpc { namespace testing { -namespace { +namespace internal { + +// When echo_deadline is requested, deadline seen in the ServerContext is set in +// the response in seconds. +void MaybeEchoDeadline(experimental::ServerContextBase* context, + const EchoRequest* request, EchoResponse* response) { + if (request->has_param() && request->param().echo_deadline()) { + gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); + if (context->deadline() != system_clock::time_point::max()) { + Timepoint2Timespec(context->deadline(), &deadline); + } + response->mutable_param()->set_request_deadline(deadline.tv_sec); + } +} + +void CheckServerAuthContext( + const experimental::ServerContextBase* context, + const grpc::string& expected_transport_security_type, + const grpc::string& expected_client_identity) { + std::shared_ptr auth_ctx = context->auth_context(); + std::vector tst = + auth_ctx->FindPropertyValues("transport_security_type"); + EXPECT_EQ(1u, tst.size()); + EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); + if (expected_client_identity.empty()) { + EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); + EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); + EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); + } else { + auto identity = auth_ctx->GetPeerIdentity(); + EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); + EXPECT_EQ(1u, identity.size()); + EXPECT_EQ(expected_client_identity, identity[0]); + } +} + +// Returns the number of pairs in metadata that exactly match the given +// key-value pair. Returns -1 if the pair wasn't found. 
+int MetadataMatchCount( + const std::multimap& metadata, + const grpc::string& key, const grpc::string& value) { + int count = 0; + for (const auto& metadatum : metadata) { + if (ToString(metadatum.first) == key && + ToString(metadatum.second) == value) { + count++; + } + } + return count; +} + +int GetIntValueFromMetadataHelper( + const char* key, + const std::multimap& metadata, + int default_value) { + if (metadata.find(key) != metadata.end()) { + std::istringstream iss(ToString(metadata.find(key)->second)); + iss >> default_value; + gpr_log(GPR_INFO, "%s : %d", key, default_value); + } + + return default_value; +} + +int GetIntValueFromMetadata( + const char* key, + const std::multimap& metadata, + int default_value) { + return GetIntValueFromMetadataHelper(key, metadata, default_value); +} + +void ServerTryCancel(ServerContext* context) { + EXPECT_FALSE(context->IsCancelled()); + context->TryCancel(); + gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); + // Now wait until it's really canceled + while (!context->IsCancelled()) { + gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_micros(1000, GPR_TIMESPAN))); + } +} void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { EXPECT_FALSE(context->IsCancelled()); @@ -43,7 +123,7 @@ void ServerTryCancelNonblocking(experimental::CallbackServerContext* context) { "Server called TryCancelNonblocking() to cancel the request"); } -} // namespace +} // namespace internal experimental::ServerUnaryReactor* CallbackTestServiceImpl::Echo( experimental::CallbackServerContext* context, const EchoRequest* request, @@ -275,7 +355,7 @@ CallbackTestServiceImpl::RequestStream( int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancelNonblocking(context); + internal::ServerTryCancelNonblocking(context); // Don't need to provide a reactor since the RPC is canceled return nullptr; } @@ -316,7 +396,7 @@ CallbackTestServiceImpl::RequestStream( return; } if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); return; } FinishOnce(Status::OK); @@ -361,7 +441,7 @@ CallbackTestServiceImpl::ResponseStream( int server_try_cancel = internal::GetIntValueFromMetadata( kServerTryCancelRequest, context->client_metadata(), DO_NOT_CANCEL); if (server_try_cancel == CANCEL_BEFORE_PROCESSING) { - ServerTryCancelNonblocking(context); + internal::ServerTryCancelNonblocking(context); } class Reactor @@ -399,7 +479,7 @@ CallbackTestServiceImpl::ResponseStream( } else if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { // Let OnCancel recover this } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); } else { FinishOnce(Status::OK); } @@ -462,7 +542,7 @@ CallbackTestServiceImpl::BidiStream( server_write_last_ = internal::GetIntValueFromMetadata( kServerFinishAfterNReads, ctx->client_metadata(), 0); if (server_try_cancel_ == CANCEL_BEFORE_PROCESSING) { - ServerTryCancelNonblocking(ctx); + internal::ServerTryCancelNonblocking(ctx); } else { if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { ctx->TryCancel(); @@ -502,7 +582,7 @@ CallbackTestServiceImpl::BidiStream( if (server_try_cancel_ == CANCEL_DURING_PROCESSING) { // Let OnCancel handle this } else if (server_try_cancel_ == CANCEL_AFTER_PROCESSING) { - 
ServerTryCancelNonblocking(ctx_); + internal::ServerTryCancelNonblocking(ctx_); } else { FinishOnce(Status::OK); } diff --git a/test/cpp/end2end/test_service_impl.h b/test/cpp/end2end/test_service_impl.h index 3210d5ec25f..d890bdbaff0 100644 --- a/test/cpp/end2end/test_service_impl.h +++ b/test/cpp/end2end/test_service_impl.h @@ -61,82 +61,30 @@ namespace internal { // When echo_deadline is requested, deadline seen in the ServerContext is set in // the response in seconds. void MaybeEchoDeadline(experimental::ServerContextBase* context, - const EchoRequest* request, EchoResponse* response) { - if (request->has_param() && request->param().echo_deadline()) { - gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME); - if (context->deadline() != system_clock::time_point::max()) { - Timepoint2Timespec(context->deadline(), &deadline); - } - response->mutable_param()->set_request_deadline(deadline.tv_sec); - } -} + const EchoRequest* request, EchoResponse* response); void CheckServerAuthContext( const experimental::ServerContextBase* context, const grpc::string& expected_transport_security_type, - const grpc::string& expected_client_identity) { - std::shared_ptr auth_ctx = context->auth_context(); - std::vector tst = - auth_ctx->FindPropertyValues("transport_security_type"); - EXPECT_EQ(1u, tst.size()); - EXPECT_EQ(expected_transport_security_type, ToString(tst[0])); - if (expected_client_identity.empty()) { - EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); - EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); - EXPECT_FALSE(auth_ctx->IsPeerAuthenticated()); - } else { - auto identity = auth_ctx->GetPeerIdentity(); - EXPECT_TRUE(auth_ctx->IsPeerAuthenticated()); - EXPECT_EQ(1u, identity.size()); - EXPECT_EQ(expected_client_identity, identity[0]); - } -} + const grpc::string& expected_client_identity); // Returns the number of pairs in metadata that exactly match the given // key-value pair. Returns -1 if the pair wasn't found. int MetadataMatchCount( const std::multimap& metadata, - const grpc::string& key, const grpc::string& value) { - int count = 0; - for (const auto& metadatum : metadata) { - if (ToString(metadatum.first) == key && - ToString(metadatum.second) == value) { - count++; - } - } - return count; -} + const grpc::string& key, const grpc::string& value); int GetIntValueFromMetadataHelper( const char* key, const std::multimap& metadata, - int default_value) { - if (metadata.find(key) != metadata.end()) { - std::istringstream iss(ToString(metadata.find(key)->second)); - iss >> default_value; - gpr_log(GPR_INFO, "%s : %d", key, default_value); - } - - return default_value; -} + int default_value); int GetIntValueFromMetadata( const char* key, const std::multimap& metadata, - int default_value) { - return GetIntValueFromMetadataHelper(key, metadata, default_value); -} - -void ServerTryCancel(ServerContext* context) { - EXPECT_FALSE(context->IsCancelled()); - context->TryCancel(); - gpr_log(GPR_INFO, "Server called TryCancel() to cancel the request"); - // Now wait until it's really canceled - while (!context->IsCancelled()) { - gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), - gpr_time_from_micros(1000, GPR_TIMESPAN))); - } -} + int default_value); + +void ServerTryCancel(ServerContext* context); } // namespace internal class TestServiceSignaller { From fcc41aedc81d60f3092d764a1812981c5745235d Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 13:44:53 -0700 Subject: [PATCH 32/37] Fixing build errors. 
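With the cancellation helpers now behind internal::GetIntValueFromMetadata, a test drives them purely through client metadata: the server reads kServerTryCancelRequest and cancels at the requested phase. A client-side sketch of triggering that path, assuming the constants from test_service_impl.h sit in the usual grpc::testing namespace and the generated echo stub is available:

    #include <string>

    #include <grpcpp/grpcpp.h>

    #include "src/proto/grpc/testing/echo.grpc.pb.h"
    #include "test/cpp/end2end/test_service_impl.h"

    // Asks the server-side handler to cancel the RPC while it is processing,
    // by sending the phase value that GetIntValueFromMetadata() reads back.
    grpc::Status EchoWithServerCancel(
        grpc::testing::EchoTestService::Stub* stub) {
      grpc::ClientContext context;
      context.AddMetadata(
          grpc::testing::kServerTryCancelRequest,
          std::to_string(grpc::testing::CANCEL_DURING_PROCESSING));
      grpc::testing::EchoRequest request;
      request.set_message("hello");
      grpc::testing::EchoResponse response;
      return stub->Echo(&context, request, &response);
    }

On that path the handlers above return Status::CANCELLED, so the caller should expect a CANCELLED status rather than an echoed message.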
--- Makefile | 5 ++++- src/core/ext/filters/client_channel/xds/xds_api.cc | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 23275ef86cc..29ece2be8e1 100644 --- a/Makefile +++ b/Makefile @@ -19027,6 +19027,7 @@ XDS_END2END_TEST_SRC = \ $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc \ $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc \ test/cpp/end2end/xds_end2end_test.cc \ + test/cpp/end2end/test_service_impl.cc \ XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC)))) ifeq ($(NO_SECURE),true) @@ -19077,6 +19078,8 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lrs_for_test.o: $(LIBDIR)/$(CONF $(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(LIBDIR)/$(CONFIG)/libgrpcpp_channelz.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a + deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep) ifneq ($(NO_SECURE),true) @@ -19084,7 +19087,7 @@ ifneq ($(NO_DEPS),true) -include $(XDS_END2END_TEST_OBJS:.o=.dep) endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc 
$(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc XDS_INTEROP_CLIENT_SRC = \ diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index 54e24aa0c66..eb9fe41c948 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1025,7 +1025,7 @@ grpc_error* RouteConfigParse( upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); // Empty prefix "" is accepted. if (prefix.size == 1) { - //Prefix "/" is accepted. + // Prefix "/" is accepted. if (prefix.data[0] != '/') { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Prefix is not empty and does starting with a /"); From 39ad034e8890f46a89ac3e52588ad35f5d7d50cc Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 14:13:01 -0700 Subject: [PATCH 33/37] Fixing build errors --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 29ece2be8e1..50a3f375b09 100644 --- a/Makefile +++ b/Makefile @@ -19078,7 +19078,7 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lrs_for_test.o: $(LIBDIR)/$(CONF $(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(LIBDIR)/$(CONFIG)/libgrpcpp_channelz.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep) From 22c7d8d0cd0a9e5afdc91f56e40286b78cc748ff Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 14:55:22 -0700 Subject: [PATCH 34/37] Fixing build error --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 50a3f375b09..d1345dbfad5 100644 --- a/Makefile +++ b/Makefile @@ -19087,8 +19087,8 @@ ifneq ($(NO_DEPS),true) -include $(XDS_END2END_TEST_OBJS:.o=.dep) endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: 
$(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc - +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc XDS_INTEROP_CLIENT_SRC = \ $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc \ From aea9bfcbb2505d22236b191337bf6043e65c7f4c Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 15:29:26 -0700 Subject: [PATCH 35/37] Last bit of build failures. 
--- CMakeLists.txt | 1 + Makefile | 9 +++++---- build_autogenerated.yaml | 1 + 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 05396f8cdcc..9c13037f929 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14450,6 +14450,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX) ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.pb.h ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.h + test/cpp/end2end/test_service_impl.cc test/cpp/end2end/xds_end2end_test.cc third_party/googletest/googletest/src/gtest-all.cc third_party/googletest/googlemock/src/gmock-all.cc diff --git a/Makefile b/Makefile index d1345dbfad5..595d57e4a5a 100644 --- a/Makefile +++ b/Makefile @@ -19026,8 +19026,8 @@ XDS_END2END_TEST_SRC = \ $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc \ $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc \ $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc \ - test/cpp/end2end/xds_end2end_test.cc \ test/cpp/end2end/test_service_impl.cc \ + test/cpp/end2end/xds_end2end_test.cc \ XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC)))) ifeq ($(NO_SECURE),true) @@ -19076,10 +19076,10 @@ $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lds_rds_for_test.o: $(LIBDIR)/$( $(OBJDIR)/$(CONFIG)/src/proto/grpc/testing/xds/lrs_for_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a - $(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libaddress_sorting.a $(LIBDIR)/$(CONFIG)/libupb.a + deps_xds_end2end_test: $(XDS_END2END_TEST_OBJS:.o=.dep) ifneq ($(NO_SECURE),true) @@ -19087,8 +19087,9 @@ ifneq ($(NO_DEPS),true) -include $(XDS_END2END_TEST_OBJS:.o=.dep) endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc 
$(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc $(OBJDIR)/$(CONFIG)/test/cpp/end2end/test_service_impl.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc +$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.pb.cc $(GENDIR)/src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.pb.cc $(GENDIR)/src/proto/grpc/testing/echo.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/echo_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.pb.cc $(GENDIR)/src/proto/grpc/testing/simple_messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/ads_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/cds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lds_rds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/testing/xds/lrs_for_test.grpc.pb.cc + XDS_INTEROP_CLIENT_SRC = \ $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc \ diff --git a/build_autogenerated.yaml b/build_autogenerated.yaml index 02b8c925fd9..1cfd5c7b482 100644 --- a/build_autogenerated.yaml +++ b/build_autogenerated.yaml @@ -7558,6 +7558,7 @@ targets: - src/proto/grpc/testing/xds/eds_for_test.proto - src/proto/grpc/testing/xds/lds_rds_for_test.proto - src/proto/grpc/testing/xds/lrs_for_test.proto + - test/cpp/end2end/test_service_impl.cc - test/cpp/end2end/xds_end2end_test.cc deps: - grpc++_test_util From 424e81eaecb776f391fa1bfa5f581c39450ae8e4 Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Thu, 16 Apr 2020 15:47:26 -0700 Subject: [PATCH 36/37] Code Review comments --- 
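The review splits the two catch-all NACK tests into one test per invalid prefix/path case below. The rules those cases probe can be summarized by this standalone sketch; it mirrors the RouteConfigParse checks in the xds_api.cc change of the next commit rather than being the production code itself:

    #include <vector>
    #include "absl/strings/str_split.h"
    #include "absl/strings/string_view.h"

    // A prefix must be "", "/", or exactly "/service/" with a non-empty service.
    bool IsValidRoutePrefix(absl::string_view prefix) {
      if (prefix.empty()) return true;      // "" is accepted.
      if (prefix[0] != '/') return false;   // must start with '/'.
      if (prefix.size() == 1) return true;  // "/" is accepted.
      std::vector<absl::string_view> parts =
          absl::StrSplit(prefix.substr(1), absl::MaxSplits('/', 1));
      return parts.size() == 2 && !parts[0].empty() && parts[1].empty();
    }

    // A path must be exactly "/service/method" with both parts non-empty.
    bool IsValidRoutePath(absl::string_view path) {
      if (path.empty() || path[0] != '/') return false;
      std::vector<absl::string_view> parts =
          absl::StrSplit(path.substr(1), '/');
      return parts.size() == 2 && !parts[0].empty() && !parts[1].empty();
    }

Under these rules "/grpc.testing.EchoTest1Service/" is a valid prefix and "/grpc.testing.EchoTest1Service/Echo1" a valid path, while "//", a missing leading or trailing '/', extra content after the trailing '/', or an empty service/method name should all be NACKed, which is exactly the per-case split below.
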
test/cpp/end2end/xds_end2end_test.cc | 186 +++++++++++++++++++++------ 1 file changed, 149 insertions(+), 37 deletions(-) diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 7e1fa4af6af..6c3cf0dea84 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1406,21 +1406,20 @@ class XdsEnd2endTest : public ::testing::TestWithParam { } }; - // TODO@donnadionne: Will replace SendRpc in all tests. template Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options, - EchoRequest& request, EchoResponse* response) { - ClientContext context; - context.set_deadline( + ClientContext* context, EchoRequest& request, + EchoResponse* response) { + context->set_deadline( grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); - if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); + if (rpc_options.wait_for_ready) context->set_wait_for_ready(true); switch (rpc_options.method) { case METHOD_ECHO: - return (*stub)->Echo(&context, request, response); + return (*stub)->Echo(context, request, response); case METHOD_ECHO1: - return (*stub)->Echo1(&context, request, response); + return (*stub)->Echo1(context, request, response); case METHOD_ECHO2: - return (*stub)->Echo2(&context, request, response); + return (*stub)->Echo2(context, request, response); } } @@ -1429,6 +1428,7 @@ class XdsEnd2endTest : public ::testing::TestWithParam { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; + ClientContext* context = new ClientContext; request.set_message(kRequestMessage_); if (rpc_options.server_fail) { request.mutable_param()->mutable_expected_error()->set_code( @@ -1437,20 +1437,22 @@ class XdsEnd2endTest : public ::testing::TestWithParam { Status status; switch (rpc_options.service) { case SERVICE_ECHO: - status = SendRpcMethod(&stub_, rpc_options, request, response); + status = SendRpcMethod(&stub_, rpc_options, context, request, response); break; case SERVICE_ECHO1: - status = SendRpcMethod(&stub1_, rpc_options, request, response); + status = + SendRpcMethod(&stub1_, rpc_options, context, request, response); break; case SERVICE_ECHO2: - status = SendRpcMethod(&stub2_, rpc_options, request, response); + status = + SendRpcMethod(&stub2_, rpc_options, context, request, response); break; } + delete context; if (local_response) delete response; return status; } - // TODO@donnadionne: Will replace ChedkRpcSendOk in all tests. void CheckRpcSendOk(const size_t times = 1, const RpcOptions& rpc_options = RpcOptions()) { for (size_t i = 0; i < times; ++i) { @@ -2235,16 +2237,15 @@ TEST_P(LdsTest, RouteMatchHasNonemptyPrefix) { } // Tests that LDS client should send a NACK if route match has a prefix -// not in the format "/service/": missing / or did not end with /. -TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { +// string with no "/". 
+TEST_P(LdsTest, RouteMatchHasInvalidPrefixNonEmptyNoSlash) { ResetStub(/*failover_timeout=*/0, /*expected_targets=*/"", - /*xds_resource_does_not_exist_timeout*/ 30000, + /*xds_resource_does_not_exist_timeout*/ 0, /*xds_routing_enabled=*/true); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); - // Invalid case 1: no / route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service"); auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); @@ -2256,7 +2257,18 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 2: missing / at the end +} + +// Tests that LDS client should send a NACK if route match has a prefix +// string does not end with "/". +TEST_P(LdsTest, RouteMatchHasInvalidPrefixNoEndingSlash) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2265,7 +2277,18 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 3: missing / at the beginning +} + +// Tests that LDS client should send a NACK if route match has a prefix +// string does not start with "/". +TEST_P(LdsTest, RouteMatchHasInvalidPrefixNoLeadingSlash) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); route1->mutable_match()->set_prefix("grpc.testing.EchoTest1Service/"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2274,7 +2297,18 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 4: extra content outside of "/service/" +} + +// Tests that LDS client should send a NACK if route match has a prefix +// string with extra content outside of "/service/". 
+TEST_P(LdsTest, RouteMatchHasInvalidPrefixExtraContent) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); route1->mutable_match()->set_prefix("/grpc.testing.EchoTest1Service/Echo1"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2283,7 +2317,18 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 5: empty prefix "//" +} + +// Tests that LDS client should send a NACK if route match has a prefix +// string "//". +TEST_P(LdsTest, RouteMatchHasInvalidPrefixNoContent) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); route1->mutable_match()->set_prefix("//"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2295,11 +2340,11 @@ TEST_P(LdsTest, RouteMatchHasInvalidPrefix) { } // Tests that LDS client should send a NACK if route match has path -// not in the format of "/service/method" -TEST_P(LdsTest, RouteMatchHasInvalidPath) { +// but it's empty. +TEST_P(LdsTest, RouteMatchHasInvalidPathEmptyPath) { ResetStub(/*failover_timeout=*/0, /*expected_targets=*/"", - /*xds_resource_does_not_exist_timeout*/ 30000, + /*xds_resource_does_not_exist_timeout*/ 0, /*xds_routing_enabled=*/true); RouteConfiguration route_config = balancers_[0]->ads_service()->default_route_config(); @@ -2307,7 +2352,6 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); default_route->mutable_match()->set_prefix(""); default_route->mutable_route()->set_cluster(kDefaultResourceName); - // Invalid case 1: empty path route1->mutable_match()->set_path(""); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2316,7 +2360,21 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 2: missing / at the beginning +} + +// Tests that LDS client should send a NACK if route match has path +// string does not start with "/". 
+TEST_P(LdsTest, RouteMatchHasInvalidPathNoLeadingSlash) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); route1->mutable_match()->set_path("grpc.testing.EchoTest1Service/Echo1"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2325,7 +2383,21 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 3: extra / at the end +} + +// Tests that LDS client should send a NACK if route match has path +// string that ends with "/". +TEST_P(LdsTest, RouteMatchHasInvalidPathEndsWithSlash) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/Echo1/"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2334,7 +2406,21 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 4: missinga / in the middle +} + +// Tests that LDS client should send a NACK if route match has path +// string that misses "/" between service and method. +TEST_P(LdsTest, RouteMatchHasInvalidPathMissingMiddleSlash) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service.Echo1"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2343,7 +2429,21 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 5: empty service "//Echo1" +} + +// Tests that LDS client should send a NACK if route match has path +// string that is missing service. 
+TEST_P(LdsTest, RouteMatchHasInvalidPathMissingService) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); route1->mutable_match()->set_path("//Echo1"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2352,7 +2452,21 @@ TEST_P(LdsTest, RouteMatchHasInvalidPath) { CheckRpcSendFailure(); EXPECT_EQ(balancers_[0]->ads_service()->lds_response_state(), AdsServiceImpl::NACKED); - // Invalid case 5: empty method "/grpc.testing.EchoTest1Service/" +} + +// Tests that LDS client should send a NACK if route match has path +// string that is missing method. +TEST_P(LdsTest, RouteMatchHasInvalidPathMissingMethod) { + ResetStub(/*failover_timeout=*/0, + /*expected_targets=*/"", + /*xds_resource_does_not_exist_timeout*/ 0, + /*xds_routing_enabled=*/true); + RouteConfiguration route_config = + balancers_[0]->ads_service()->default_route_config(); + auto* route1 = route_config.mutable_virtual_hosts(0)->mutable_routes(0); + auto* default_route = route_config.mutable_virtual_hosts(0)->add_routes(); + default_route->mutable_match()->set_prefix(""); + default_route->mutable_route()->set_cluster(kDefaultResourceName); route1->mutable_match()->set_path("/grpc.testing.EchoTest1Service/"); balancers_[0]->ads_service()->SetLdsResource( AdsServiceImpl::BuildListener(route_config), kDefaultResourceName); @@ -2541,14 +2655,12 @@ TEST_P(LdsTest, XdsRoutingPrefixMatching) { balancers_[0]->ads_service()->SetLdsResource(listener, kDefaultResourceName); WaitForAllBackends(0, 2); CheckRpcSendOk(kNumEchoRpcs, RpcOptions().set_wait_for_ready(true)); - CheckRpcSendOk(kNumEcho1Rpcs, RpcOptions() - .set_rpc_service(SERVICE_ECHO1) - .set_rpc_method(METHOD_ECHO1) - .set_wait_for_ready(true)); - CheckRpcSendOk(kNumEcho2Rpcs, RpcOptions() - .set_rpc_service(SERVICE_ECHO2) - .set_rpc_method(METHOD_ECHO2) - .set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho1Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO1).set_wait_for_ready(true)); + CheckRpcSendOk( + kNumEcho2Rpcs, + RpcOptions().set_rpc_service(SERVICE_ECHO2).set_wait_for_ready(true)); // Make sure RPCs all go to the correct backend. for (size_t i = 0; i < 2; ++i) { EXPECT_EQ(kNumEchoRpcs / 2, From 7db619ba959d1c237ea4f2e54d3b3a13a2a276ec Mon Sep 17 00:00:00 2001 From: Donna Dionne Date: Fri, 17 Apr 2020 11:19:19 -0700 Subject: [PATCH 37/37] Code review comments. --- .../ext/filters/client_channel/xds/xds_api.cc | 43 +++++++++---------- test/cpp/end2end/xds_end2end_test.cc | 16 +++---- 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/core/ext/filters/client_channel/xds/xds_api.cc b/src/core/ext/filters/client_channel/xds/xds_api.cc index eb9fe41c948..4be62a33cd0 100644 --- a/src/core/ext/filters/client_channel/xds/xds_api.cc +++ b/src/core/ext/filters/client_channel/xds/xds_api.cc @@ -1024,29 +1024,28 @@ grpc_error* RouteConfigParse( if (envoy_api_v2_route_RouteMatch_has_prefix(match)) { upb_strview prefix = envoy_api_v2_route_RouteMatch_prefix(match); // Empty prefix "" is accepted. 
- if (prefix.size == 1) { + if (prefix.size > 0) { // Prefix "/" is accepted. if (prefix.data[0] != '/') { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix is not empty and does starting with a /"); + "Prefix does not start with a /"); } - } else if (prefix.size > 1) { - if (prefix.data[0] != '/') { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix is not starting with a /"); - } - std::vector prefix_elements = absl::StrSplit( - absl::string_view(prefix.data, prefix.size).substr(1), '/'); - if (prefix_elements.size() != 2) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix not in the required format of /service/"); - } else if (!prefix_elements[1].empty()) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Prefix is not ending with a /"); - } else if (prefix_elements[0].empty()) { - return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Prefix cannot be empty"); + if (prefix.size > 1) { + std::vector prefix_elements = absl::StrSplit( + absl::string_view(prefix.data, prefix.size).substr(1), + absl::MaxSplits('/', 1)); + if (prefix_elements.size() != 2) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix not in the required format of /service/"); + } else if (!prefix_elements[1].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix does not end with a /"); + } else if (prefix_elements[0].empty()) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Prefix contains empty service name"); + } + rds_route.service = std::string(prefix_elements[0]); } - rds_route.service = std::string(prefix_elements[0]); } } else if (envoy_api_v2_route_RouteMatch_has_path(match)) { upb_strview path = envoy_api_v2_route_RouteMatch_path(match); @@ -1056,7 +1055,7 @@ grpc_error* RouteConfigParse( } if (path.data[0] != '/') { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Path is not starting with a /"); + "Path does not start with a /"); } std::vector path_elements = absl::StrSplit( absl::string_view(path.data, path.size).substr(1), '/'); @@ -1065,10 +1064,10 @@ grpc_error* RouteConfigParse( "Path not in the required format of /service/method"); } else if (path_elements[0].empty()) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Path service name cannot be empty"); + "Path contains empty service name"); } else if (path_elements[1].empty()) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "Path method name cannot be empty"); + "Path contains empty method name"); } rds_route.service = std::string(path_elements[0]); rds_route.method = std::string(path_elements[1]); @@ -1092,7 +1091,7 @@ grpc_error* RouteConfigParse( envoy_api_v2_route_RouteAction_cluster(route_action); if (action.size == 0) { return GRPC_ERROR_CREATE_FROM_STATIC_STRING( - "RouteAction has empty cluster."); + "RouteAction contains empty cluster."); } rds_route.cluster_name = std::string(action.data, action.size); rds_update->routes.emplace_back(std::move(rds_route)); diff --git a/test/cpp/end2end/xds_end2end_test.cc b/test/cpp/end2end/xds_end2end_test.cc index 6c3cf0dea84..29e3eb834d9 100644 --- a/test/cpp/end2end/xds_end2end_test.cc +++ b/test/cpp/end2end/xds_end2end_test.cc @@ -1410,9 +1410,6 @@ class XdsEnd2endTest : public ::testing::TestWithParam { Status SendRpcMethod(Stub* stub, const RpcOptions& rpc_options, ClientContext* context, EchoRequest& request, EchoResponse* response) { - context->set_deadline( - grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); - if (rpc_options.wait_for_ready) context->set_wait_for_ready(true); switch (rpc_options.method) { case METHOD_ECHO: return 
(*stub)->Echo(context, request, response); @@ -1428,7 +1425,10 @@ class XdsEnd2endTest : public ::testing::TestWithParam { const bool local_response = (response == nullptr); if (local_response) response = new EchoResponse; EchoRequest request; - ClientContext* context = new ClientContext; + ClientContext context; + context.set_deadline( + grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms)); + if (rpc_options.wait_for_ready) context.set_wait_for_ready(true); request.set_message(kRequestMessage_); if (rpc_options.server_fail) { request.mutable_param()->mutable_expected_error()->set_code( @@ -1437,18 +1437,18 @@ class XdsEnd2endTest : public ::testing::TestWithParam { Status status; switch (rpc_options.service) { case SERVICE_ECHO: - status = SendRpcMethod(&stub_, rpc_options, context, request, response); + status = + SendRpcMethod(&stub_, rpc_options, &context, request, response); break; case SERVICE_ECHO1: status = - SendRpcMethod(&stub1_, rpc_options, context, request, response); + SendRpcMethod(&stub1_, rpc_options, &context, request, response); break; case SERVICE_ECHO2: status = - SendRpcMethod(&stub2_, rpc_options, context, request, response); + SendRpcMethod(&stub2_, rpc_options, &context, request, response); break; } - delete context; if (local_response) delete response; return status; }
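
The net effect of the SendRpc change above: the ClientContext goes back to being a stack object created fresh for every RPC (a grpc::ClientContext must not be reused across calls), and the deadline and wait_for_ready settings are applied once in SendRpc rather than inside SendRpcMethod. A hedged usage sketch of the resulting call path, with a placeholder message standing in for the test's kRequestMessage_:

    // One context per RPC; configure it, then hand a pointer down.
    ClientContext context;
    context.set_deadline(
        grpc_timeout_milliseconds_to_deadline(rpc_options.timeout_ms));
    if (rpc_options.wait_for_ready) context.set_wait_for_ready(true);
    EchoRequest request;
    request.set_message("hello");  // placeholder, not the test's kRequestMessage_
    EchoResponse response;
    Status status =
        SendRpcMethod(&stub_, rpc_options, &context, request, &response);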