diff --git a/BUILD b/BUILD
index 7d5be55a332..d5e863c552d 100644
--- a/BUILD
+++ b/BUILD
@@ -1262,7 +1262,6 @@ grpc_cc_library(
         "envoy_ads_upb",
         "grpc_base",
         "grpc_client_channel",
-        "grpc_lb_upb",
         "grpc_resolver_fake",
     ],
 )
@@ -1286,7 +1285,6 @@ grpc_cc_library(
         "envoy_ads_upb",
         "grpc_base",
         "grpc_client_channel",
-        "grpc_lb_upb",
         "grpc_resolver_fake",
         "grpc_secure",
     ],
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 70aaedd08a4..21511bb9738 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -17674,17 +17674,24 @@ endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
 add_executable(xds_end2end_test
-  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/xds_for_test.pb.cc
-  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.cc
-  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/xds_for_test.pb.h
-  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.h
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/eds_for_test.pb.cc
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/eds_for_test.pb.h
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.h
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/lrs_for_test.pb.cc
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.cc
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/lrs_for_test.pb.h
+  ${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.h
   test/cpp/end2end/xds_end2end_test.cc
   third_party/googletest/googletest/src/gtest-all.cc
   third_party/googletest/googlemock/src/gmock-all.cc
 )
 
 protobuf_generate_grpc_cpp(
-  src/proto/grpc/lb/v2/xds_for_test.proto
+  src/proto/grpc/lb/v2/eds_for_test.proto
+)
+protobuf_generate_grpc_cpp(
+  src/proto/grpc/lb/v2/lrs_for_test.proto
 )
 
 target_include_directories(xds_end2end_test
diff --git a/Makefile b/Makefile
index ef5963dc441..df0dfa30b40 100644
--- a/Makefile
+++ b/Makefile
@@ -2700,16 +2700,32 @@ $(GENDIR)/src/proto/grpc/lb/v1/load_balancer.grpc.pb.cc: src/proto/grpc/lb/v1/lo
 endif
 
 ifeq ($(NO_PROTOC),true)
-$(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.pb.cc: protoc_dep_error
-$(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.cc: protoc_dep_error
+$(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc: protoc_dep_error
+$(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc: protoc_dep_error
 else
-$(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.pb.cc: src/proto/grpc/lb/v2/xds_for_test.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
+$(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc: src/proto/grpc/lb/v2/eds_for_test.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
 	$(E) "[PROTOC] Generating protobuf CC file from $<"
 	$(Q) mkdir -p `dirname $@`
 	$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
 
-$(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.cc: src/proto/grpc/lb/v2/xds_for_test.proto $(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
+$(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc: src/proto/grpc/lb/v2/eds_for_test.proto $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS)
 	$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
 	$(Q) mkdir -p `dirname $@`
 	$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
+endif
+
+ifeq ($(NO_PROTOC),true)
+$(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.pb.cc: protoc_dep_error
+$(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.cc: protoc_dep_error
+else
+
+$(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.pb.cc: src/proto/grpc/lb/v2/lrs_for_test.proto $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc
+	$(E) "[PROTOC] Generating protobuf CC file from $<"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --cpp_out=$(GENDIR) $<
+
+$(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.cc: src/proto/grpc/lb/v2/lrs_for_test.proto $(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.pb.cc $(PROTOBUF_DEP) $(PROTOC_PLUGINS) $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc
 	$(E) "[GRPC] Generating gRPC's protobuf service CC file from $<"
 	$(Q) mkdir -p `dirname $@`
 	$(Q) $(PROTOC) -Ithird_party/protobuf/src -I. --grpc_out=$(GENDIR) --plugin=protoc-gen-grpc=$(PROTOC_PLUGINS_DIR)/grpc_cpp_plugin$(EXECUTABLE_SUFFIX) $<
@@ -19744,7 +19760,8 @@ endif
 
 XDS_END2END_TEST_SRC = \
-    $(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.cc \
+    $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc \
+    $(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.cc \
     test/cpp/end2end/xds_end2end_test.cc \
 
 XDS_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(XDS_END2END_TEST_SRC))))
@@ -19776,7 +19793,9 @@ endif
 endif
 
-$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v2/xds_for_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v2/eds_for_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+$(OBJDIR)/$(CONFIG)/src/proto/grpc/lb/v2/lrs_for_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
 
 $(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
@@ -19787,7 +19806,7 @@ ifneq ($(NO_DEPS),true)
 -include $(XDS_END2END_TEST_OBJS:.o=.dep)
 endif
 endif
-$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/xds_for_test.grpc.pb.cc
+$(OBJDIR)/$(CONFIG)/test/cpp/end2end/xds_end2end_test.o: $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/eds_for_test.grpc.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.pb.cc $(GENDIR)/src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.cc
 
 PUBLIC_HEADERS_MUST_BE_C89_SRC = \
diff --git a/build.yaml b/build.yaml
index 0b8c1955d27..4bb2e401457 100644
--- a/build.yaml
+++ b/build.yaml
@@ -833,7 +833,6 @@ filegroups:
   - grpc_base
   - grpc_client_channel
   - grpc_resolver_fake
-  - grpc_lb_upb
 - name: grpc_lb_policy_xds_secure
   headers:
   - src/core/ext/filters/client_channel/lb_policy/xds/xds.h
@@ -852,7 +851,6 @@ filegroups:
   - grpc_client_channel
   - grpc_resolver_fake
   - grpc_secure
-  - grpc_lb_upb
 - name: grpc_lb_subchannel_list
   headers:
   - src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -6027,7 +6025,8 @@ targets:
   build: test
   language: c++
   src:
-  - src/proto/grpc/lb/v2/xds_for_test.proto
+  - src/proto/grpc/lb/v2/eds_for_test.proto
+  - src/proto/grpc/lb/v2/lrs_for_test.proto
   - test/cpp/end2end/xds_end2end_test.cc
   deps:
   - grpc++_test_util
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index ac79ae92d5a..767a22e9e59 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -708,7 +708,7 @@ class CallData {
   LbCallState lb_call_state_;
   RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
   void (*lb_recv_trailing_metadata_ready_)(
-      void* user_data,
+      void* user_data, grpc_error* error,
       LoadBalancingPolicy::MetadataInterface* recv_trailing_metadata,
       LoadBalancingPolicy::CallState* call_state) = nullptr;
   void* lb_recv_trailing_metadata_ready_user_data_ = nullptr;
@@ -2160,8 +2160,8 @@ void CallData::RecvTrailingMetadataReadyForLoadBalancingPolicy(
   // Invoke callback to LB policy.
   Metadata trailing_metadata(calld, calld->recv_trailing_metadata_);
   calld->lb_recv_trailing_metadata_ready_(
-      calld->lb_recv_trailing_metadata_ready_user_data_, &trailing_metadata,
-      &calld->lb_call_state_);
+      calld->lb_recv_trailing_metadata_ready_user_data_, error,
+      &trailing_metadata, &calld->lb_call_state_);
   // Chain to original callback.
   GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_ready_,
                    GRPC_ERROR_REF(error));
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index a205d333ae8..ea4962cdb77 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -174,8 +174,11 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     /// modified by the callback.  The callback does not take ownership,
     /// however, so any data that needs to be used after returning must
     /// be copied.
+    // TODO(roth): Replace grpc_error with something better before we allow
+    // people outside of gRPC team to use this API.
     void (*recv_trailing_metadata_ready)(
-        void* user_data, MetadataInterface* recv_trailing_metadata,
+        void* user_data, grpc_error* error,
+        MetadataInterface* recv_trailing_metadata,
         CallState* call_state) = nullptr;
     void* recv_trailing_metadata_ready_user_data = nullptr;
   };
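The lb_policy.h change above threads a `grpc_error*` through the per-call trailing-metadata callback so a policy can tell failed calls from successful ones without inspecting metadata. The following is an illustrative, standalone sketch (not part of this patch) of how a policy-side tracker would use the new signature; `CallTracker`, `MakeCompletionRecorder`, and the stand-in types are hypothetical simplifications of the gRPC-internal ones:

```cpp
#include <cstdio>

struct grpc_error;         // opaque stand-in; GRPC_ERROR_NONE behaves as null
struct MetadataInterface;  // stand-in for LoadBalancingPolicy::MetadataInterface
struct CallState;          // stand-in for LoadBalancingPolicy::CallState

struct CallTracker {
  int calls_failed = 0;
  int calls_succeeded = 0;
};

// Matches the shape of recv_trailing_metadata_ready after this change:
// user_data first, then the new grpc_error* argument.
using RecvTrailingMetadataReady = void (*)(void* user_data, grpc_error* error,
                                           MetadataInterface* metadata,
                                           CallState* call_state);

RecvTrailingMetadataReady MakeCompletionRecorder() {
  // A captureless lambda converts to the required function pointer.
  return [](void* user_data, grpc_error* error, MetadataInterface*,
            CallState*) {
    auto* tracker = static_cast<CallTracker*>(user_data);
    // The new error argument is the whole point: no metadata parsing needed.
    if (error != nullptr) {
      ++tracker->calls_failed;
    } else {
      ++tracker->calls_succeeded;
    }
  };
}

int main() {
  CallTracker tracker;
  RecvTrailingMetadataReady cb = MakeCompletionRecorder();
  cb(&tracker, nullptr, nullptr, nullptr);  // simulate a successful call
  std::printf("succeeded=%d failed=%d\n", tracker.calls_succeeded,
              tracker.calls_failed);
}
```

This is exactly the pattern `XdsLb::PickerWrapper` adopts further down in the xds.cc diff.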
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
index 851b84b2a71..58c1ebad39e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
@@ -108,6 +108,7 @@
 #define GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_XDS_RECONNECT_JITTER 0.2
 #define GRPC_XDS_DEFAULT_FALLBACK_TIMEOUT_MS 10000
+#define GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS 1000
 
 namespace grpc_core {
 
@@ -154,95 +155,192 @@ class XdsLb : public LoadBalancingPolicy {
   void ResetBackoffLocked() override;
 
  private:
-  /// Contains a channel to the LB server and all the data related to the
-  /// channel.
-  class BalancerChannelState
-      : public InternallyRefCounted<BalancerChannelState> {
+  // Contains a channel to the LB server and all the data related to the
+  // channel. Holds a ref to the xds policy.
+  class LbChannelState : public InternallyRefCounted<LbChannelState> {
    public:
-    /// Contains a call to the LB server and all the data related to the call.
-    class BalancerCallState : public InternallyRefCounted<BalancerCallState> {
+    // An LB call wrapper that can restart a call upon failure. Holds a ref to
+    // the LB channel. The template parameter is the kind of wrapped LB call.
+    template <typename T>
+    class RetryableLbCall : public InternallyRefCounted<RetryableLbCall<T>> {
      public:
-      explicit BalancerCallState(RefCountedPtr<BalancerChannelState> lb_chand);
+      explicit RetryableLbCall(RefCountedPtr<LbChannelState> lb_chand);
 
-      // It's the caller's responsibility to ensure that Orphan() is called
-      // from inside the combiner.
       void Orphan() override;
 
-      void StartQuery();
+      void OnCallFinishedLocked();
 
-      RefCountedPtr<XdsLbClientStats> client_stats() const {
-        return client_stats_;
-      }
+      T* lb_calld() const { return lb_calld_.get(); }
+      LbChannelState* lb_chand() const { return lb_chand_.get(); }
+
+     private:
+      void StartNewCallLocked();
+      void StartRetryTimerLocked();
+      static void OnRetryTimerLocked(void* arg, grpc_error* error);
+
+      // The wrapped LB call that talks to the LB server. It's instantiated
+      // every time we start a new call. It's null during call retry backoff.
+      OrphanablePtr<T> lb_calld_;
+      // The owning LB channel.
+      RefCountedPtr<LbChannelState> lb_chand_;
+
+      // Retry state.
+      BackOff backoff_;
+      grpc_timer retry_timer_;
+      grpc_closure on_retry_timer_;
+      bool retry_timer_callback_pending_ = false;
+
+      bool shutting_down_ = false;
+    };
+
+    // Contains an EDS call to the LB server.
+    class EdsCallState : public InternallyRefCounted<EdsCallState> {
+     public:
+      // The ctor and dtor should not be used directly.
+      explicit EdsCallState(
+          RefCountedPtr<RetryableLbCall<EdsCallState>> parent);
+      ~EdsCallState() override;
+
+      void Orphan() override;
+
+      RetryableLbCall<EdsCallState>* parent() const { return parent_.get(); }
+      LbChannelState* lb_chand() const { return parent_->lb_chand(); }
+      XdsLb* xdslb_policy() const { return lb_chand()->xdslb_policy(); }
       bool seen_response() const { return seen_response_; }
 
      private:
-      GRPC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
+      static void OnResponseReceivedLocked(void* arg, grpc_error* error);
+      static void OnStatusReceivedLocked(void* arg, grpc_error* error);
+
+      bool IsCurrentCallOnChannel() const;
 
-      ~BalancerCallState();
+      // The owning RetryableLbCall<>.
+      RefCountedPtr<RetryableLbCall<EdsCallState>> parent_;
+      bool seen_response_ = false;
 
-      XdsLb* xdslb_policy() const { return lb_chand_->xdslb_policy_.get(); }
+      // Always non-NULL.
+      grpc_call* lb_call_;
 
-      bool IsCurrentCallOnChannel() const {
-        return this == lb_chand_->lb_calld_.get();
-      }
+      // recv_initial_metadata
+      grpc_metadata_array initial_metadata_recv_;
+
+      // send_message
+      grpc_byte_buffer* send_message_payload_ = nullptr;
+
+      // recv_message
+      grpc_byte_buffer* recv_message_payload_ = nullptr;
+      grpc_closure on_response_received_;
+
+      // recv_trailing_metadata
+      grpc_metadata_array trailing_metadata_recv_;
+      grpc_status_code status_code_;
+      grpc_slice status_details_;
+      grpc_closure on_status_received_;
+    };
+
+    // Contains an LRS call to the LB server.
+    class LrsCallState : public InternallyRefCounted<LrsCallState> {
+     public:
+      // The ctor and dtor should not be used directly.
+      explicit LrsCallState(
+          RefCountedPtr<RetryableLbCall<LrsCallState>> parent);
+      ~LrsCallState() override;
 
-      void ScheduleNextClientLoadReportLocked();
-      void SendClientLoadReportLocked();
+      void Orphan() override;
+
+      void MaybeStartReportingLocked();
+
+      RetryableLbCall<LrsCallState>* parent() { return parent_.get(); }
+      LbChannelState* lb_chand() const { return parent_->lb_chand(); }
+      XdsLb* xdslb_policy() const { return lb_chand()->xdslb_policy(); }
+      bool seen_response() const { return seen_response_; }
+
+     private:
+      // Reports client-side load stats according to a fixed interval.
+      class Reporter : public InternallyRefCounted<Reporter> {
+       public:
+        Reporter(RefCountedPtr<LrsCallState> parent,
+                 grpc_millis report_interval)
+            : parent_(std::move(parent)), report_interval_(report_interval) {
+          GRPC_CLOSURE_INIT(
+              &on_next_report_timer_, OnNextReportTimerLocked, this,
+              grpc_combiner_scheduler(xdslb_policy()->combiner()));
+          GRPC_CLOSURE_INIT(
+              &on_report_done_, OnReportDoneLocked, this,
+              grpc_combiner_scheduler(xdslb_policy()->combiner()));
+          ScheduleNextReportLocked();
+        }
+
+        void Orphan() override;
 
-      static bool LoadReportCountersAreZero(xds_grpclb_request* request);
+       private:
+        void ScheduleNextReportLocked();
+        static void OnNextReportTimerLocked(void* arg, grpc_error* error);
+        void SendReportLocked();
+        static void OnReportDoneLocked(void* arg, grpc_error* error);
+
+        bool IsCurrentReporterOnCall() const {
+          return this == parent_->reporter_.get();
+        }
+        XdsLb* xdslb_policy() const { return parent_->xdslb_policy(); }
+
+        // The owning LRS call.
+        RefCountedPtr<LrsCallState> parent_;
+
+        // The load reporting state.
+        const grpc_millis report_interval_;
+        bool last_report_counters_were_zero_ = false;
+        bool next_report_timer_callback_pending_ = false;
+        grpc_timer next_report_timer_;
+        grpc_closure on_next_report_timer_;
+        grpc_closure on_report_done_;
+      };
 
-      static void MaybeSendClientLoadReportLocked(void* arg, grpc_error* error);
       static void OnInitialRequestSentLocked(void* arg, grpc_error* error);
-      static void OnBalancerMessageReceivedLocked(void* arg, grpc_error* error);
-      static void OnBalancerStatusReceivedLocked(void* arg, grpc_error* error);
+      static void OnResponseReceivedLocked(void* arg, grpc_error* error);
+      static void OnStatusReceivedLocked(void* arg, grpc_error* error);
+
+      bool IsCurrentCallOnChannel() const;
 
-      // The owning LB channel.
-      RefCountedPtr<BalancerChannelState> lb_chand_;
+      // The owning RetryableLbCall<>.
+      RefCountedPtr<RetryableLbCall<LrsCallState>> parent_;
+      bool seen_response_ = false;
 
-      // The streaming call to the LB server. Always non-NULL.
-      grpc_call* lb_call_ = nullptr;
+      // Always non-NULL.
+      grpc_call* lb_call_;
 
       // recv_initial_metadata
-      grpc_metadata_array lb_initial_metadata_recv_;
+      grpc_metadata_array initial_metadata_recv_;
 
       // send_message
       grpc_byte_buffer* send_message_payload_ = nullptr;
-      grpc_closure lb_on_initial_request_sent_;
+      grpc_closure on_initial_request_sent_;
 
       // recv_message
       grpc_byte_buffer* recv_message_payload_ = nullptr;
-      grpc_closure lb_on_balancer_message_received_;
-      bool seen_response_ = false;
+      grpc_closure on_response_received_;
 
       // recv_trailing_metadata
-      grpc_closure lb_on_balancer_status_received_;
-      grpc_metadata_array lb_trailing_metadata_recv_;
-      grpc_status_code lb_call_status_;
-      grpc_slice lb_call_status_details_;
-
-      // The stats for client-side load reporting associated with this LB call.
-      // Created after the first serverlist is received.
-      RefCountedPtr<XdsLbClientStats> client_stats_;
-      grpc_millis client_stats_report_interval_ = 0;
-      grpc_timer client_load_report_timer_;
-      bool client_load_report_timer_callback_pending_ = false;
-      bool last_client_load_report_counters_were_zero_ = false;
-      bool client_load_report_is_due_ = false;
-      // The closure used for either the load report timer or the callback for
-      // completion of sending the load report.
-      grpc_closure client_load_report_closure_;
+      grpc_metadata_array trailing_metadata_recv_;
+      grpc_status_code status_code_;
+      grpc_slice status_details_;
+      grpc_closure on_status_received_;
+
+      // Load reporting state.
+      grpc_millis load_reporting_interval_ = 0;
+      OrphanablePtr<Reporter> reporter_;
     };
 
-    BalancerChannelState(const char* balancer_name,
-                         const grpc_channel_args& args,
-                         RefCountedPtr<XdsLb> parent_xdslb_policy);
-    ~BalancerChannelState();
+    LbChannelState(RefCountedPtr<XdsLb> xdslb_policy, const char* balancer_name,
+                   const grpc_channel_args& args);
+    ~LbChannelState();
 
     void Orphan() override;
 
     grpc_channel* channel() const { return channel_; }
-    BalancerCallState* lb_calld() const { return lb_calld_.get(); }
+    XdsLb* xdslb_policy() const { return xdslb_policy_.get(); }
+    EdsCallState* eds_calld() const { return eds_calld_->lb_calld(); }
+    LrsCallState* lrs_calld() const { return lrs_calld_->lb_calld(); }
 
     bool IsCurrentChannel() const {
       return this == xdslb_policy_->lb_chand_.get();
@@ -250,11 +348,7 @@ class XdsLb : public LoadBalancingPolicy {
     bool IsPendingChannel() const {
       return this == xdslb_policy_->pending_lb_chand_.get();
     }
-    bool HasActiveCall() const { return lb_calld_ != nullptr; }
-
-    void StartCallRetryTimerLocked();
-    static void OnCallRetryTimerLocked(void* arg, grpc_error* error);
-    void StartCallLocked();
+    bool HasActiveEdsCall() const { return eds_calld_->lb_calld() != nullptr; }
 
     void StartConnectivityWatchLocked();
     void CancelConnectivityWatchLocked();
@@ -270,29 +364,35 @@ class XdsLb : public LoadBalancingPolicy {
     grpc_connectivity_state connectivity_ = GRPC_CHANNEL_IDLE;
     grpc_closure on_connectivity_changed_;
 
-    // The data associated with the current LB call. It holds a ref to this LB
-    // channel. It's instantiated every time we query for backends. It's reset
-    // whenever the current LB call is no longer needed (e.g., the LB policy is
-    // shutting down, or the LB call has ended). A non-NULL lb_calld_ always
-    // contains a non-NULL lb_call_.
-    OrphanablePtr<BalancerCallState> lb_calld_;
-    BackOff lb_call_backoff_;
-    grpc_timer lb_call_retry_timer_;
-    grpc_closure lb_on_call_retry_;
-    bool retry_timer_callback_pending_ = false;
+    // The retryable XDS calls to the LB server.
+    OrphanablePtr<RetryableLbCall<EdsCallState>> eds_calld_;
+    OrphanablePtr<RetryableLbCall<LrsCallState>> lrs_calld_;
   };
 
-  // Since pickers are UniquePtrs we use this RefCounted wrapper
-  // to control references to it by the xds picker and the locality
-  // entry
-  class PickerRef : public RefCounted<PickerRef> {
+  // We need this wrapper for the following reasons:
+  // 1. To process per-locality load reporting.
+  // 2. Since pickers are UniquePtrs we use this RefCounted wrapper to control
+  //    references to it by the xds picker and the locality entry.
+  class PickerWrapper : public RefCounted<PickerWrapper> {
    public:
-    explicit PickerRef(UniquePtr<SubchannelPicker> picker)
-        : picker_(std::move(picker)) {}
-    PickResult Pick(PickArgs args) { return picker_->Pick(args); }
+    PickerWrapper(UniquePtr<SubchannelPicker> picker,
+                  RefCountedPtr<XdsClientStats::LocalityStats> locality_stats)
+        : picker_(std::move(picker)),
+          locality_stats_(std::move(locality_stats)) {
+      locality_stats_->RefByPicker();
+    }
+    ~PickerWrapper() { locality_stats_->UnrefByPicker(); }
+
+    PickResult Pick(PickArgs args);
 
    private:
+    static void RecordCallCompletion(
+        void* arg, grpc_error* error,
+        LoadBalancingPolicy::MetadataInterface* recv_trailing_metadata,
+        LoadBalancingPolicy::CallState* call_state);
+
     UniquePtr<SubchannelPicker> picker_;
+    RefCountedPtr<XdsClientStats::LocalityStats> locality_stats_;
   };
 
   // The picker will use a stateless weighting algorithm to pick the locality to
@@ -304,17 +404,15 @@ class XdsLb : public LoadBalancingPolicy {
     // proportional to the locality's weight. The start of the range is the
    // previous value in the vector and is 0 for the first element.
     using PickerList =
-        InlinedVector<Pair<uint32_t, RefCountedPtr<PickerRef>>, 1>;
-    Picker(RefCountedPtr<XdsLbClientStats> client_stats, PickerList pickers)
-        : client_stats_(std::move(client_stats)),
-          pickers_(std::move(pickers)) {}
+        InlinedVector<Pair<uint32_t, RefCountedPtr<PickerWrapper>>, 1>;
+    explicit Picker(PickerList pickers) : pickers_(std::move(pickers)) {}
 
     PickResult Pick(PickArgs args) override;
 
    private:
-    // Calls the picker of the locality that the key falls within
+    // Calls the picker of the locality that the key falls within.
     PickResult PickFromLocality(const uint32_t key, PickArgs args);
 
-    RefCountedPtr<XdsLbClientStats> client_stats_;
+
     PickerList pickers_;
   };
@@ -386,6 +484,7 @@ class XdsLb : public LoadBalancingPolicy {
       RefCountedPtr<LocalityEntry> entry_;
       LoadBalancingPolicy* child_ = nullptr;
     };
+
     // Methods for dealing with the child policy.
     OrphanablePtr<LoadBalancingPolicy> CreateChildPolicyLocked(
         const char* name, const grpc_channel_args* args);
@@ -396,7 +495,7 @@ class XdsLb : public LoadBalancingPolicy {
     RefCountedPtr<XdsLocalityName> name_;
     OrphanablePtr<LoadBalancingPolicy> child_policy_;
     OrphanablePtr<LoadBalancingPolicy> pending_child_policy_;
-    RefCountedPtr<PickerRef> picker_ref_;
+    RefCountedPtr<PickerWrapper> picker_wrapper_;
     grpc_connectivity_state connectivity_state_;
     uint32_t locality_weight_;
   };
@@ -409,6 +508,7 @@ class XdsLb : public LoadBalancingPolicy {
 
    private:
     void PruneLocalities(const XdsLocalityList& locality_list);
+
     Map<RefCountedPtr<XdsLocalityName>, OrphanablePtr<LocalityEntry>,
        XdsLocalityName::Less>
         map_;
@@ -428,7 +528,7 @@ class XdsLb : public LoadBalancingPolicy {
   // found. Does nothing upon failure.
   void ParseLbConfig(const ParsedXdsConfig* xds_config);
 
-  BalancerChannelState* LatestLbChannel() const {
+  LbChannelState* LatestLbChannel() const {
     return pending_lb_chand_ != nullptr ? pending_lb_chand_.get()
                                         : lb_chand_.get();
   }
@@ -454,8 +554,8 @@ class XdsLb : public LoadBalancingPolicy {
   bool shutting_down_ = false;
 
   // The channel for communicating with the LB server.
-  OrphanablePtr<BalancerChannelState> lb_chand_;
-  OrphanablePtr<BalancerChannelState> pending_lb_chand_;
+  OrphanablePtr<LbChannelState> lb_chand_;
+  OrphanablePtr<LbChannelState> pending_lb_chand_;
 
   // Timeout in milliseconds for the LB call. 0 means no deadline.
   int lb_call_timeout_ms_ = 0;
@@ -493,8 +593,45 @@ class XdsLb : public LoadBalancingPolicy {
   // TODO(mhaidry) : Add a pending locality map that may be swapped with the
   // the current one when new localities in the pending map are ready
   // to accept connections
+
+  // The stats for client-side load reporting.
+  XdsClientStats client_stats_;
 };
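The `PickerList` declared above encodes each locality's weight as a cumulative range end, so a single uniform random key selects a locality with probability proportional to its weight. Here is an illustrative, standalone sketch of that stateless weighted pick (not part of this patch; `LocalityId` stands in for `RefCountedPtr<PickerWrapper>`):

```cpp
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

using LocalityId = int;  // hypothetical stand-in for RefCountedPtr<PickerWrapper>

LocalityId PickLocality(
    const std::vector<std::pair<uint32_t, LocalityId>>& pickers) {
  // pickers[i].first is the *end* of locality i's range; the start of the
  // range is pickers[i-1].first (0 for the first element), exactly as the
  // PickerList comment describes.
  const uint32_t total = pickers.back().first;
  const uint32_t key = static_cast<uint32_t>(
      (static_cast<uint64_t>(rand()) * total) / RAND_MAX);
  for (const auto& p : pickers) {
    if (key < p.first) return p.second;  // key falls inside this range
  }
  return pickers.back().second;  // key == total edge case (rand() == RAND_MAX)
}
```

Because the per-pick state is just one random draw, pickers stay immutable and can be shared across threads by ref-count alone, which is why the wrapper, rather than the picker, carries the load-reporting hooks.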
 
+//
+// XdsLb::PickerWrapper::Pick
+//
+
+LoadBalancingPolicy::PickResult XdsLb::PickerWrapper::Pick(
+    LoadBalancingPolicy::PickArgs args) {
+  // Forward the pick to the picker returned from the child policy.
+  PickResult result = picker_->Pick(args);
+  if (result.type != PickResult::PICK_COMPLETE ||
+      result.subchannel == nullptr || locality_stats_ == nullptr) {
+    return result;
+  }
+  // Record a call started.
+  locality_stats_->AddCallStarted();
+  // Intercept the recv_trailing_metadata op to record call completion.
+  result.recv_trailing_metadata_ready = RecordCallCompletion;
+  result.recv_trailing_metadata_ready_user_data =
+      locality_stats_->Ref(DEBUG_LOCATION, "LocalityStats+call").release();
+  return result;
+}
+
+// Note that the following callback does not run in either the control plane
+// combiner or the data plane combiner.
+void XdsLb::PickerWrapper::RecordCallCompletion(
+    void* arg, grpc_error* error,
+    LoadBalancingPolicy::MetadataInterface* recv_trailing_metadata,
+    LoadBalancingPolicy::CallState* call_state) {
+  XdsClientStats::LocalityStats* locality_stats =
+      static_cast<XdsClientStats::LocalityStats*>(arg);
+  const bool call_failed = error != GRPC_ERROR_NONE;
+  locality_stats->AddCallFinished(call_failed);
+  locality_stats->Unref(DEBUG_LOCATION, "LocalityStats+call");
+}
+
 //
 // XdsLb::Picker
 //
@@ -506,13 +643,7 @@ XdsLb::PickResult XdsLb::Picker::Pick(PickArgs args) {
       (rand() * pickers_[pickers_.size() - 1].first) / RAND_MAX;
   // Forward pick to whichever locality maps to the range in which the
   // random number falls in.
-  PickResult result = PickFromLocality(key, args);
-  // If pick succeeded, add client stats.
-  if (result.type == PickResult::PICK_COMPLETE &&
-      result.subchannel != nullptr && client_stats_ != nullptr) {
-    // TODO(roth): Add support for client stats.
-  }
-  return result;
+  return PickFromLocality(key, args);
 }
 
 XdsLb::PickResult XdsLb::Picker::PickFromLocality(const uint32_t key,
@@ -620,99 +751,46 @@ void XdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity,
 }
 
 //
-// XdsLb::BalancerChannelState
+// XdsLb::LbChannelState
 //
 
-XdsLb::BalancerChannelState::BalancerChannelState(
-    const char* balancer_name, const grpc_channel_args& args,
-    RefCountedPtr<XdsLb> parent_xdslb_policy)
-    : InternallyRefCounted(&grpc_lb_xds_trace),
-      xdslb_policy_(std::move(parent_xdslb_policy)),
-      lb_call_backoff_(
-          BackOff::Options()
-              .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
-                                   1000)
-              .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
-              .set_jitter(GRPC_XDS_RECONNECT_JITTER)
-              .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
-  GRPC_CLOSURE_INIT(&on_connectivity_changed_,
-                    &XdsLb::BalancerChannelState::OnConnectivityChangedLocked,
+XdsLb::LbChannelState::LbChannelState(RefCountedPtr<XdsLb> xdslb_policy,
+                                      const char* balancer_name,
+                                      const grpc_channel_args& args)
+    : InternallyRefCounted(&grpc_lb_xds_trace),
+      xdslb_policy_(std::move(xdslb_policy)) {
+  GRPC_CLOSURE_INIT(&on_connectivity_changed_, OnConnectivityChangedLocked,
                     this, grpc_combiner_scheduler(xdslb_policy_->combiner()));
   channel_ = xdslb_policy_->channel_control_helper()->CreateChannel(
       balancer_name, args);
   GPR_ASSERT(channel_ != nullptr);
-  StartCallLocked();
-}
-
-XdsLb::BalancerChannelState::~BalancerChannelState() {
-  xdslb_policy_.reset(DEBUG_LOCATION, "BalancerChannelState");
-  grpc_channel_destroy(channel_);
-}
-
-void XdsLb::BalancerChannelState::Orphan() {
-  shutting_down_ = true;
-  lb_calld_.reset();
-  if (retry_timer_callback_pending_) grpc_timer_cancel(&lb_call_retry_timer_);
-  Unref(DEBUG_LOCATION, "lb_channel_orphaned");
+  eds_calld_.reset(New<RetryableLbCall<EdsCallState>>(
+      Ref(DEBUG_LOCATION, "LbChannelState+eds")));
+  lrs_calld_.reset(New<RetryableLbCall<LrsCallState>>(
+      Ref(DEBUG_LOCATION, "LbChannelState+lrs")));
 }
 
-void XdsLb::BalancerChannelState::StartCallRetryTimerLocked() {
-  grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
+XdsLb::LbChannelState::~LbChannelState() {
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
-    gpr_log(GPR_INFO,
-            "[xdslb %p] Failed to connect to LB server (lb_chand: %p)...",
-            xdslb_policy_.get(), this);
-    grpc_millis timeout = next_try - ExecCtx::Get()->Now();
-    if (timeout > 0) {
-      gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active in %" PRId64 "ms.",
-              xdslb_policy_.get(), timeout);
-    } else {
-      gpr_log(GPR_INFO, "[xdslb %p] ... retry_timer_active immediately.",
-              xdslb_policy_.get());
-    }
-  }
-  Ref(DEBUG_LOCATION, "on_balancer_call_retry_timer").release();
-  GRPC_CLOSURE_INIT(&lb_on_call_retry_, &OnCallRetryTimerLocked, this,
-                    grpc_combiner_scheduler(xdslb_policy_->combiner()));
-  grpc_timer_init(&lb_call_retry_timer_, next_try, &lb_on_call_retry_);
-  retry_timer_callback_pending_ = true;
-}
-
-void XdsLb::BalancerChannelState::OnCallRetryTimerLocked(void* arg,
-                                                         grpc_error* error) {
-  BalancerChannelState* lb_chand = static_cast<BalancerChannelState*>(arg);
-  lb_chand->retry_timer_callback_pending_ = false;
-  if (!lb_chand->shutting_down_ && error == GRPC_ERROR_NONE &&
-      lb_chand->lb_calld_ == nullptr) {
-    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
-      gpr_log(GPR_INFO,
-              "[xdslb %p] Restarting call to LB server (lb_chand: %p)",
-              lb_chand->xdslb_policy_.get(), lb_chand);
-    }
-    lb_chand->StartCallLocked();
+    gpr_log(GPR_INFO, "[xdslb %p] Destroying LB channel %p", xdslb_policy(),
+            this);
   }
-  lb_chand->Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
+  grpc_channel_destroy(channel_);
 }
 
-void XdsLb::BalancerChannelState::StartCallLocked() {
-  if (shutting_down_) return;
-  GPR_ASSERT(channel_ != nullptr);
-  GPR_ASSERT(lb_calld_ == nullptr);
-  lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
-    gpr_log(GPR_INFO,
-            "[xdslb %p] Query for backends (lb_chand: %p, lb_calld: %p)",
-            xdslb_policy_.get(), this, lb_calld_.get());
-  }
-  lb_calld_->StartQuery();
+void XdsLb::LbChannelState::Orphan() {
+  shutting_down_ = true;
+  eds_calld_.reset();
+  lrs_calld_.reset();
+  Unref(DEBUG_LOCATION, "LbChannelState+orphaned");
 }
 
-void XdsLb::BalancerChannelState::StartConnectivityWatchLocked() {
+void XdsLb::LbChannelState::StartConnectivityWatchLocked() {
   grpc_channel_element* client_channel_elem =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
   GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
   // Ref held by callback.
-  Ref(DEBUG_LOCATION, "watch_lb_channel_connectivity").release();
+  Ref(DEBUG_LOCATION, "LbChannelState+start_watch").release();
   grpc_client_channel_watch_connectivity_state(
       client_channel_elem,
       grpc_polling_entity_create_from_pollset_set(
@@ -720,7 +798,7 @@ void XdsLb::BalancerChannelState::StartConnectivityWatchLocked() {
       &connectivity_, &on_connectivity_changed_, nullptr);
 }
 
-void XdsLb::BalancerChannelState::CancelConnectivityWatchLocked() {
+void XdsLb::LbChannelState::CancelConnectivityWatchLocked() {
   grpc_channel_element* client_channel_elem =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel_));
   GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
@@ -731,9 +809,9 @@ void XdsLb::BalancerChannelState::CancelConnectivityWatchLocked() {
       nullptr, &on_connectivity_changed_, nullptr);
 }
 
-void XdsLb::BalancerChannelState::OnConnectivityChangedLocked(
-    void* arg, grpc_error* error) {
-  BalancerChannelState* self = static_cast<BalancerChannelState*>(arg);
+void XdsLb::LbChannelState::OnConnectivityChangedLocked(void* arg,
+                                                        grpc_error* error) {
+  LbChannelState* self = static_cast<LbChannelState*>(arg);
   if (!self->shutting_down_ &&
       self->xdslb_policy_->fallback_at_startup_checks_pending_) {
     if (self->connectivity_ != GRPC_CHANNEL_TRANSIENT_FAILURE) {
@@ -760,22 +838,113 @@ void XdsLb::BalancerChannelState::OnConnectivityChangedLocked(
     self->xdslb_policy_->UpdateFallbackPolicyLocked();
   }
   // Done watching connectivity state, so drop ref.
-  self->Unref(DEBUG_LOCATION, "watch_lb_channel_connectivity");
+  self->Unref(DEBUG_LOCATION, "LbChannelState+watch_done");
 }
 
 //
-// XdsLb::BalancerChannelState::BalancerCallState
+// XdsLb::LbChannelState::RetryableLbCall<>
 //
 
-XdsLb::BalancerChannelState::BalancerCallState::BalancerCallState(
-    RefCountedPtr<BalancerChannelState> lb_chand)
-    : InternallyRefCounted(&grpc_lb_xds_trace),
-      lb_chand_(std::move(lb_chand)) {
-  GPR_ASSERT(xdslb_policy() != nullptr);
-  GPR_ASSERT(!xdslb_policy()->shutting_down_);
+template <typename T>
+XdsLb::LbChannelState::RetryableLbCall<T>::RetryableLbCall(
+    RefCountedPtr<LbChannelState> lb_chand)
+    : lb_chand_(std::move(lb_chand)),
+      backoff_(
+          BackOff::Options()
+              .set_initial_backoff(GRPC_XDS_INITIAL_CONNECT_BACKOFF_SECONDS *
+                                   1000)
+              .set_multiplier(GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER)
+              .set_jitter(GRPC_XDS_RECONNECT_JITTER)
+              .set_max_backoff(GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)) {
+  GRPC_CLOSURE_INIT(
+      &on_retry_timer_, OnRetryTimerLocked, this,
+      grpc_combiner_scheduler(lb_chand_->xdslb_policy()->combiner()));
+  StartNewCallLocked();
+}
+
+template <typename T>
+void XdsLb::LbChannelState::RetryableLbCall<T>::Orphan() {
+  shutting_down_ = true;
+  lb_calld_.reset();
+  if (retry_timer_callback_pending_) grpc_timer_cancel(&retry_timer_);
+  this->Unref(DEBUG_LOCATION, "RetryableLbCall+orphaned");
+}
+
+template <typename T>
+void XdsLb::LbChannelState::RetryableLbCall<T>::OnCallFinishedLocked() {
+  const bool seen_response = lb_calld_->seen_response();
+  lb_calld_.reset();
+  if (seen_response) {
+    // If we lost connection to the LB server, reset backoff and restart the LB
+    // call immediately.
+    backoff_.Reset();
+    StartNewCallLocked();
+  } else {
+    // If we failed to connect to the LB server, retry later.
+    StartRetryTimerLocked();
+  }
+}
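`OnCallFinishedLocked` above is the whole retry policy in miniature: a call that ever got a response restarts at once with a fresh backoff, while one that never connected waits out the backoff delay. An illustrative, standalone sketch of that decision (not part of this patch; the constants mirror the `GRPC_XDS_*` defines earlier in this file, and the 1.6 multiplier is an assumption standing in for `GRPC_XDS_RECONNECT_BACKOFF_MULTIPLIER`):

```cpp
#include <algorithm>
#include <cstdint>

struct Backoff {
  int64_t current_ms = 1000;    // initial backoff (1s), per the defines above
  int64_t max_ms = 120000;      // GRPC_XDS_RECONNECT_MAX_BACKOFF_SECONDS
  double multiplier = 1.6;      // assumed reconnect backoff multiplier
  void Reset() { current_ms = 1000; }
  int64_t NextDelayMs() {
    int64_t delay = current_ms;
    current_ms = std::min<int64_t>(
        static_cast<int64_t>(current_ms * multiplier), max_ms);
    return delay;  // jitter omitted for brevity
  }
};

// Returns how long to wait before restarting the wrapped LB call.
int64_t DelayBeforeRestart(Backoff* backoff, bool seen_response) {
  if (seen_response) {
    // Connection was working; restart immediately with a fresh backoff.
    backoff->Reset();
    return 0;
  }
  // Never connected; back off before the next attempt.
  return backoff->NextDelayMs();
}
```

Centralizing this in a templated wrapper lets the EDS and LRS calls share one retry implementation instead of each carrying its own timer and backoff state, which is what the old `BalancerChannelState` did.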
 
+template <typename T>
+void XdsLb::LbChannelState::RetryableLbCall<T>::StartNewCallLocked() {
+  if (shutting_down_) return;
+  GPR_ASSERT(lb_chand_->channel_ != nullptr);
+  GPR_ASSERT(lb_calld_ == nullptr);
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
+    gpr_log(GPR_INFO,
+            "[xdslb %p] Start new call from retryable call (lb_chand: %p, "
+            "retryable call: %p)",
+            lb_chand()->xdslb_policy(), lb_chand(), this);
+  }
+  lb_calld_ = MakeOrphanable<T>(
+      this->Ref(DEBUG_LOCATION, "RetryableLbCall+start_new_call"));
+}
+
+template <typename T>
+void XdsLb::LbChannelState::RetryableLbCall<T>::StartRetryTimerLocked() {
+  if (shutting_down_) return;
+  const grpc_millis next_attempt_time = backoff_.NextAttemptTime();
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
+    grpc_millis timeout = GPR_MAX(next_attempt_time - ExecCtx::Get()->Now(), 0);
+    gpr_log(GPR_INFO,
+            "[xdslb %p] Failed to connect to LB server (lb_chand: %p); "
+            "retry timer will fire in %" PRId64 "ms.",
+            lb_chand()->xdslb_policy(), lb_chand(), timeout);
+  }
+  this->Ref(DEBUG_LOCATION, "RetryableLbCall+retry_timer_start").release();
+  grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
+  retry_timer_callback_pending_ = true;
+}
+
+template <typename T>
+void XdsLb::LbChannelState::RetryableLbCall<T>::OnRetryTimerLocked(
+    void* arg, grpc_error* error) {
+  RetryableLbCall* lb_calld = static_cast<RetryableLbCall*>(arg);
+  lb_calld->retry_timer_callback_pending_ = false;
+  if (!lb_calld->shutting_down_ && error == GRPC_ERROR_NONE) {
+    if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
+      gpr_log(GPR_INFO,
+              "[xdslb %p] Retry timer fires (lb_chand: %p, retryable call: %p)",
+              lb_calld->lb_chand()->xdslb_policy(), lb_calld->lb_chand(),
+              lb_calld);
+    }
+    lb_calld->StartNewCallLocked();
+  }
+  lb_calld->Unref(DEBUG_LOCATION, "RetryableLbCall+retry_timer_done");
+}
+
+//
+// XdsLb::LbChannelState::EdsCallState
+//
+
+XdsLb::LbChannelState::EdsCallState::EdsCallState(
+    RefCountedPtr<RetryableLbCall<EdsCallState>> parent)
+    : InternallyRefCounted(&grpc_lb_xds_trace),
+      parent_(std::move(parent)) {
   // Init the LB call. Note that the LB call will progress every time there's
-  // activity in xdslb_policy_->interested_parties(), which is comprised of
+  // activity in xdslb_policy()->interested_parties(), which is comprised of
   // the polling entities from client_channel.
+  GPR_ASSERT(xdslb_policy() != nullptr);
   GPR_ASSERT(xdslb_policy()->server_name_ != nullptr);
   GPR_ASSERT(xdslb_policy()->server_name_[0] != '\0');
   const grpc_millis deadline =
       xdslb_policy()->lb_call_timeout_ms_ == 0
          ? GRPC_MILLIS_INF_FUTURE
          : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_;
   // Create an LB call with the specified method name.
   lb_call_ = grpc_channel_create_pollset_set_call(
-      lb_chand_->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
+      lb_chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
       xdslb_policy()->interested_parties(),
       GRPC_MDSTR_SLASH_ENVOY_DOT_API_DOT_V2_DOT_ENDPOINTDISCOVERYSERVICE_SLASH_STREAMENDPOINTS,
       nullptr, deadline, nullptr);
+  GPR_ASSERT(lb_call_ != nullptr);
   // Init the LB call request payload.
   grpc_slice request_payload_slice =
       XdsEdsRequestCreateAndEncode(xdslb_policy()->server_name_);
   send_message_payload_ =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
   grpc_slice_unref_internal(request_payload_slice);
   // Init other data associated with the LB call.
-  grpc_metadata_array_init(&lb_initial_metadata_recv_);
-  grpc_metadata_array_init(&lb_trailing_metadata_recv_);
-  GRPC_CLOSURE_INIT(&lb_on_initial_request_sent_, OnInitialRequestSentLocked,
-                    this, grpc_combiner_scheduler(xdslb_policy()->combiner()));
-  GRPC_CLOSURE_INIT(&lb_on_balancer_message_received_,
-                    OnBalancerMessageReceivedLocked, this,
+  grpc_metadata_array_init(&initial_metadata_recv_);
+  grpc_metadata_array_init(&trailing_metadata_recv_);
+  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
                     grpc_combiner_scheduler(xdslb_policy()->combiner()));
-  GRPC_CLOSURE_INIT(&lb_on_balancer_status_received_,
-                    OnBalancerStatusReceivedLocked, this,
+  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
                     grpc_combiner_scheduler(xdslb_policy()->combiner()));
-}
-
-XdsLb::BalancerChannelState::BalancerCallState::~BalancerCallState() {
-  GPR_ASSERT(lb_call_ != nullptr);
-  grpc_call_unref(lb_call_);
-  grpc_metadata_array_destroy(&lb_initial_metadata_recv_);
-  grpc_metadata_array_destroy(&lb_trailing_metadata_recv_);
-  grpc_byte_buffer_destroy(send_message_payload_);
-  grpc_byte_buffer_destroy(recv_message_payload_);
-  grpc_slice_unref_internal(lb_call_status_details_);
-}
-
-void XdsLb::BalancerChannelState::BalancerCallState::Orphan() {
-  GPR_ASSERT(lb_call_ != nullptr);
-  // If we are here because xdslb_policy wants to cancel the call,
-  // lb_on_balancer_status_received_ will complete the cancellation and clean
-  // up. Otherwise, we are here because xdslb_policy has to orphan a failed
-  // call, then the following cancellation will be a no-op.
-  grpc_call_cancel(lb_call_, nullptr);
-  if (client_load_report_timer_callback_pending_) {
-    grpc_timer_cancel(&client_load_report_timer_);
-  }
-  // Note that the initial ref is hold by lb_on_balancer_status_received_
-  // instead of the caller of this function. So the corresponding unref happens
-  // in lb_on_balancer_status_received_ instead of here.
-}
-
-void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() {
-  GPR_ASSERT(lb_call_ != nullptr);
+  // Start the call.
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
-    gpr_log(GPR_INFO, "[xdslb %p] Starting LB call (lb_calld: %p, lb_call: %p)",
-            xdslb_policy(), this, lb_call_);
+    gpr_log(GPR_INFO,
+            "[xdslb %p] Starting EDS call (lb_chand: %p, lb_calld: %p, "
+            "lb_call: %p)",
+            xdslb_policy(), lb_chand(), this, lb_call_);
   }
   // Create the ops.
   grpc_call_error call_error;
@@ -856,19 +996,14 @@ void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() {
   op->flags = 0;
   op->reserved = nullptr;
   op++;
-  // TODO(roth): We currently track this ref manually.  Once the
-  // ClosureRef API is ready, we should pass the RefCountedPtr<> along
-  // with the callback.
-  auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent");
-  self.release();
-  call_error = grpc_call_start_batch_and_execute(
-      lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_);
+  call_error = grpc_call_start_batch_and_execute(lb_call_, ops,
+                                                 (size_t)(op - ops), nullptr);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
   // Op: recv initial metadata.
   op = ops;
   op->op = GRPC_OP_RECV_INITIAL_METADATA;
   op->data.recv_initial_metadata.recv_initial_metadata =
-      &lb_initial_metadata_recv_;
+      &initial_metadata_recv_;
   op->flags = 0;
   op->reserved = nullptr;
   op++;
@@ -878,21 +1013,16 @@ void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() {
   op->flags = 0;
   op->reserved = nullptr;
   op++;
-  // TODO(roth): We currently track this ref manually.  Once the
-  // ClosureRef API is ready, we should pass the RefCountedPtr<> along
-  // with the callback.
-  self = Ref(DEBUG_LOCATION, "on_message_received");
-  self.release();
+  Ref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked").release();
   call_error = grpc_call_start_batch_and_execute(
-      lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_);
+      lb_call_, ops, (size_t)(op - ops), &on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
   // Op: recv server status.
   op = ops;
   op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
-  op->data.recv_status_on_client.trailing_metadata =
-      &lb_trailing_metadata_recv_;
-  op->data.recv_status_on_client.status = &lb_call_status_;
-  op->data.recv_status_on_client.status_details = &lb_call_status_details_;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_;
+  op->data.recv_status_on_client.status = &status_code_;
+  op->data.recv_status_on_client.status_details = &status_details_;
   op->flags = 0;
   op->reserved = nullptr;
   op++;
@@ -900,113 +1030,49 @@ void XdsLb::BalancerChannelState::BalancerCallState::StartQuery() {
   // ref instead of a new ref. When it's invoked, it's the initial ref that is
   // unreffed.
   call_error = grpc_call_start_batch_and_execute(
-      lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_);
+      lb_call_, ops, (size_t)(op - ops), &on_status_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-void XdsLb::BalancerChannelState::BalancerCallState::
-    ScheduleNextClientLoadReportLocked() {
-  const grpc_millis next_client_load_report_time =
-      ExecCtx::Get()->Now() + client_stats_report_interval_;
-  GRPC_CLOSURE_INIT(&client_load_report_closure_,
-                    MaybeSendClientLoadReportLocked, this,
-                    grpc_combiner_scheduler(xdslb_policy()->combiner()));
-  grpc_timer_init(&client_load_report_timer_, next_client_load_report_time,
-                  &client_load_report_closure_);
-  client_load_report_timer_callback_pending_ = true;
+XdsLb::LbChannelState::EdsCallState::~EdsCallState() {
+  grpc_metadata_array_destroy(&initial_metadata_recv_);
+  grpc_metadata_array_destroy(&trailing_metadata_recv_);
+  grpc_byte_buffer_destroy(send_message_payload_);
+  grpc_byte_buffer_destroy(recv_message_payload_);
+  grpc_slice_unref_internal(status_details_);
+  GPR_ASSERT(lb_call_ != nullptr);
+  grpc_call_unref(lb_call_);
 }
 
-void XdsLb::BalancerChannelState::BalancerCallState::
-    MaybeSendClientLoadReportLocked(void* arg, grpc_error* error) {
-  BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
-  lb_calld->client_load_report_timer_callback_pending_ = false;
-  if (error != GRPC_ERROR_NONE || !lb_calld->IsCurrentCallOnChannel()) {
-    lb_calld->Unref(DEBUG_LOCATION, "client_load_report");
-    return;
-  }
-  // If we've already sent the initial request, then we can go ahead and send
-  // the load report. Otherwise, we need to wait until the initial request has
-  // been sent to send this (see OnInitialRequestSentLocked()).
-  if (lb_calld->send_message_payload_ == nullptr) {
-    lb_calld->SendClientLoadReportLocked();
-  } else {
-    lb_calld->client_load_report_is_due_ = true;
-  }
-}
-
-bool XdsLb::BalancerChannelState::BalancerCallState::LoadReportCountersAreZero(
-    xds_grpclb_request* request) {
-  const grpc_lb_v1_ClientStats* cstats =
-      grpc_lb_v1_LoadBalanceRequest_client_stats(request);
-  if (cstats == nullptr) {
-    return true;
-  }
-  size_t drop_count;
-  grpc_lb_v1_ClientStats_calls_finished_with_drop(cstats, &drop_count);
-  return grpc_lb_v1_ClientStats_num_calls_started(cstats) == 0 &&
-         grpc_lb_v1_ClientStats_num_calls_finished(cstats) == 0 &&
-         grpc_lb_v1_ClientStats_num_calls_finished_with_client_failed_to_send(
-             cstats) == 0 &&
-         grpc_lb_v1_ClientStats_num_calls_finished_known_received(cstats) ==
-             0 &&
-         drop_count == 0;
-}
-
-// TODO(vpowar): Use LRS to send the client Load Report.
-void XdsLb::BalancerChannelState::BalancerCallState::
-    SendClientLoadReportLocked() {
-  // Construct message payload.
-  GPR_ASSERT(send_message_payload_ == nullptr);
-  upb::Arena arena;
-  xds_grpclb_request* request = xds_grpclb_load_report_request_create_locked(
-      client_stats_.get(), arena.ptr());
-  // Skip client load report if the counters were all zero in the last
-  // report and they are still zero in this one.
-  if (LoadReportCountersAreZero(request)) {
-    if (last_client_load_report_counters_were_zero_) {
-      ScheduleNextClientLoadReportLocked();
-      return;
-    }
-    last_client_load_report_counters_were_zero_ = true;
-  } else {
-    last_client_load_report_counters_were_zero_ = false;
-  }
-  // TODO(vpowar): Send the report on LRS stream.
+void XdsLb::LbChannelState::EdsCallState::Orphan() {
+  GPR_ASSERT(lb_call_ != nullptr);
+  // If we are here because xdslb_policy wants to cancel the call,
+  // on_status_received_ will complete the cancellation and clean up.
+  // Otherwise, we are here because xdslb_policy has to orphan a failed call,
+  // in which case the following cancellation will be a no-op.
+  grpc_call_cancel(lb_call_, nullptr);
+  // Note that the initial ref is held by on_status_received_. So the
+  // corresponding unref happens in on_status_received_ instead of here.
 }
 
-void XdsLb::BalancerChannelState::BalancerCallState::OnInitialRequestSentLocked(
+void XdsLb::LbChannelState::EdsCallState::OnResponseReceivedLocked(
     void* arg, grpc_error* error) {
-  BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
-  grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
-  lb_calld->send_message_payload_ = nullptr;
-  // If we attempted to send a client load report before the initial request
-  // was sent (and this lb_calld is still in use), send the load report now.
-  if (lb_calld->client_load_report_is_due_ &&
-      lb_calld->IsCurrentCallOnChannel()) {
-    lb_calld->SendClientLoadReportLocked();
-    lb_calld->client_load_report_is_due_ = false;
-  }
-  lb_calld->Unref(DEBUG_LOCATION, "on_initial_request_sent");
-}
-
-void XdsLb::BalancerChannelState::BalancerCallState::
-    OnBalancerMessageReceivedLocked(void* arg, grpc_error* error) {
-  BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
-  XdsLb* xdslb_policy = lb_calld->xdslb_policy();
+  EdsCallState* eds_calld = static_cast<EdsCallState*>(arg);
+  LbChannelState* lb_chand = eds_calld->lb_chand();
+  XdsLb* xdslb_policy = eds_calld->xdslb_policy();
   // Empty payload means the LB call was cancelled.
-  if (!lb_calld->IsCurrentCallOnChannel() ||
-      lb_calld->recv_message_payload_ == nullptr) {
-    lb_calld->Unref(DEBUG_LOCATION, "on_message_received");
+  if (!eds_calld->IsCurrentCallOnChannel() ||
+      eds_calld->recv_message_payload_ == nullptr) {
+    eds_calld->Unref(DEBUG_LOCATION, "EDS+OnResponseReceivedLocked");
     return;
   }
-  lb_calld->seen_response_ = true;
   // Read the response.
   grpc_byte_buffer_reader bbr;
-  grpc_byte_buffer_reader_init(&bbr, lb_calld->recv_message_payload_);
+  grpc_byte_buffer_reader_init(&bbr, eds_calld->recv_message_payload_);
   grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
   grpc_byte_buffer_reader_destroy(&bbr);
-  grpc_byte_buffer_destroy(lb_calld->recv_message_payload_);
-  lb_calld->recv_message_payload_ = nullptr;
+  grpc_byte_buffer_destroy(eds_calld->recv_message_payload_);
+  eds_calld->recv_message_payload_ = nullptr;
   // TODO(juanlishen): When we convert this to use the xds protocol, the
   // balancer will send us a fallback timeout such that we should go into
   // fallback mode if we have lost contact with the balancer after a certain
@@ -1037,6 +1103,7 @@ void XdsLb::BalancerChannelState::BalancerCallState::
       gpr_free(response_slice_str);
       return;
     }
+    eds_calld->seen_response_ = true;
     if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
       gpr_log(GPR_INFO,
               "[xdslb %p] EDS response with %" PRIuPTR " localities received",
@@ -1066,25 +1133,21 @@ void XdsLb::BalancerChannelState::BalancerCallState::
     // Note that this call can't be on a discarded pending channel, because
     // such channels don't have any current call but we have checked this call
     // is a current call.
-    if (!lb_calld->lb_chand_->IsCurrentChannel()) {
+    if (!lb_chand->IsCurrentChannel()) {
       if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
         gpr_log(GPR_INFO,
-                "[xdslb %p] Promoting pending LB channel %p to replace "
-                "current LB channel %p",
-                xdslb_policy, lb_calld->lb_chand_.get(),
-                lb_calld->xdslb_policy()->lb_chand_.get());
+                "[xdslb %p] Pending LB channel %p receives EDS response; "
+                "promoting it to replace current LB channel %p",
+                xdslb_policy, lb_chand, xdslb_policy->lb_chand_.get());
       }
-      lb_calld->xdslb_policy()->lb_chand_ =
-          std::move(lb_calld->xdslb_policy()->pending_lb_chand_);
-    }
-    // Start sending client load report only after we start using the
-    // serverlist returned from the current LB call.
-    if (lb_calld->client_stats_report_interval_ > 0 &&
-        lb_calld->client_stats_ == nullptr) {
-      lb_calld->client_stats_ = MakeRefCounted<XdsLbClientStats>();
-      lb_calld->Ref(DEBUG_LOCATION, "client_load_report").release();
-      lb_calld->ScheduleNextClientLoadReportLocked();
+      // TODO(juanlishen): Maybe promote the pending LB channel when the
+      // response results in a READY locality map.
+      xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_);
     }
+    // At this point, lb_chand must be the current LB channel, so try to start
+    // load reporting.
+    LrsCallState* lrs_calld = lb_chand->lrs_calld_->lb_calld();
+    if (lrs_calld != nullptr) lrs_calld->MaybeStartReportingLocked();
     // Ignore identical update.
     if (xdslb_policy->locality_list_ == update.locality_list) {
      if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
@@ -1111,45 +1174,43 @@ void XdsLb::BalancerChannelState::BalancerCallState::
   }();
   grpc_slice_unref_internal(response_slice);
   if (xdslb_policy->shutting_down_) {
-    lb_calld->Unref(DEBUG_LOCATION, "on_message_received+xds_shutdown");
+    eds_calld->Unref(DEBUG_LOCATION,
+                     "EDS+OnResponseReceivedLocked+xds_shutdown");
    return;
   }
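The `}();` above closes an immediately-invoked lambda: the response-parsing body runs inside `[&]() { ... }()` so that any early `return` jumps straight to the shared cleanup (`grpc_slice_unref_internal` and the shutdown check) instead of using `goto`. An illustrative, standalone sketch of the idiom (not part of this patch; `ParseAndCleanup` and its inputs are hypothetical):

```cpp
#include <cstdio>

bool ParseAndCleanup(const char* buf) {
  bool ok = false;
  // The lambda body is the "parse" section; each early return here behaves
  // like a `goto done;` in the classic C cleanup pattern.
  [&]() {
    if (buf == nullptr) return;   // early exit: nothing to parse
    if (buf[0] == '\0') return;   // early exit: empty input
    ok = true;                    // success path
  }();
  // Shared cleanup runs on every path, like a `done:` label.
  std::puts("cleanup runs on every path");
  return ok;
}
```

The same idiom reappears in the LRS `OnResponseReceivedLocked` near the end of this diff.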
   // Keep listening for serverlist updates.
   grpc_op op;
   memset(&op, 0, sizeof(op));
   op.op = GRPC_OP_RECV_MESSAGE;
-  op.data.recv_message.recv_message = &lb_calld->recv_message_payload_;
+  op.data.recv_message.recv_message = &eds_calld->recv_message_payload_;
   op.flags = 0;
   op.reserved = nullptr;
-  GPR_ASSERT(lb_calld->lb_call_ != nullptr);
-  // Reuse the "OnBalancerMessageReceivedLocked" ref taken in StartQuery().
+  GPR_ASSERT(eds_calld->lb_call_ != nullptr);
+  // Reuse the "EDS+OnResponseReceivedLocked" ref taken in ctor.
   const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-      lb_calld->lb_call_, &op, 1, &lb_calld->lb_on_balancer_message_received_);
+      eds_calld->lb_call_, &op, 1, &eds_calld->on_response_received_);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-void XdsLb::BalancerChannelState::BalancerCallState::
-    OnBalancerStatusReceivedLocked(void* arg, grpc_error* error) {
-  BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
-  XdsLb* xdslb_policy = lb_calld->xdslb_policy();
-  BalancerChannelState* lb_chand = lb_calld->lb_chand_.get();
-  GPR_ASSERT(lb_calld->lb_call_ != nullptr);
+void XdsLb::LbChannelState::EdsCallState::OnStatusReceivedLocked(
+    void* arg, grpc_error* error) {
+  EdsCallState* eds_calld = static_cast<EdsCallState*>(arg);
+  LbChannelState* lb_chand = eds_calld->lb_chand();
+  XdsLb* xdslb_policy = eds_calld->xdslb_policy();
   if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
-    char* status_details =
-        grpc_slice_to_c_string(lb_calld->lb_call_status_details_);
+    char* status_details = grpc_slice_to_c_string(eds_calld->status_details_);
     gpr_log(GPR_INFO,
-            "[xdslb %p] Status from LB server received. Status = %d, details "
-            "= '%s', (lb_chand: %p, lb_calld: %p, lb_call: %p), error '%s'",
-            xdslb_policy, lb_calld->lb_call_status_, status_details, lb_chand,
-            lb_calld, lb_calld->lb_call_, grpc_error_string(error));
+            "[xdslb %p] EDS call status received. Status = %d, details "
+            "= '%s', (lb_chand: %p, eds_calld: %p, lb_call: %p), error '%s'",
+            xdslb_policy, eds_calld->status_code_, status_details, lb_chand,
+            eds_calld, eds_calld->lb_call_, grpc_error_string(error));
     gpr_free(status_details);
   }
   // Ignore status from a stale call.
-  if (lb_calld->IsCurrentCallOnChannel()) {
+  if (eds_calld->IsCurrentCallOnChannel()) {
     // Because this call is the current one on the channel, the channel can't
     // have been swapped out; otherwise, the call should have been reset.
     GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel());
-    GPR_ASSERT(!xdslb_policy->shutting_down_);
     if (lb_chand != xdslb_policy->LatestLbChannel()) {
       // This channel must be the current one and there is a pending one. Swap
       // in the pending one and we are done.
@@ -1157,23 +1218,13 @@ void XdsLb::BalancerChannelState::BalancerCallState::
         gpr_log(GPR_INFO,
                 "[xdslb %p] Promoting pending LB channel %p to replace "
                 "current LB channel %p",
-                xdslb_policy, lb_calld->lb_chand_.get(),
-                lb_calld->xdslb_policy()->lb_chand_.get());
+                xdslb_policy, lb_chand, xdslb_policy->lb_chand_.get());
       }
       xdslb_policy->lb_chand_ = std::move(xdslb_policy->pending_lb_chand_);
     } else {
       // This channel is the most recently created one. Try to restart the call
       // and reresolve.
-      lb_chand->lb_calld_.reset();
-      if (lb_calld->seen_response_) {
-        // If we lost connection to the LB server, reset the backoff and
-        // restart the LB call immediately.
-        lb_chand->lb_call_backoff_.Reset();
-        lb_chand->StartCallLocked();
-      } else {
-        // If we failed to connect to the LB server, retry later.
-        lb_chand->StartCallRetryTimerLocked();
-      }
+      eds_calld->parent_->OnCallFinishedLocked();
       xdslb_policy->channel_control_helper()->RequestReresolution();
       // If the fallback-at-startup checks are pending, go into fallback mode
       // immediately. This short-circuits the timeout for the
@@ -1189,7 +1240,369 @@ void XdsLb::BalancerChannelState::BalancerCallState::
       }
     }
   }
-  lb_calld->Unref(DEBUG_LOCATION, "lb_call_ended");
+  eds_calld->Unref(DEBUG_LOCATION, "EDS+OnStatusReceivedLocked");
+}
+
+bool XdsLb::LbChannelState::EdsCallState::IsCurrentCallOnChannel() const {
+  // If the retryable EDS call is null (which only happens when the LB channel
+  // is shutting down), all the EDS calls are stale.
+  if (lb_chand()->eds_calld_ == nullptr) return false;
+  return this == lb_chand()->eds_calld_->lb_calld();
+}
+
+//
+// XdsLb::LbChannelState::LrsCallState::Reporter
+//
+
+void XdsLb::LbChannelState::LrsCallState::Reporter::Orphan() {
+  if (next_report_timer_callback_pending_) {
+    grpc_timer_cancel(&next_report_timer_);
+  }
+}
+
+void XdsLb::LbChannelState::LrsCallState::Reporter::ScheduleNextReportLocked() {
+  const grpc_millis next_report_time = ExecCtx::Get()->Now() + report_interval_;
+  grpc_timer_init(&next_report_timer_, next_report_time,
+                  &on_next_report_timer_);
+  next_report_timer_callback_pending_ = true;
+}
+
+void XdsLb::LbChannelState::LrsCallState::Reporter::OnNextReportTimerLocked(
+    void* arg, grpc_error* error) {
+  Reporter* self = static_cast<Reporter*>(arg);
+  self->next_report_timer_callback_pending_ = false;
+  if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
+    self->Unref(DEBUG_LOCATION, "Reporter+timer");
+    return;
+  }
+  self->SendReportLocked();
+}
+
+void XdsLb::LbChannelState::LrsCallState::Reporter::SendReportLocked() {
+  // Create a request that contains the load report.
+  grpc_slice request_payload_slice = XdsLrsRequestCreateAndEncode(
+      xdslb_policy()->server_name_, &xdslb_policy()->client_stats_);
+  // Skip client load report if the counters were all zero in the last
+  // report and they are still zero in this one.
+  const bool old_val = last_report_counters_were_zero_;
+  last_report_counters_were_zero_ = static_cast<bool>(
+      grpc_slice_eq(request_payload_slice, grpc_empty_slice()));
+  if (old_val && last_report_counters_were_zero_) {
+    ScheduleNextReportLocked();
+    return;
+  }
+  parent_->send_message_payload_ =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(request_payload_slice);
+  // Send the report.
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = parent_->send_message_payload_;
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      parent_->lb_call_, &op, 1, &on_report_done_);
+  if (GPR_UNLIKELY(call_error != GRPC_CALL_OK)) {
+    gpr_log(GPR_ERROR,
+            "[xdslb %p] lb_calld=%p call_error=%d sending client load report",
+            xdslb_policy(), this, call_error);
+    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  }
+}
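`SendReportLocked` above suppresses a report only when both the previous and the current report are all-zero, so the balancer still sees the first transition to idle. An illustrative, standalone sketch of just that rule (not part of this patch; `ShouldSkipReport` is a hypothetical helper):

```cpp
// Returns true if this report should be skipped. Updates the caller's
// "last report was all-zero" flag, mirroring last_report_counters_were_zero_.
bool ShouldSkipReport(bool counters_are_zero,
                      bool* last_report_counters_were_zero) {
  const bool old_val = *last_report_counters_were_zero;
  *last_report_counters_were_zero = counters_are_zero;
  // Skip only if the previous report was also all-zero; the first all-zero
  // report still goes out so the balancer learns the client went idle.
  return old_val && counters_are_zero;
}
```

In the real code the "all zero" signal is the encoder returning an empty slice from `XdsLrsRequestCreateAndEncode`, which folds the counter check into the serialization step.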
+void XdsLb::LbChannelState::LrsCallState::Reporter::OnReportDoneLocked(
+    void* arg, grpc_error* error) {
+  Reporter* self = static_cast<Reporter*>(arg);
+  grpc_byte_buffer_destroy(self->parent_->send_message_payload_);
+  self->parent_->send_message_payload_ = nullptr;
+  if (error != GRPC_ERROR_NONE || !self->IsCurrentReporterOnCall()) {
+    // If this reporter is no longer the current one on the call, the reason
+    // might be that it was orphaned for a new one due to config update.
+    if (!self->IsCurrentReporterOnCall()) {
+      self->parent_->MaybeStartReportingLocked();
+    }
+    self->Unref(DEBUG_LOCATION, "Reporter+report_done");
+    return;
+  }
+  self->ScheduleNextReportLocked();
+}
+
+//
+// XdsLb::LbChannelState::LrsCallState
+//
+
+XdsLb::LbChannelState::LrsCallState::LrsCallState(
+    RefCountedPtr<RetryableLbCall<LrsCallState>> parent)
+    : InternallyRefCounted(&grpc_lb_xds_trace),
+      parent_(std::move(parent)) {
+  // Init the LB call. Note that the LB call will progress every time there's
+  // activity in xdslb_policy()->interested_parties(), which is comprised of
+  // the polling entities from client_channel.
+  GPR_ASSERT(xdslb_policy() != nullptr);
+  GPR_ASSERT(xdslb_policy()->server_name_ != nullptr);
+  GPR_ASSERT(xdslb_policy()->server_name_[0] != '\0');
+  const grpc_millis deadline =
+      xdslb_policy()->lb_call_timeout_ms_ == 0
+          ? GRPC_MILLIS_INF_FUTURE
+          : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_;
+  lb_call_ = grpc_channel_create_pollset_set_call(
+      lb_chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS,
+      xdslb_policy()->interested_parties(),
+      GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_LOAD_STATS_DOT_V2_DOT_LOADREPORTINGSERVICE_SLASH_STREAMLOADSTATS,
+      nullptr, deadline, nullptr);
+  GPR_ASSERT(lb_call_ != nullptr);
+  // Init the LB call request payload.
+  grpc_slice request_payload_slice =
+      XdsLrsRequestCreateAndEncode(xdslb_policy()->server_name_);
+  send_message_payload_ =
+      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
+  grpc_slice_unref_internal(request_payload_slice);
+  // Init other data associated with the LRS call.
+  grpc_metadata_array_init(&initial_metadata_recv_);
+  grpc_metadata_array_init(&trailing_metadata_recv_);
+  GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSentLocked, this,
+                    grpc_combiner_scheduler(xdslb_policy()->combiner()));
+  GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this,
+                    grpc_combiner_scheduler(xdslb_policy()->combiner()));
+  GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this,
+                    grpc_combiner_scheduler(xdslb_policy()->combiner()));
+  // Start the call.
+  if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) {
+    gpr_log(GPR_INFO,
+            "[xdslb %p] Starting LRS call (lb_chand: %p, lb_calld: %p, "
+            "lb_call: %p)",
+            xdslb_policy(), lb_chand(), this, lb_call_);
+  }
+  // Create the ops.
+  grpc_call_error call_error;
+  grpc_op ops[3];
+  memset(ops, 0, sizeof(ops));
+  // Op: send initial metadata.
+  grpc_op* op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  // Op: send request message.
+  GPR_ASSERT(send_message_payload_ != nullptr);
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message.send_message = send_message_payload_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
+  call_error = grpc_call_start_batch_and_execute(
+      lb_call_, ops, (size_t)(op - ops), &on_initial_request_sent_);
+  GPR_ASSERT(GRPC_CALL_OK == call_error);
+  // Op: recv initial metadata.
+  op = ops;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata.recv_initial_metadata =
+      &initial_metadata_recv_;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  // Op: recv response.
+ +// +// XdsLb::LbChannelState::LrsCallState +// + +XdsLb::LbChannelState::LrsCallState::LrsCallState( + RefCountedPtr<RetryableLbCall<LrsCallState>> parent) + : InternallyRefCounted<LrsCallState>(&grpc_lb_xds_trace), + parent_(std::move(parent)) { + // Init the LB call. Note that the LB call will progress every time there's + // activity in xdslb_policy()->interested_parties(), which is comprised of + // the polling entities from client_channel. + GPR_ASSERT(xdslb_policy() != nullptr); + GPR_ASSERT(xdslb_policy()->server_name_ != nullptr); + GPR_ASSERT(xdslb_policy()->server_name_[0] != '\0'); + const grpc_millis deadline = + xdslb_policy()->lb_call_timeout_ms_ == 0 + ? GRPC_MILLIS_INF_FUTURE + : ExecCtx::Get()->Now() + xdslb_policy()->lb_call_timeout_ms_; + lb_call_ = grpc_channel_create_pollset_set_call( + lb_chand()->channel_, nullptr, GRPC_PROPAGATE_DEFAULTS, + xdslb_policy()->interested_parties(), + GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_LOAD_STATS_DOT_V2_DOT_LOADREPORTINGSERVICE_SLASH_STREAMLOADSTATS, + nullptr, deadline, nullptr); + GPR_ASSERT(lb_call_ != nullptr); + // Init the LB call request payload. + grpc_slice request_payload_slice = + XdsLrsRequestCreateAndEncode(xdslb_policy()->server_name_); + send_message_payload_ = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + grpc_slice_unref_internal(request_payload_slice); + // Init other data associated with the LRS call. + grpc_metadata_array_init(&initial_metadata_recv_); + grpc_metadata_array_init(&trailing_metadata_recv_); + GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSentLocked, this, + grpc_combiner_scheduler(xdslb_policy()->combiner())); + GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceivedLocked, this, + grpc_combiner_scheduler(xdslb_policy()->combiner())); + GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceivedLocked, this, + grpc_combiner_scheduler(xdslb_policy()->combiner())); + // Start the call. + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { + gpr_log(GPR_INFO, + "[xdslb %p] Starting LRS call (lb_chand: %p, lb_calld: %p, " + "lb_call: %p)", + xdslb_policy(), lb_chand(), this, lb_call_); + } + // Create the ops. + grpc_call_error call_error; + grpc_op ops[3]; + memset(ops, 0, sizeof(ops)); + // Op: send initial metadata. + grpc_op* op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = 0; + op->reserved = nullptr; + op++; + // Op: send request message. + GPR_ASSERT(send_message_payload_ != nullptr); + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = send_message_payload_; + op->flags = 0; + op->reserved = nullptr; + op++; + Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release(); + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &on_initial_request_sent_); + GPR_ASSERT(GRPC_CALL_OK == call_error); + // Op: recv initial metadata. + op = ops; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata.recv_initial_metadata = + &initial_metadata_recv_; + op->flags = 0; + op->reserved = nullptr; + op++; + // Op: recv response. + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message.recv_message = &recv_message_payload_; + op->flags = 0; + op->reserved = nullptr; + op++; + Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release(); + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &on_response_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); + // Op: recv server status. + op = ops; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv_; + op->data.recv_status_on_client.status = &status_code_; + op->data.recv_status_on_client.status_details = &status_details_; + op->flags = 0; + op->reserved = nullptr; + op++; + // This callback signals the end of the LB call, so it relies on the initial + // ref instead of a new ref. When it's invoked, it's the initial ref that is + // unreffed. + call_error = grpc_call_start_batch_and_execute( + lb_call_, ops, (size_t)(op - ops), &on_status_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); +} + +XdsLb::LbChannelState::LrsCallState::~LrsCallState() { + grpc_metadata_array_destroy(&initial_metadata_recv_); + grpc_metadata_array_destroy(&trailing_metadata_recv_); + grpc_byte_buffer_destroy(send_message_payload_); + grpc_byte_buffer_destroy(recv_message_payload_); + grpc_slice_unref_internal(status_details_); + GPR_ASSERT(lb_call_ != nullptr); + grpc_call_unref(lb_call_); +} + +void XdsLb::LbChannelState::LrsCallState::Orphan() { + reporter_.reset(); + GPR_ASSERT(lb_call_ != nullptr); + // If we are here because xdslb_policy wants to cancel the call, + // on_status_received_ will complete the cancellation and clean up. Otherwise, + // we are here because xdslb_policy has to orphan a failed call, and the + // following cancellation will be a no-op. + grpc_call_cancel(lb_call_, nullptr); + // Note that the initial ref is held by on_status_received_. So the + // corresponding unref happens in on_status_received_ instead of here. +} + +void XdsLb::LbChannelState::LrsCallState::MaybeStartReportingLocked() { + // Don't start if this is not the current call on the current channel. + if (!IsCurrentCallOnChannel() || !lb_chand()->IsCurrentChannel()) return; + // Don't start again if already started. + if (reporter_ != nullptr) return; + // Don't start if the previous send_message op (of the initial request or the + // last report of the previous reporter) hasn't completed. + if (send_message_payload_ != nullptr) return; + // Don't start if no LRS response has arrived. + if (!seen_response()) return; + // Don't start if the EDS call hasn't received any valid response. Note that + // this must be the first channel because it is the current channel but its + // EDS call hasn't seen any response. + EdsCallState* eds_calld = lb_chand()->eds_calld_->lb_calld(); + if (eds_calld == nullptr || !eds_calld->seen_response()) return; + // Start reporting. + lb_chand()->xdslb_policy_->client_stats_.MaybeInitLastReportTime(); + reporter_ = MakeOrphanable<Reporter>( + Ref(DEBUG_LOCATION, "LRS+load_report+start"), load_reporting_interval_); +} + +void XdsLb::LbChannelState::LrsCallState::OnInitialRequestSentLocked( + void* arg, grpc_error* error) { + LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg); + // Clear the send_message_payload_.
+ grpc_byte_buffer_destroy(lrs_calld->send_message_payload_); + lrs_calld->send_message_payload_ = nullptr; + lrs_calld->MaybeStartReportingLocked(); + lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked"); +} + +void XdsLb::LbChannelState::LrsCallState::OnResponseReceivedLocked( + void* arg, grpc_error* error) { + LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg); + XdsLb* xdslb_policy = lrs_calld->xdslb_policy(); + // Empty payload means the LB call was cancelled. + if (!lrs_calld->IsCurrentCallOnChannel() || + lrs_calld->recv_message_payload_ == nullptr) { + lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked"); + return; + } + // Read the response. + grpc_byte_buffer_reader bbr; + grpc_byte_buffer_reader_init(&bbr, lrs_calld->recv_message_payload_); + grpc_slice response_slice = grpc_byte_buffer_reader_readall(&bbr); + grpc_byte_buffer_reader_destroy(&bbr); + grpc_byte_buffer_destroy(lrs_calld->recv_message_payload_); + lrs_calld->recv_message_payload_ = nullptr; + // This anonymous lambda is a hack to avoid using goto. + [&]() { + // Parse the response. + grpc_millis new_load_reporting_interval; + grpc_error* parse_error = XdsLrsResponseDecodeAndParse( + response_slice, &new_load_reporting_interval, + xdslb_policy->server_name_); + if (parse_error != GRPC_ERROR_NONE) { + gpr_log(GPR_ERROR, "[xdslb %p] LRS response parsing failed. error=%s", + xdslb_policy, grpc_error_string(parse_error)); + GRPC_ERROR_UNREF(parse_error); + return; + } + lrs_calld->seen_response_ = true; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { + gpr_log(GPR_INFO, + "[xdslb %p] LRS response received, load_report_interval=%" PRId64 + "ms", + xdslb_policy, new_load_reporting_interval); + } + if (new_load_reporting_interval < + GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) { + new_load_reporting_interval = + GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS; + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { + gpr_log( + GPR_INFO, + "[xdslb %p] Increased load_report_interval to minimum value %dms", + xdslb_policy, GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS); + } + } + // Ignore identical update. + if (lrs_calld->load_reporting_interval_ == new_load_reporting_interval) { + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { + gpr_log(GPR_INFO, + "[xdslb %p] Incoming LRS response identical to current, " + "ignoring.", + xdslb_policy); + } + return; + } + // Stop current load reporting (if any) to adopt the new reporting interval. + lrs_calld->reporter_.reset(); + // Record the new config. + lrs_calld->load_reporting_interval_ = new_load_reporting_interval; + // Try to start sending the load report. + lrs_calld->MaybeStartReportingLocked(); + }(); + grpc_slice_unref_internal(response_slice); + if (xdslb_policy->shutting_down_) { + lrs_calld->Unref(DEBUG_LOCATION, + "LRS+OnResponseReceivedLocked+xds_shutdown"); + return; + } + // Keep listening for LRS config updates. + grpc_op op; + memset(&op, 0, sizeof(op)); + op.op = GRPC_OP_RECV_MESSAGE; + op.data.recv_message.recv_message = &lrs_calld->recv_message_payload_; + op.flags = 0; + op.reserved = nullptr; + GPR_ASSERT(lrs_calld->lb_call_ != nullptr); + // Reuse the "OnResponseReceivedLocked" ref taken in ctor. + const grpc_call_error call_error = grpc_call_start_batch_and_execute( + lrs_calld->lb_call_, &op, 1, &lrs_calld->on_response_received_); + GPR_ASSERT(GRPC_CALL_OK == call_error); +}
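// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: the immediately-invoked lambda
// in OnResponseReceivedLocked() above replaces goto-based cleanup -- every
// early `return` inside the lambda still falls through to the shared unref /
// re-listen code after it. The shape of that idiom in isolation (names here
// are invented for illustration):
#include <cstdio>

void ProcessAndCleanup(int payload) {
  [&]() {
    if (payload < 0) return;   // parse error: bail out of the lambda only
    if (payload == 0) return;  // ignorable update: likewise
    std::printf("applied update %d\n", payload);
  }();
  // Shared cleanup runs on every path, replacing `goto done;`.
  std::printf("cleanup for %d\n", payload);
}

int main() {
  ProcessAndCleanup(-1);  // cleanup still runs after the early return
  ProcessAndCleanup(42);
}
// ---------------------------------------------------------------------------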
+ +void XdsLb::LbChannelState::LrsCallState::OnStatusReceivedLocked( + void* arg, grpc_error* error) { + LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg); + XdsLb* xdslb_policy = lrs_calld->xdslb_policy(); + LbChannelState* lb_chand = lrs_calld->lb_chand(); + GPR_ASSERT(lrs_calld->lb_call_ != nullptr); + if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_trace)) { + char* status_details = grpc_slice_to_c_string(lrs_calld->status_details_); + gpr_log(GPR_INFO, + "[xdslb %p] LRS call status received. Status = %d, details " + "= '%s', (lb_chand: %p, lb_calld: %p, lb_call: %p), error '%s'", + xdslb_policy, lrs_calld->status_code_, status_details, lb_chand, + lrs_calld, lrs_calld->lb_call_, grpc_error_string(error)); + gpr_free(status_details); + } + // Ignore status from a stale call. + if (lrs_calld->IsCurrentCallOnChannel()) { + // Because this call is the current one on the channel, the channel can't + // have been swapped out; otherwise, the call should have been reset. + GPR_ASSERT(lb_chand->IsCurrentChannel() || lb_chand->IsPendingChannel()); + GPR_ASSERT(!xdslb_policy->shutting_down_); + if (lb_chand == xdslb_policy->LatestLbChannel()) { + // This channel is the most recently created one. Try to restart the call + // and reresolve. + lrs_calld->parent_->OnCallFinishedLocked(); + xdslb_policy->channel_control_helper()->RequestReresolution(); + } + } + lrs_calld->Unref(DEBUG_LOCATION, "LRS+OnStatusReceivedLocked"); +} + +bool XdsLb::LbChannelState::LrsCallState::IsCurrentCallOnChannel() const { + // If the retryable LRS call is null (which only happens when the LB channel + // is shutting down), all the LRS calls are stale. + if (lb_chand()->lrs_calld_ == nullptr) return false; + return this == lb_chand()->lrs_calld_->lb_calld(); } // @@ -1249,8 +1662,7 @@ grpc_channel_args* BuildBalancerChannelArgs(const grpc_channel_args* args) { // ctor and dtor // -XdsLb::XdsLb(Args args) - : LoadBalancingPolicy(std::move(args)), locality_map_(), locality_list_() { +XdsLb::XdsLb(Args args) : LoadBalancingPolicy(std::move(args)) { // Record server name. const grpc_arg* arg = grpc_channel_args_find(args.args, GRPC_ARG_SERVER_URI); const char* server_uri = grpc_channel_arg_get_string(arg); @@ -1351,11 +1763,10 @@ void XdsLb::ProcessAddressesAndChannelArgsLocked( strcmp(last_balancer_name.get(), balancer_name_.get()) != 0; } if (create_lb_channel) { - OrphanablePtr<BalancerChannelState> lb_chand = - MakeOrphanable<BalancerChannelState>( - balancer_name_.get(), *lb_channel_args, - Ref(DEBUG_LOCATION, "BalancerChannelState")); - if (lb_chand_ == nullptr || !lb_chand_->HasActiveCall()) { + OrphanablePtr<LbChannelState> lb_chand = MakeOrphanable<LbChannelState>( + Ref(DEBUG_LOCATION, "XdsLb+LbChannelState"), balancer_name_.get(), + *lb_channel_args); + if (lb_chand_ == nullptr || !lb_chand_->HasActiveEdsCall()) { GPR_ASSERT(pending_lb_chand_ == nullptr); // If we do not have a working LB channel yet, use the newly created one. lb_chand_ = std::move(lb_chand); @@ -1604,15 +2015,15 @@ void XdsLb::LocalityMap::UpdateLocked( const grpc_channel_args* args, XdsLb* parent) { if (parent->shutting_down_) return; for (size_t i = 0; i < locality_list.size(); i++) { - auto iter = map_.find(locality_list[i].locality_name); + auto& locality_name = locality_list[i].locality_name; + auto iter = map_.find(locality_name); // Add a new entry in the locality map if a new locality is received in the // locality list.
if (iter == map_.end()) { OrphanablePtr<LocalityEntry> new_entry = MakeOrphanable<LocalityEntry>( - parent->Ref(DEBUG_LOCATION, "LocalityEntry"), - locality_list[i].locality_name, locality_list[i].lb_weight); - iter = map_.emplace(locality_list[i].locality_name, std::move(new_entry)) - .first; + parent->Ref(DEBUG_LOCATION, "LocalityEntry"), locality_name, + locality_list[i].lb_weight); + iter = map_.emplace(locality_name, std::move(new_entry)).first; } // Keep a copy of serverlist in locality_list_ so that we can compare it // with the future ones. @@ -1834,7 +2245,7 @@ void XdsLb::LocalityMap::LocalityEntry::ShutdownLocked() { } // Drop our ref to the child's picker, in case it's holding a ref to // the child. - picker_ref_.reset(); + picker_wrapper_.reset(); } void XdsLb::LocalityMap::LocalityEntry::ResetBackoffLocked() { @@ -1911,12 +2322,10 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::UpdateState( // If we are in fallback mode, ignore update request from the child policy. if (entry_->parent_->fallback_policy_ != nullptr) return; GPR_ASSERT(entry_->parent_->lb_chand_ != nullptr); - RefCountedPtr<XdsLbClientStats> client_stats = - entry_->parent_->lb_chand_->lb_calld() == nullptr - ? nullptr - : entry_->parent_->lb_chand_->lb_calld()->client_stats(); - // Cache the picker and its state in the entry - entry_->picker_ref_ = MakeRefCounted<PickerRef>(std::move(picker)); + // Cache the picker and its state in the entry. + entry_->picker_wrapper_ = MakeRefCounted<PickerWrapper>( + std::move(picker), + entry_->parent_->client_stats_.FindLocalityStats(entry_->name_)); entry_->connectivity_state_ = state; // Construct a new xds picker which maintains a map of all locality pickers // that are ready. Each locality is represented by a portion of the range @@ -1926,15 +2335,30 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::UpdateState( size_t num_connecting = 0; size_t num_idle = 0; size_t num_transient_failures = 0; - auto& locality_map = this->entry_->parent_->locality_map_.map_; Picker::PickerList pickers; - for (auto& p : locality_map) { + for (auto& p : entry_->parent_->locality_map_.map_) { + // TODO(juanlishen): We should prune a locality (and kill its stats) after + // we know we won't pick from it. We need to improve our update logic to + // make that easier. Consider the following situation: the current map has + // two READY localities A and B, and the update only contains B with the + // same addresses as before. Without the following hack, we will generate + // the same picker containing A and B because we haven't pruned A when the + // update happens. Remove the for loop below once we implement the locality + // map update.
+ bool in_locality_list = false; + for (size_t i = 0; i < entry_->parent_->locality_list_.size(); ++i) { + if (*entry_->parent_->locality_list_[i].locality_name == *p.first) { + in_locality_list = true; + break; + } + } + if (!in_locality_list) continue; const LocalityEntry* entry = p.second.get(); grpc_connectivity_state connectivity_state = entry->connectivity_state_; switch (connectivity_state) { case GRPC_CHANNEL_READY: { end += entry->locality_weight_; - pickers.push_back(MakePair(end, entry->picker_ref_)); + pickers.push_back(MakePair(end, entry->picker_wrapper_)); break; } case GRPC_CHANNEL_CONNECTING: { @@ -1959,29 +2383,30 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::UpdateState( // otherwise pass a QueuePicker if any of the locality pickers are in a // connecting or idle state, finally return a transient failure picker if all // locality pickers are in transient failure - if (pickers.size() > 0) { + if (!pickers.empty()) { entry_->parent_->channel_control_helper()->UpdateState( - GRPC_CHANNEL_READY, - UniquePtr<SubchannelPicker>( - New<Picker>(std::move(client_stats), std::move(pickers)))); + GRPC_CHANNEL_READY, UniquePtr<SubchannelPicker>( + New<Picker>(std::move(pickers)))); } else if (num_connecting > 0) { entry_->parent_->channel_control_helper()->UpdateState( GRPC_CHANNEL_CONNECTING, UniquePtr<SubchannelPicker>(New<QueuePicker>( - this->entry_->parent_->Ref(DEBUG_LOCATION, "QueuePicker")))); + entry_->parent_->Ref(DEBUG_LOCATION, "QueuePicker")))); } else if (num_idle > 0) { entry_->parent_->channel_control_helper()->UpdateState( GRPC_CHANNEL_IDLE, UniquePtr<SubchannelPicker>(New<QueuePicker>( - this->entry_->parent_->Ref(DEBUG_LOCATION, "QueuePicker")))); + entry_->parent_->Ref(DEBUG_LOCATION, "QueuePicker")))); } else { - GPR_ASSERT(num_transient_failures == locality_map.size()); + GPR_ASSERT(num_transient_failures == + entry_->parent_->locality_map_.map_.size()); grpc_error* error = grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING( "connections to all localities failing"), GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE); entry_->parent_->channel_control_helper()->UpdateState( - state, UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error))); + GRPC_CHANNEL_TRANSIENT_FAILURE, + UniquePtr<SubchannelPicker>(New<TransientFailurePicker>(error))); } }
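// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: UpdateState() above builds the
// picker list as (cumulative_weight_end, picker) pairs; a pick draws a number
// uniformly in [0, total_weight) and selects the first locality whose
// cumulative end exceeds it. A standalone analogue (names are invented):
#include <cstdint>
#include <cstdio>
#include <random>
#include <string>
#include <utility>
#include <vector>

int main() {
  // (cumulative weight end, locality label), as pushed in the READY branch.
  std::vector<std::pair<uint32_t, std::string>> pickers = {
      {30, "locality-A"},   // weight 30
      {100, "locality-B"},  // weight 70
  };
  std::mt19937 rng(0);
  std::uniform_int_distribution<uint32_t> dist(0, pickers.back().first - 1);
  for (int i = 0; i < 5; ++i) {
    const uint32_t key = dist(rng);
    for (const auto& p : pickers) {
      if (key < p.first) {  // first range whose end exceeds the key
        std::printf("key=%u -> %s\n", key, p.second.c_str());
        break;
      }
    }
  }
}
// ---------------------------------------------------------------------------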
@@ -2003,8 +2428,8 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::RequestReresolution() { // from the balancer, so we can ignore the re-resolution request from // the child policy. Otherwise, pass the re-resolution request up to the // channel. - if (entry_->parent_->lb_chand_->lb_calld() == nullptr || - !entry_->parent_->lb_chand_->lb_calld()->seen_response()) { + if (entry_->parent_->lb_chand_->eds_calld() == nullptr || + !entry_->parent_->lb_chand_->eds_calld()->seen_response()) { entry_->parent_->channel_control_helper()->RequestReresolution(); } } diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc index cdf5408be36..5057e614c68 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.cc @@ -26,60 +26,163 @@ namespace grpc_core { -void XdsLbClientStats::AddCallStarted() { - gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1); +namespace { + +template <typename T> +T GetAndResetCounter(Atomic<T>* from) { + return from->Exchange(0, MemoryOrder::RELAXED); +} + +} // namespace
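// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: GetAndResetCounter() above
// uses an atomic exchange so that a concurrent increment can never be lost
// between reading the counter and zeroing it (a separate load followed by a
// store would race). Standalone analogue with std::atomic:
#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<uint64_t> g_counter{0};

uint64_t GetAndResetCounter() {
  // Atomically returns the current value and leaves 0 behind; increments that
  // land after this call are counted toward the *next* snapshot.
  return g_counter.exchange(0, std::memory_order_relaxed);
}

int main() {
  g_counter.fetch_add(3, std::memory_order_relaxed);
  std::printf("snapshot=%llu\n",
              static_cast<unsigned long long>(GetAndResetCounter()));
  std::printf("after reset=%llu\n",
              static_cast<unsigned long long>(g_counter.load()));
}
// ---------------------------------------------------------------------------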
+ +// +// XdsClientStats::LocalityStats::LoadMetric::Snapshot +// + +bool XdsClientStats::LocalityStats::LoadMetric::Snapshot::IsAllZero() const { + return total_metric_value == 0 && num_requests_finished_with_metric == 0; +} + +// +// XdsClientStats::LocalityStats::LoadMetric +// + +XdsClientStats::LocalityStats::LoadMetric::Snapshot +XdsClientStats::LocalityStats::LoadMetric::GetSnapshotAndReset() { + Snapshot metric = {num_requests_finished_with_metric_, total_metric_value_}; + num_requests_finished_with_metric_ = 0; + total_metric_value_ = 0; + return metric; } -void XdsLbClientStats::AddCallFinished(bool finished_with_client_failed_to_send, - bool finished_known_received) { - gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1); - if (finished_with_client_failed_to_send) { - gpr_atm_full_fetch_add(&num_calls_finished_with_client_failed_to_send_, - (gpr_atm)1); +// +// XdsClientStats::LocalityStats::Snapshot +// + +bool XdsClientStats::LocalityStats::Snapshot::IsAllZero() { + if (total_successful_requests != 0 || total_requests_in_progress != 0 || + total_error_requests != 0 || total_issued_requests != 0) { + return false; } - if (finished_known_received) { - gpr_atm_full_fetch_add(&num_calls_finished_known_received_, (gpr_atm)1); + for (auto& p : load_metric_stats) { + const LoadMetric::Snapshot& metric_value = p.second; + if (!metric_value.IsAllZero()) return false; } + return true; } -void XdsLbClientStats::AddCallDroppedLocked(char* token) { - // Increment num_calls_started and num_calls_finished. - gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1); - gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1); - // Record the drop. - if (drop_token_counts_ == nullptr) { - drop_token_counts_.reset(New<DroppedCallCounts>()); - } - for (size_t i = 0; i < drop_token_counts_->size(); ++i) { - if (strcmp((*drop_token_counts_)[i].token.get(), token) == 0) { - ++(*drop_token_counts_)[i].count; - return; +// +// XdsClientStats::LocalityStats +// + +XdsClientStats::LocalityStats::Snapshot +XdsClientStats::LocalityStats::GetSnapshotAndReset() { + Snapshot snapshot = { + GetAndResetCounter(&total_successful_requests_), + // Don't reset total_requests_in_progress because it's not + // related to a single reporting interval. + total_requests_in_progress_.Load(MemoryOrder::RELAXED), + GetAndResetCounter(&total_error_requests_), + GetAndResetCounter(&total_issued_requests_)}; + { + MutexLock lock(&load_metric_stats_mu_); + for (auto& p : load_metric_stats_) { + const char* metric_name = p.first.get(); + LoadMetric& metric_value = p.second; + snapshot.load_metric_stats.emplace( + UniquePtr<char>(gpr_strdup(metric_name)), + metric_value.GetSnapshotAndReset()); } } - // Not found, so add a new entry. - drop_token_counts_->emplace_back(UniquePtr<char>(gpr_strdup(token)), 1); + return snapshot; } -namespace { +void XdsClientStats::LocalityStats::AddCallStarted() { + total_issued_requests_.FetchAdd(1, MemoryOrder::RELAXED); + total_requests_in_progress_.FetchAdd(1, MemoryOrder::RELAXED); +} -void AtomicGetAndResetCounter(int64_t* value, gpr_atm* counter) { - *value = static_cast<int64_t>(gpr_atm_full_xchg(counter, (gpr_atm)0)); +void XdsClientStats::LocalityStats::AddCallFinished(bool fail) { + Atomic<uint64_t>& to_increment = + fail ? total_error_requests_ : total_successful_requests_; + to_increment.FetchAdd(1, MemoryOrder::RELAXED); + total_requests_in_progress_.FetchAdd(-1, MemoryOrder::ACQ_REL); } -} // namespace +// +// XdsClientStats::Snapshot +// -void XdsLbClientStats::GetLocked( - int64_t* num_calls_started, int64_t* num_calls_finished, - int64_t* num_calls_finished_with_client_failed_to_send, - int64_t* num_calls_finished_known_received, - UniquePtr<DroppedCallCounts>* drop_token_counts) { - AtomicGetAndResetCounter(num_calls_started, &num_calls_started_); - AtomicGetAndResetCounter(num_calls_finished, &num_calls_finished_); - AtomicGetAndResetCounter(num_calls_finished_with_client_failed_to_send, - &num_calls_finished_with_client_failed_to_send_); - AtomicGetAndResetCounter(num_calls_finished_known_received, - &num_calls_finished_known_received_); - *drop_token_counts = std::move(drop_token_counts_); +bool XdsClientStats::Snapshot::IsAllZero() { + for (auto& p : upstream_locality_stats) { + if (!p.second.IsAllZero()) return false; + } + for (auto& p : dropped_requests) { + if (p.second != 0) return false; + } + return total_dropped_requests == 0; +} + +// +// XdsClientStats +// + +XdsClientStats::Snapshot XdsClientStats::GetSnapshotAndReset() { + grpc_millis now = ExecCtx::Get()->Now(); + // Record total_dropped_requests and reporting interval in the snapshot. + Snapshot snapshot; + snapshot.total_dropped_requests = + GetAndResetCounter(&total_dropped_requests_); + snapshot.load_report_interval = now - last_report_time_; + // Update last report time. + last_report_time_ = now; + // Snapshot all the other stats.
+ for (auto& p : upstream_locality_stats_) { + snapshot.upstream_locality_stats.emplace(p.first, + p.second->GetSnapshotAndReset()); + } + { + MutexLock lock(&dropped_requests_mu_); + snapshot.dropped_requests = std::move(dropped_requests_); + } + return snapshot; +} + +void XdsClientStats::MaybeInitLastReportTime() { + if (last_report_time_ == -1) last_report_time_ = ExecCtx::Get()->Now(); +} + +RefCountedPtr<XdsClientStats::LocalityStats> XdsClientStats::FindLocalityStats( + const RefCountedPtr<XdsLocalityName>& locality_name) { + auto iter = upstream_locality_stats_.find(locality_name); + if (iter == upstream_locality_stats_.end()) { + iter = upstream_locality_stats_ + .emplace(locality_name, MakeRefCounted<LocalityStats>()) + .first; + } + return iter->second; +} + +void XdsClientStats::PruneLocalityStats() { + auto iter = upstream_locality_stats_.begin(); + while (iter != upstream_locality_stats_.end()) { + if (iter->second->IsSafeToDelete()) { + iter = upstream_locality_stats_.erase(iter); + } else { + ++iter; + } + } +} + +void XdsClientStats::AddCallDropped(UniquePtr<char> category) { + total_dropped_requests_.FetchAdd(1, MemoryOrder::RELAXED); + MutexLock lock(&dropped_requests_mu_); + auto iter = dropped_requests_.find(category); + if (iter == dropped_requests_.end()) { + dropped_requests_.emplace(UniquePtr<char>(gpr_strdup(category.get())), 1); + } else { + ++iter->second; + } } } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h index fa0b9f4b635..445675a4dcf 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h @@ -21,49 +21,207 @@ #include <grpc/support/port_platform.h> -#include <grpc/support/atm.h> +#include <grpc/support/string_util.h> +#include "src/core/lib/gprpp/atomic.h" #include "src/core/lib/gprpp/inlined_vector.h" +#include "src/core/lib/gprpp/map.h" #include "src/core/lib/gprpp/memory.h" #include "src/core/lib/gprpp/ref_counted.h" +#include "src/core/lib/gprpp/sync.h" +#include "src/core/lib/iomgr/exec_ctx.h" namespace grpc_core { -class XdsLbClientStats : public RefCounted<XdsLbClientStats> { +class XdsLocalityName : public RefCounted<XdsLocalityName> { public: - struct DropTokenCount { - UniquePtr<char> token; - int64_t count; + struct Less { + bool operator()(const RefCountedPtr<XdsLocalityName>& lhs, + const RefCountedPtr<XdsLocalityName>& rhs) { + int cmp_result = strcmp(lhs->region_.get(), rhs->region_.get()); + if (cmp_result != 0) return cmp_result < 0; + cmp_result = strcmp(lhs->zone_.get(), rhs->zone_.get()); + if (cmp_result != 0) return cmp_result < 0; + return strcmp(lhs->sub_zone_.get(), rhs->sub_zone_.get()) < 0; + } + }; + + XdsLocalityName(UniquePtr<char> region, UniquePtr<char> zone, + UniquePtr<char> subzone) + : region_(std::move(region)), + zone_(std::move(zone)), + sub_zone_(std::move(subzone)) {} + + bool operator==(const XdsLocalityName& other) const { + return strcmp(region_.get(), other.region_.get()) == 0 && + strcmp(zone_.get(), other.zone_.get()) == 0 && + strcmp(sub_zone_.get(), other.sub_zone_.get()) == 0; + } + + const char* region() const { return region_.get(); } + const char* zone() const { return zone_.get(); } + const char* sub_zone() const { return sub_zone_.get(); } + + const char* AsHumanReadableString() { + if (human_readable_string_ == nullptr) { + char* tmp; + gpr_asprintf(&tmp, "{region=\"%s\", zone=\"%s\", sub_zone=\"%s\"}", + region_.get(), zone_.get(), sub_zone_.get()); + human_readable_string_.reset(tmp); + } + return human_readable_string_.get(); + } + + private: + UniquePtr<char> region_; + UniquePtr<char> zone_; + UniquePtr<char> sub_zone_; + UniquePtr<char> human_readable_string_; +};
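// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: XdsLocalityName::Less above
// orders localities lexicographically by (region, zone, sub_zone), which is
// what lets Map<> key per-locality stats by name. A standalone analogue with
// std::tie and a simplified value type instead of RefCountedPtr<>:
#include <cstdio>
#include <map>
#include <string>
#include <tuple>

struct LocalityName {
  std::string region, zone, sub_zone;
};
struct LocalityLess {
  bool operator()(const LocalityName& a, const LocalityName& b) const {
    return std::tie(a.region, a.zone, a.sub_zone) <
           std::tie(b.region, b.zone, b.sub_zone);
  }
};

int main() {
  std::map<LocalityName, int, LocalityLess> stats;
  stats[{"us-east1", "b", "1"}] = 10;
  stats[{"us-east1", "a", "2"}] = 20;
  for (const auto& p : stats)  // iterates zone "a" before zone "b"
    std::printf("%s/%s/%s -> %d\n", p.first.region.c_str(),
                p.first.zone.c_str(), p.first.sub_zone.c_str(), p.second);
}
// ---------------------------------------------------------------------------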
+ +// The stats classes (i.e., XdsClientStats, LocalityStats, and LoadMetric) can +// have a snapshot taken (and be reset) to populate the load report. The +// snapshots are contained in the respective Snapshot structs. The Snapshot +// structs have no synchronization. The stats classes use several different +// synchronization methods. 1. Most of the counters are Atomic<>s for +// performance. 2. Some of the Map<>s are protected by Mutex if we are not +// guaranteed that the accesses to them are synchronized by the callers. +// 3. The Map<>s to which the accesses are already synchronized by the callers +// do not have additional synchronization here. Note that the Map<>s we +// mentioned in 2 and 3 refer to the map's tree structure rather than the +// content in each tree node. +class XdsClientStats { + public: + class LocalityStats : public RefCounted<LocalityStats> { + public: + class LoadMetric { + public: + struct Snapshot { + bool IsAllZero() const; - DropTokenCount(UniquePtr<char> token, int64_t count) - : token(std::move(token)), count(count) {} + uint64_t num_requests_finished_with_metric; + double total_metric_value; + }; + + // Returns a snapshot of this instance and resets all the cumulative + // counters. + Snapshot GetSnapshotAndReset(); + + private: + uint64_t num_requests_finished_with_metric_{0}; + double total_metric_value_{0}; + }; + + using LoadMetricMap = Map<UniquePtr<char>, LoadMetric, StringLess>; + using LoadMetricSnapshotMap = + Map<UniquePtr<char>, LoadMetric::Snapshot, StringLess>; + + struct Snapshot { + // TODO(juanlishen): Change this to const method when const_iterator is + // added to Map<>. + bool IsAllZero(); + + uint64_t total_successful_requests; + uint64_t total_requests_in_progress; + uint64_t total_error_requests; + uint64_t total_issued_requests; + LoadMetricSnapshotMap load_metric_stats; + }; + + // Returns a snapshot of this instance and resets all the cumulative + // counters. + Snapshot GetSnapshotAndReset(); + + // Each XdsLb::PickerWrapper holds a ref to the corresponding LocalityStats. + // If the refcount is 0, no new calls will be recorded against the + // LocalityStats, so the LocalityStats can be safely deleted when all the + // in-progress calls have finished. + // Must only be called from the control plane combiner. + void RefByPicker() { picker_refcount_.FetchAdd(1, MemoryOrder::ACQ_REL); } + // Might be called from the control plane combiner or the data plane + // combiner. + // TODO(juanlishen): Once https://github.com/grpc/grpc/pull/19390 is merged, + // this method will also only be invoked in the control plane combiner. + // We may then be able to simplify the LocalityStats' lifetime by making it + // RefCounted<> and populating the protobuf in its dtor. + void UnrefByPicker() { picker_refcount_.FetchSub(1, MemoryOrder::ACQ_REL); } + // Must only be called from the control plane combiner. + // The only place where the picker_refcount_ can be increased is + // RefByPicker(), which also can only be called from the control plane + // combiner. Also, if the picker_refcount_ is 0, total_requests_in_progress_ + // can't be increased from 0. So it's safe to delete the LocalityStats right + // after this method returns true.
+ bool IsSafeToDelete() { + return picker_refcount_.FetchAdd(0, MemoryOrder::ACQ_REL) == 0 && + total_requests_in_progress_.FetchAdd(0, MemoryOrder::ACQ_REL) == 0; + } + + void AddCallStarted(); + void AddCallFinished(bool fail = false); + + private: + Atomic<uint64_t> total_successful_requests_{0}; + Atomic<uint64_t> total_requests_in_progress_{0}; + // Requests that were issued (not dropped) but failed. + Atomic<uint64_t> total_error_requests_{0}; + Atomic<uint64_t> total_issued_requests_{0}; + // Protects load_metric_stats_. A mutex is necessary because the length of + // load_metric_stats_ can be accessed by both the callback intercepting the + // call's recv_trailing_metadata (not from any combiner) and the load + // reporting thread (from the control plane combiner). + Mutex load_metric_stats_mu_; + LoadMetricMap load_metric_stats_; + // Can be accessed from either the control plane combiner or the data plane + // combiner. + Atomic<uint8_t> picker_refcount_{0}; + }; - typedef InlinedVector<DropTokenCount, 10> DroppedCallCounts; + // TODO(juanlishen): The value type of Map<> must be movable in current + // implementation. To avoid making LocalityStats movable, we wrap it in + // RefCountedPtr<>. We should remove this wrapper if the value type of Map<> + // doesn't have to be movable. + using LocalityStatsMap = + Map<RefCountedPtr<XdsLocalityName>, RefCountedPtr<LocalityStats>, + XdsLocalityName::Less>; + using LocalityStatsSnapshotMap = + Map<RefCountedPtr<XdsLocalityName>, LocalityStats::Snapshot, + XdsLocalityName::Less>; + using DroppedRequestsMap = Map<UniquePtr<char>, uint64_t, StringLess>; + using DroppedRequestsSnapshotMap = DroppedRequestsMap; - XdsLbClientStats() {} + struct Snapshot { + // TODO(juanlishen): Change this to const method when const_iterator is + // added to Map<>. + bool IsAllZero(); - void AddCallStarted(); - void AddCallFinished(bool finished_with_client_failed_to_send, - bool finished_known_received); + LocalityStatsSnapshotMap upstream_locality_stats; + uint64_t total_dropped_requests; + DroppedRequestsSnapshotMap dropped_requests; + // The actual load report interval. + grpc_millis load_report_interval; + }; - // This method is not thread-safe; caller must synchronize. - void AddCallDroppedLocked(char* token); + // Returns a snapshot of this instance and resets all the cumulative + // counters. + Snapshot GetSnapshotAndReset(); - // This method is not thread-safe; caller must synchronize. - void GetLocked(int64_t* num_calls_started, int64_t* num_calls_finished, - int64_t* num_calls_finished_with_client_failed_to_send, - int64_t* num_calls_finished_known_received, - UniquePtr<DroppedCallCounts>* drop_token_counts); + void MaybeInitLastReportTime(); + RefCountedPtr<LocalityStats> FindLocalityStats( + const RefCountedPtr<XdsLocalityName>& locality_name); + void PruneLocalityStats(); + void AddCallDropped(UniquePtr<char> category); private: - // This field must only be accessed via *_locked() methods. - UniquePtr<DroppedCallCounts> drop_token_counts_; - // These fields may be accessed from multiple threads at a time. - gpr_atm num_calls_started_ = 0; - gpr_atm num_calls_finished_ = 0; - gpr_atm num_calls_finished_with_client_failed_to_send_ = 0; - gpr_atm num_calls_finished_known_received_ = 0; + // The stats for each locality. + LocalityStatsMap upstream_locality_stats_; + Atomic<uint64_t> total_dropped_requests_{0}; + // Protects dropped_requests_. A mutex is necessary because the length of + // dropped_requests_ can be accessed by both the picker (from data plane + // combiner) and the load reporting thread (from the control plane combiner). + Mutex dropped_requests_mu_; + DroppedRequestsMap dropped_requests_; + // The timestamp of last reporting. For the LB-policy-wide first report, the + // last_report_time is the time we scheduled the first reporting timer. + grpc_millis last_report_time_ = -1; +}; } // namespace grpc_core
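// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this patch: IsSafeToDelete() above lets the
// pruning pass drop a LocalityStats only when (a) no picker can start new
// calls against it and (b) no started call is still in flight. A simplified
// standalone analogue of that invariant, with invented names:
#include <atomic>
#include <cstdio>

class MiniLocalityStats {
 public:
  void RefByPicker() { picker_refs_.fetch_add(1, std::memory_order_acq_rel); }
  void UnrefByPicker() { picker_refs_.fetch_sub(1, std::memory_order_acq_rel); }
  void AddCallStarted() { in_progress_.fetch_add(1, std::memory_order_relaxed); }
  void AddCallFinished() { in_progress_.fetch_sub(1, std::memory_order_acq_rel); }
  // With zero picker refs no new call can start, so once in-flight calls
  // drain to zero the object can no longer be touched.
  bool IsSafeToDelete() const {
    return picker_refs_.load(std::memory_order_acquire) == 0 &&
           in_progress_.load(std::memory_order_acquire) == 0;
  }

 private:
  std::atomic<int> picker_refs_{0};
  std::atomic<int> in_progress_{0};
};

int main() {
  MiniLocalityStats s;
  s.RefByPicker();
  s.AddCallStarted();
  std::printf("safe? %d\n", s.IsSafeToDelete());  // 0: ref held, call pending
  s.AddCallFinished();
  s.UnrefByPicker();
  std::printf("safe? %d\n", s.IsSafeToDelete());  // 1: drained
}
// ---------------------------------------------------------------------------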
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc index 8ae6a5e7590..b2615f319b9 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.cc @@ -33,7 +33,10 @@ #include "envoy/api/v2/discovery.upb.h" #include "envoy/api/v2/eds.upb.h" #include "envoy/api/v2/endpoint/endpoint.upb.h" +#include "envoy/api/v2/endpoint/load_report.upb.h" +#include "envoy/service/load_stats/v2/lrs.upb.h" #include "google/protobuf/any.upb.h" +#include "google/protobuf/duration.upb.h" #include "google/protobuf/struct.upb.h" #include "google/protobuf/timestamp.upb.h" #include "google/protobuf/wrappers.upb.h" @@ -209,54 +212,163 @@ grpc_error* XdsEdsResponseDecodeAndParse(const grpc_slice& encoded_response, namespace { -void google_protobuf_Timestamp_assign(google_protobuf_Timestamp* timestamp, - const gpr_timespec& value) { - google_protobuf_Timestamp_set_seconds(timestamp, value.tv_sec); - google_protobuf_Timestamp_set_nanos(timestamp, value.tv_nsec); +grpc_slice LrsRequestEncode( + const envoy_service_load_stats_v2_LoadStatsRequest* request, + upb_arena* arena) { + size_t output_length; + char* output = envoy_service_load_stats_v2_LoadStatsRequest_serialize( + request, arena, &output_length); + return grpc_slice_from_copied_buffer(output, output_length); } } // namespace -xds_grpclb_request* xds_grpclb_load_report_request_create_locked( - grpc_core::XdsLbClientStats* client_stats, upb_arena* arena) { - xds_grpclb_request* req = grpc_lb_v1_LoadBalanceRequest_new(arena); - grpc_lb_v1_ClientStats* req_stats = - grpc_lb_v1_LoadBalanceRequest_mutable_client_stats(req, arena); - google_protobuf_Timestamp_assign( - grpc_lb_v1_ClientStats_mutable_timestamp(req_stats, arena), - gpr_now(GPR_CLOCK_REALTIME)); +grpc_slice XdsLrsRequestCreateAndEncode(const char* server_name) { + upb::Arena arena; + // Create a request. + envoy_service_load_stats_v2_LoadStatsRequest* request = + envoy_service_load_stats_v2_LoadStatsRequest_new(arena.ptr()); + // Add cluster stats. There is only one because we use only one server name + // per channel. + envoy_api_v2_endpoint_ClusterStats* cluster_stats = + envoy_service_load_stats_v2_LoadStatsRequest_add_cluster_stats( + request, arena.ptr()); + // Set the cluster name. + envoy_api_v2_endpoint_ClusterStats_set_cluster_name( + cluster_stats, upb_strview_makez(server_name)); + return LrsRequestEncode(request, arena.ptr()); +} + +namespace { + +void LocalityStatsPopulate(envoy_api_v2_endpoint_UpstreamLocalityStats* output, + Pair<RefCountedPtr<XdsLocalityName>, + XdsClientStats::LocalityStats::Snapshot>& input, + upb_arena* arena) { + // Set sub_zone. + envoy_api_v2_core_Locality* locality = + envoy_api_v2_endpoint_UpstreamLocalityStats_mutable_locality(output, + arena); + envoy_api_v2_core_Locality_set_sub_zone( + locality, upb_strview_makez(input.first->sub_zone())); + // Set total counts.
+ XdsClientStats::LocalityStats::Snapshot& snapshot = input.second; + envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_successful_requests( + output, snapshot.total_successful_requests); + envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_requests_in_progress( + output, snapshot.total_requests_in_progress); + envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_error_requests( + output, snapshot.total_error_requests); + envoy_api_v2_endpoint_UpstreamLocalityStats_set_total_issued_requests( + output, snapshot.total_issued_requests); + // Add load metric stats. + for (auto& p : snapshot.load_metric_stats) { + const char* metric_name = p.first.get(); + const XdsClientStats::LocalityStats::LoadMetric::Snapshot& metric_value = + p.second; + envoy_api_v2_endpoint_EndpointLoadMetricStats* load_metric = + envoy_api_v2_endpoint_UpstreamLocalityStats_add_load_metric_stats( + output, arena); + envoy_api_v2_endpoint_EndpointLoadMetricStats_set_metric_name( + load_metric, upb_strview_makez(metric_name)); + envoy_api_v2_endpoint_EndpointLoadMetricStats_set_num_requests_finished_with_metric( + load_metric, metric_value.num_requests_finished_with_metric); + envoy_api_v2_endpoint_EndpointLoadMetricStats_set_total_metric_value( + load_metric, metric_value.total_metric_value); + } +} - int64_t num_calls_started; - int64_t num_calls_finished; - int64_t num_calls_finished_with_client_failed_to_send; - int64_t num_calls_finished_known_received; - UniquePtr<XdsLbClientStats::DroppedCallCounts> drop_token_counts; - client_stats->GetLocked(&num_calls_started, &num_calls_finished, - &num_calls_finished_with_client_failed_to_send, - &num_calls_finished_known_received, - &drop_token_counts); - grpc_lb_v1_ClientStats_set_num_calls_started(req_stats, num_calls_started); - grpc_lb_v1_ClientStats_set_num_calls_finished(req_stats, num_calls_finished); - grpc_lb_v1_ClientStats_set_num_calls_finished_with_client_failed_to_send( - req_stats, num_calls_finished_with_client_failed_to_send); - grpc_lb_v1_ClientStats_set_num_calls_finished_known_received( - req_stats, num_calls_finished_known_received); - if (drop_token_counts != nullptr) { - for (size_t i = 0; i < drop_token_counts->size(); ++i) { - XdsLbClientStats::DropTokenCount& cur = (*drop_token_counts)[i]; - grpc_lb_v1_ClientStatsPerToken* cur_msg = - grpc_lb_v1_ClientStats_add_calls_finished_with_drop(req_stats, arena); +} // namespace - const size_t token_len = strlen(cur.token.get()); - char* token = reinterpret_cast<char*>(upb_arena_malloc(arena, token_len)); - memcpy(token, cur.token.get(), token_len); +grpc_slice XdsLrsRequestCreateAndEncode(const char* server_name, + XdsClientStats* client_stats) { + upb::Arena arena; + XdsClientStats::Snapshot snapshot = client_stats->GetSnapshotAndReset(); + // Prune unused locality stats. + client_stats->PruneLocalityStats(); + // When all the counts are zero, return an empty slice. + if (snapshot.IsAllZero()) return grpc_empty_slice(); + // Create a request. + envoy_service_load_stats_v2_LoadStatsRequest* request = + envoy_service_load_stats_v2_LoadStatsRequest_new(arena.ptr()); + // Add cluster stats. There is only one because we use only one server name + // per channel. + envoy_api_v2_endpoint_ClusterStats* cluster_stats = + envoy_service_load_stats_v2_LoadStatsRequest_add_cluster_stats( + request, arena.ptr()); + // Set the cluster name. + envoy_api_v2_endpoint_ClusterStats_set_cluster_name( + cluster_stats, upb_strview_makez(server_name)); + // Add locality stats.
+ for (auto& p : snapshot.upstream_locality_stats) { + envoy_api_v2_endpoint_UpstreamLocalityStats* locality_stats = + envoy_api_v2_endpoint_ClusterStats_add_upstream_locality_stats( + cluster_stats, arena.ptr()); + LocalityStatsPopulate(locality_stats, p, arena.ptr()); + } + // Add dropped requests. + for (auto& p : snapshot.dropped_requests) { + const char* category = p.first.get(); + const uint64_t count = p.second; + envoy_api_v2_endpoint_ClusterStats_DroppedRequests* dropped_requests = + envoy_api_v2_endpoint_ClusterStats_add_dropped_requests(cluster_stats, + arena.ptr()); + envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_category( + dropped_requests, upb_strview_makez(category)); + envoy_api_v2_endpoint_ClusterStats_DroppedRequests_set_dropped_count( + dropped_requests, count); + } + // Set total dropped requests. + envoy_api_v2_endpoint_ClusterStats_set_total_dropped_requests( + cluster_stats, snapshot.total_dropped_requests); + // Set the actual load report interval. + gpr_timespec timespec = + grpc_millis_to_timespec(snapshot.load_report_interval, GPR_TIMESPAN); + google_protobuf_Duration* load_report_interval = + envoy_api_v2_endpoint_ClusterStats_mutable_load_report_interval( + cluster_stats, arena.ptr()); + google_protobuf_Duration_set_seconds(load_report_interval, timespec.tv_sec); + google_protobuf_Duration_set_nanos(load_report_interval, timespec.tv_nsec); + return LrsRequestEncode(request, arena.ptr()); +} -grpc_lb_v1_ClientStatsPerToken_set_load_balance_token( - cur_msg, upb_strview_make(token, token_len)); - grpc_lb_v1_ClientStatsPerToken_set_num_calls(cur_msg, cur.count); - } +grpc_error* XdsLrsResponseDecodeAndParse(const grpc_slice& encoded_response, + grpc_millis* load_reporting_interval, + const char* expected_server_name) { + upb::Arena arena; + // Decode the response. + const envoy_service_load_stats_v2_LoadStatsResponse* decoded_response = + envoy_service_load_stats_v2_LoadStatsResponse_parse( + reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(encoded_response)), + GRPC_SLICE_LENGTH(encoded_response), arena.ptr()); + // Parse the response. + if (decoded_response == nullptr) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No response found."); + } + // Check the cluster size in the response. + size_t size; + const upb_strview* clusters = + envoy_service_load_stats_v2_LoadStatsResponse_clusters(decoded_response, + &size); + if (size != 1) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "The number of clusters (server names) is not 1."); } - return req; + // Check the cluster name in the response. + if (strncmp(expected_server_name, clusters[0].data, clusters[0].size) != 0) { + return GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "Unexpected cluster (server name)."); + } + // Get the load report interval.
+ const google_protobuf_Duration* load_reporting_interval_duration = + envoy_service_load_stats_v2_LoadStatsResponse_load_reporting_interval( + decoded_response); + gpr_timespec timespec{ + google_protobuf_Duration_seconds(load_reporting_interval_duration), + google_protobuf_Duration_nanos(load_reporting_interval_duration), + GPR_TIMESPAN}; + *load_reporting_interval = gpr_time_to_millis(timespec); + return GRPC_ERROR_NONE; } } // namespace grpc_core diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h index 8e31584e257..ed3b49ff836 100644 --- a/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h +++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h @@ -25,58 +25,9 @@ #include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h" #include "src/core/ext/filters/client_channel/server_address.h" -#include "src/proto/grpc/lb/v1/load_balancer.upb.h" namespace grpc_core { -typedef grpc_lb_v1_LoadBalanceRequest xds_grpclb_request; - -class XdsLocalityName : public RefCounted<XdsLocalityName> { - public: - struct Less { - bool operator()(const RefCountedPtr<XdsLocalityName>& lhs, - const RefCountedPtr<XdsLocalityName>& rhs) { - int cmp_result = strcmp(lhs->region_.get(), rhs->region_.get()); - if (cmp_result != 0) return cmp_result < 0; - cmp_result = strcmp(lhs->zone_.get(), rhs->zone_.get()); - if (cmp_result != 0) return cmp_result < 0; - return strcmp(lhs->sub_zone_.get(), rhs->sub_zone_.get()) < 0; - } - }; - - XdsLocalityName(UniquePtr<char> region, UniquePtr<char> zone, - UniquePtr<char> sub_zone) - : region_(std::move(region)), - zone_(std::move(zone)), - sub_zone_(std::move(sub_zone)) {} - - bool operator==(const XdsLocalityName& other) const { - return strcmp(region_.get(), other.region_.get()) == 0 && - strcmp(zone_.get(), other.zone_.get()) == 0 && - strcmp(sub_zone_.get(), other.sub_zone_.get()) == 0; - } - - const char* region() const { return region_.get(); } - const char* zone() const { return zone_.get(); } - const char* sub_zone() const { return sub_zone_.get(); } - - const char* AsHumanReadableString() { - if (human_readable_string_ == nullptr) { - char* tmp; - gpr_asprintf(&tmp, "{region=\"%s\", zone=\"%s\", sub_zone=\"%s\"}", - region_.get(), zone_.get(), sub_zone_.get()); - human_readable_string_.reset(tmp); - } - return human_readable_string_.get(); - } - - private: - UniquePtr<char> region_; - UniquePtr<char> zone_; - UniquePtr<char> sub_zone_; - UniquePtr<char> human_readable_string_; -}; - struct XdsLocalityInfo { bool operator==(const XdsLocalityInfo& other) const { return *locality_name == *other.locality_name && @@ -112,9 +63,20 @@ grpc_slice XdsEdsRequestCreateAndEncode(const char* service_name); grpc_error* XdsEdsResponseDecodeAndParse(const grpc_slice& encoded_response, XdsUpdate* update); -// TODO(juanlishen): Delete these when LRS is added. -xds_grpclb_request* xds_grpclb_load_report_request_create_locked( - grpc_core::XdsLbClientStats* client_stats, upb_arena* arena); +// Creates an LRS request querying \a server_name. +grpc_slice XdsLrsRequestCreateAndEncode(const char* server_name); + +// Creates an LRS request sending client-side load reports. If all the counters +// in \a client_stats are zero, returns an empty slice. +grpc_slice XdsLrsRequestCreateAndEncode(const char* server_name, + XdsClientStats* client_stats); + +// Parses the LRS response and returns the client-side load reporting interval.
+// If there is any error (e.g., the found server name doesn't match \a +// expected_server_name), the output config is invalid and must not be used. +grpc_error* XdsLrsResponseDecodeAndParse(const grpc_slice& encoded_response, + grpc_millis* load_reporting_interval, + const char* expected_server_name); } // namespace grpc_core diff --git a/src/core/lib/gprpp/atomic.h b/src/core/lib/gprpp/atomic.h index 80412ef9583..095ebf12565 100644 --- a/src/core/lib/gprpp/atomic.h +++ b/src/core/lib/gprpp/atomic.h @@ -49,6 +49,10 @@ class Atomic { storage_.store(val, static_cast<std::memory_order>(order)); } + T Exchange(T desired, MemoryOrder order) { + return storage_.exchange(desired, static_cast<std::memory_order>(order)); + } + bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success, MemoryOrder failure) { return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak( diff --git a/src/core/lib/gprpp/map.h b/src/core/lib/gprpp/map.h index 5238f43469f..d133a89fb43 100644 --- a/src/core/lib/gprpp/map.h +++ b/src/core/lib/gprpp/map.h @@ -89,7 +89,7 @@ class Map { // Removes the current entry and points to the next one iterator erase(iterator iter); - size_t size() { return size_; } + size_t size() const { return size_; } template <typename... Args> Pair<iterator, bool> emplace(Args&&... args); diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc index d72ea3f63ae..e2e618726f1 100644 --- a/src/core/lib/transport/static_metadata.cc +++ b/src/core/lib/transport/static_metadata.cc @@ -63,61 +63,66 @@ static uint8_t g_bytes[] = { 116, 101, 115, 47, 103, 114, 112, 99, 46, 108, 98, 46, 118, 49, 46, 76, 111, 97, 100, 66, 97, 108, 97, 110, 99, 101, 114, 47, 66, 97, 108, 97, 110, 99, 101, 76, 111, 97, 100, 47, 101, 110, 118, 111, 121, - 46, 97, 112, 105, 46, 118, 50, 46, 69, 110, 100, 112, 111, 105, 110, - 116, 68, 105, 115, 99, 111, 118, 101, 114, 121, 83, 101, 114, 118, 105, - 99, 101, 47, 83, 116, 114, 101, 97, 109, 69, 110, 100, 112, 111, 105, - 110, 116, 115, 47, 103, 114, 112, 99, 46, 104, 101, 97, 108, 116, 104, - 46, 118, 49, 46, 72, 101, 97, 108, 116, 104, 47, 87, 97, 116, 99, - 104, 47, 101, 110, 118, 111, 121, 46, 115, 101, 114, 118, 105, 99, 101, - 46, 100, 105, 115, 99, 111, 118, 101, 114, 121, 46, 118, 50, 46, 65, - 103, 103, 114, 101, 103, 97, 116, 101, 100, 68, 105, 115, 99, 111, 118, - 101, 114, 121, 83, 101, 114, 118, 105, 99, 101, 47, 83, 116, 114, 101, - 97, 109, 65, 103, 103, 114, 101, 103, 97, 116, 101, 100, 82, 101, 115, - 111, 117, 114, 99, 101, 115, 100, 101, 102, 108, 97, 116, 101, 103, 122, - 105, 112, 115, 116, 114, 101, 97, 109, 47, 103, 122, 105, 112, 71, 69, - 84, 80, 79, 83, 84, 47, 47, 105, 110, 100, 101, 120, 46, 104, 116, - 109, 108, 104, 116, 116, 112, 104, 116, 116, 112, 115, 50, 48, 48, 50, - 48, 52, 50, 48, 54, 51, 48, 52, 52, 48, 48, 52, 48, 52, 53, - 48, 48, 97, 99, 99, 101, 112, 116, 45, 99, 104, 97, 114, 115, 101, - 116, 103, 122, 105, 112, 44, 32, 100, 101, 102, 108, 97, 116, 101, 97, - 99, 99, 101, 112, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 97, - 99, 99, 101, 112, 116, 45, 114, 97, 110, 103, 101, 115, 97, 99, 99, - 101, 112, 116, 97, 99, 99, 101, 115, 115, 45, 99, 111, 110, 116, 114, - 111, 108, 45, 97, 108, 108, 111, 119, 45, 111, 114, 105, 103, 105, 110, - 97, 103, 101, 97, 108, 108, 111, 119, 97, 117, 116, 104, 111, 114, 105, - 122, 97, 116, 105, 111, 110, 99, 97, 99, 104, 101, 45, 99, 111, 110, - 116, 114, 111, 108, 99, 111, 110, 116, 101, 110, 116, 45, 100, 105, 115, - 112, 111, 115, 105, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, 116, - 45, 108, 97, 110, 103, 117, 97, 103, 101, 99,
111, 110, 116, 101, 110, - 116, 45, 108, 101, 110, 103, 116, 104, 99, 111, 110, 116, 101, 110, 116, - 45, 108, 111, 99, 97, 116, 105, 111, 110, 99, 111, 110, 116, 101, 110, - 116, 45, 114, 97, 110, 103, 101, 99, 111, 111, 107, 105, 101, 100, 97, - 116, 101, 101, 116, 97, 103, 101, 120, 112, 101, 99, 116, 101, 120, 112, - 105, 114, 101, 115, 102, 114, 111, 109, 105, 102, 45, 109, 97, 116, 99, - 104, 105, 102, 45, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, - 110, 99, 101, 105, 102, 45, 110, 111, 110, 101, 45, 109, 97, 116, 99, - 104, 105, 102, 45, 114, 97, 110, 103, 101, 105, 102, 45, 117, 110, 109, - 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, 110, 99, 101, 108, 97, - 115, 116, 45, 109, 111, 100, 105, 102, 105, 101, 100, 108, 105, 110, 107, - 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, 120, 45, 102, 111, 114, - 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, 45, 97, 117, 116, 104, - 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, 111, 120, 121, 45, 97, - 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 114, 97, 110, - 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, 101, 102, 114, 101, 115, - 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, 101, 114, 115, 101, 114, - 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, 107, 105, 101, 115, 116, - 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, 112, 111, 114, 116, 45, - 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, 97, 110, 115, 102, 101, - 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, 118, 97, 114, 121, 118, - 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, - 97, 116, 101, 48, 105, 100, 101, 110, 116, 105, 116, 121, 116, 114, 97, - 105, 108, 101, 114, 115, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, - 110, 47, 103, 114, 112, 99, 103, 114, 112, 99, 80, 85, 84, 108, 98, - 45, 99, 111, 115, 116, 45, 98, 105, 110, 105, 100, 101, 110, 116, 105, - 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 105, 100, 101, 110, 116, - 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, 102, 108, 97, 116, 101, - 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, 105, 116, 121, 44, 100, - 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, 112}; + 46, 115, 101, 114, 118, 105, 99, 101, 46, 108, 111, 97, 100, 95, 115, + 116, 97, 116, 115, 46, 118, 50, 46, 76, 111, 97, 100, 82, 101, 112, + 111, 114, 116, 105, 110, 103, 83, 101, 114, 118, 105, 99, 101, 47, 83, + 116, 114, 101, 97, 109, 76, 111, 97, 100, 83, 116, 97, 116, 115, 47, + 101, 110, 118, 111, 121, 46, 97, 112, 105, 46, 118, 50, 46, 69, 110, + 100, 112, 111, 105, 110, 116, 68, 105, 115, 99, 111, 118, 101, 114, 121, + 83, 101, 114, 118, 105, 99, 101, 47, 83, 116, 114, 101, 97, 109, 69, + 110, 100, 112, 111, 105, 110, 116, 115, 47, 103, 114, 112, 99, 46, 104, + 101, 97, 108, 116, 104, 46, 118, 49, 46, 72, 101, 97, 108, 116, 104, + 47, 87, 97, 116, 99, 104, 47, 101, 110, 118, 111, 121, 46, 115, 101, + 114, 118, 105, 99, 101, 46, 100, 105, 115, 99, 111, 118, 101, 114, 121, + 46, 118, 50, 46, 65, 103, 103, 114, 101, 103, 97, 116, 101, 100, 68, + 105, 115, 99, 111, 118, 101, 114, 121, 83, 101, 114, 118, 105, 99, 101, + 47, 83, 116, 114, 101, 97, 109, 65, 103, 103, 114, 101, 103, 97, 116, + 101, 100, 82, 101, 115, 111, 117, 114, 99, 101, 115, 100, 101, 102, 108, + 97, 116, 101, 103, 122, 105, 112, 115, 116, 114, 101, 97, 109, 47, 103, + 122, 105, 112, 71, 69, 84, 80, 79, 83, 84, 47, 47, 105, 110, 100, + 101, 120, 46, 104, 116, 109, 108, 104, 116, 116, 112, 104, 116, 116, 112, + 115, 50, 48, 48, 50, 48, 52, 50, 48, 54, 51, 48, 52, 52, 48, + 48, 52, 48, 52, 53, 48, 48, 97, 
99, 99, 101, 112, 116, 45, 99, + 104, 97, 114, 115, 101, 116, 103, 122, 105, 112, 44, 32, 100, 101, 102, + 108, 97, 116, 101, 97, 99, 99, 101, 112, 116, 45, 108, 97, 110, 103, + 117, 97, 103, 101, 97, 99, 99, 101, 112, 116, 45, 114, 97, 110, 103, + 101, 115, 97, 99, 99, 101, 112, 116, 97, 99, 99, 101, 115, 115, 45, + 99, 111, 110, 116, 114, 111, 108, 45, 97, 108, 108, 111, 119, 45, 111, + 114, 105, 103, 105, 110, 97, 103, 101, 97, 108, 108, 111, 119, 97, 117, + 116, 104, 111, 114, 105, 122, 97, 116, 105, 111, 110, 99, 97, 99, 104, + 101, 45, 99, 111, 110, 116, 114, 111, 108, 99, 111, 110, 116, 101, 110, + 116, 45, 100, 105, 115, 112, 111, 115, 105, 116, 105, 111, 110, 99, 111, + 110, 116, 101, 110, 116, 45, 108, 97, 110, 103, 117, 97, 103, 101, 99, + 111, 110, 116, 101, 110, 116, 45, 108, 101, 110, 103, 116, 104, 99, 111, + 110, 116, 101, 110, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 99, + 111, 110, 116, 101, 110, 116, 45, 114, 97, 110, 103, 101, 99, 111, 111, + 107, 105, 101, 100, 97, 116, 101, 101, 116, 97, 103, 101, 120, 112, 101, + 99, 116, 101, 120, 112, 105, 114, 101, 115, 102, 114, 111, 109, 105, 102, + 45, 109, 97, 116, 99, 104, 105, 102, 45, 109, 111, 100, 105, 102, 105, + 101, 100, 45, 115, 105, 110, 99, 101, 105, 102, 45, 110, 111, 110, 101, + 45, 109, 97, 116, 99, 104, 105, 102, 45, 114, 97, 110, 103, 101, 105, + 102, 45, 117, 110, 109, 111, 100, 105, 102, 105, 101, 100, 45, 115, 105, + 110, 99, 101, 108, 97, 115, 116, 45, 109, 111, 100, 105, 102, 105, 101, + 100, 108, 105, 110, 107, 108, 111, 99, 97, 116, 105, 111, 110, 109, 97, + 120, 45, 102, 111, 114, 119, 97, 114, 100, 115, 112, 114, 111, 120, 121, + 45, 97, 117, 116, 104, 101, 110, 116, 105, 99, 97, 116, 101, 112, 114, + 111, 120, 121, 45, 97, 117, 116, 104, 111, 114, 105, 122, 97, 116, 105, + 111, 110, 114, 97, 110, 103, 101, 114, 101, 102, 101, 114, 101, 114, 114, + 101, 102, 114, 101, 115, 104, 114, 101, 116, 114, 121, 45, 97, 102, 116, + 101, 114, 115, 101, 114, 118, 101, 114, 115, 101, 116, 45, 99, 111, 111, + 107, 105, 101, 115, 116, 114, 105, 99, 116, 45, 116, 114, 97, 110, 115, + 112, 111, 114, 116, 45, 115, 101, 99, 117, 114, 105, 116, 121, 116, 114, + 97, 110, 115, 102, 101, 114, 45, 101, 110, 99, 111, 100, 105, 110, 103, + 118, 97, 114, 121, 118, 105, 97, 119, 119, 119, 45, 97, 117, 116, 104, + 101, 110, 116, 105, 99, 97, 116, 101, 48, 105, 100, 101, 110, 116, 105, + 116, 121, 116, 114, 97, 105, 108, 101, 114, 115, 97, 112, 112, 108, 105, + 99, 97, 116, 105, 111, 110, 47, 103, 114, 112, 99, 103, 114, 112, 99, + 80, 85, 84, 108, 98, 45, 99, 111, 115, 116, 45, 98, 105, 110, 105, + 100, 101, 110, 116, 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, + 105, 100, 101, 110, 116, 105, 116, 121, 44, 103, 122, 105, 112, 100, 101, + 102, 108, 97, 116, 101, 44, 103, 122, 105, 112, 105, 100, 101, 110, 116, + 105, 116, 121, 44, 100, 101, 102, 108, 97, 116, 101, 44, 103, 122, 105, + 112}; grpc_slice_refcount grpc_core::StaticSliceRefcount::kStaticSubRefcount; grpc_core::StaticSliceRefcount @@ -229,6 +234,7 @@ grpc_core::StaticSliceRefcount grpc_core::StaticSliceRefcount(104), grpc_core::StaticSliceRefcount(105), grpc_core::StaticSliceRefcount(106), + grpc_core::StaticSliceRefcount(107), }; const grpc_core::StaticMetadataSlice @@ -302,151 +308,153 @@ const grpc_core::StaticMetadataSlice grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[33].base, 36, g_bytes + 438), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[34].base, - 54, g_bytes + 474), + 65, g_bytes + 474), 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[35].base, - 28, g_bytes + 528), + 54, g_bytes + 539), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[36].base, - 80, g_bytes + 556), + 28, g_bytes + 593), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[37].base, - 7, g_bytes + 636), + 80, g_bytes + 621), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, - 4, g_bytes + 643), + 7, g_bytes + 701), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[39].base, - 11, g_bytes + 647), + 4, g_bytes + 708), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[40].base, - 3, g_bytes + 658), + 11, g_bytes + 712), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[41].base, - 4, g_bytes + 661), + 3, g_bytes + 723), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[42].base, - 1, g_bytes + 665), + 4, g_bytes + 726), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[43].base, - 11, g_bytes + 666), + 1, g_bytes + 730), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[44].base, - 4, g_bytes + 677), + 11, g_bytes + 731), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[45].base, - 5, g_bytes + 681), + 4, g_bytes + 742), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[46].base, - 3, g_bytes + 686), + 5, g_bytes + 746), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[47].base, - 3, g_bytes + 689), + 3, g_bytes + 751), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[48].base, - 3, g_bytes + 692), + 3, g_bytes + 754), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[49].base, - 3, g_bytes + 695), + 3, g_bytes + 757), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[50].base, - 3, g_bytes + 698), + 3, g_bytes + 760), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[51].base, - 3, g_bytes + 701), + 3, g_bytes + 763), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[52].base, - 3, g_bytes + 704), + 3, g_bytes + 766), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[53].base, - 14, g_bytes + 707), + 3, g_bytes + 769), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[54].base, - 13, g_bytes + 721), + 14, g_bytes + 772), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[55].base, - 15, g_bytes + 734), + 13, g_bytes + 786), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[56].base, - 13, g_bytes + 749), + 15, g_bytes + 799), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[57].base, - 6, g_bytes + 762), + 13, g_bytes + 814), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[58].base, - 27, g_bytes + 768), + 6, g_bytes + 827), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[59].base, - 3, g_bytes + 795), + 27, g_bytes + 833), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[60].base, - 5, g_bytes + 798), + 3, g_bytes + 860), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[61].base, - 13, g_bytes + 803), + 5, g_bytes + 863), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[62].base, - 13, g_bytes + 816), + 13, g_bytes + 868), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[63].base, - 19, g_bytes + 829), + 13, g_bytes + 881), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[64].base, - 16, g_bytes + 848), + 19, g_bytes + 894), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[65].base, - 14, 
g_bytes + 864), + 16, g_bytes + 913), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[66].base, - 16, g_bytes + 878), + 14, g_bytes + 929), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[67].base, - 13, g_bytes + 894), + 16, g_bytes + 943), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[68].base, - 6, g_bytes + 907), + 13, g_bytes + 959), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[69].base, - 4, g_bytes + 913), + 6, g_bytes + 972), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[70].base, - 4, g_bytes + 917), + 4, g_bytes + 978), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[71].base, - 6, g_bytes + 921), + 4, g_bytes + 982), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[72].base, - 7, g_bytes + 927), + 6, g_bytes + 986), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[73].base, - 4, g_bytes + 934), + 7, g_bytes + 992), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[74].base, - 8, g_bytes + 938), + 4, g_bytes + 999), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[75].base, - 17, g_bytes + 946), + 8, g_bytes + 1003), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[76].base, - 13, g_bytes + 963), + 17, g_bytes + 1011), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[77].base, - 8, g_bytes + 976), + 13, g_bytes + 1028), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[78].base, - 19, g_bytes + 984), + 8, g_bytes + 1041), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[79].base, - 13, g_bytes + 1003), + 19, g_bytes + 1049), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[80].base, - 4, g_bytes + 1016), + 13, g_bytes + 1068), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[81].base, - 8, g_bytes + 1020), + 4, g_bytes + 1081), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[82].base, - 12, g_bytes + 1028), + 8, g_bytes + 1085), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[83].base, - 18, g_bytes + 1040), + 12, g_bytes + 1093), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[84].base, - 19, g_bytes + 1058), + 18, g_bytes + 1105), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[85].base, - 5, g_bytes + 1077), + 19, g_bytes + 1123), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[86].base, - 7, g_bytes + 1082), + 5, g_bytes + 1142), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[87].base, - 7, g_bytes + 1089), + 7, g_bytes + 1147), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[88].base, - 11, g_bytes + 1096), + 7, g_bytes + 1154), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[89].base, - 6, g_bytes + 1107), + 11, g_bytes + 1161), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[90].base, - 10, g_bytes + 1113), + 6, g_bytes + 1172), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[91].base, - 25, g_bytes + 1123), + 10, g_bytes + 1178), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[92].base, - 17, g_bytes + 1148), + 25, g_bytes + 1188), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[93].base, - 4, g_bytes + 1165), + 17, g_bytes + 1213), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[94].base, - 3, g_bytes + 1169), + 4, g_bytes + 1230), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[95].base, - 16, g_bytes + 1172), + 3, g_bytes + 1234), 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[96].base, - 1, g_bytes + 1188), + 16, g_bytes + 1237), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, - 8, g_bytes + 1189), + 1, g_bytes + 1253), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, - 8, g_bytes + 1197), + 8, g_bytes + 1254), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[99].base, - 16, g_bytes + 1205), + 8, g_bytes + 1262), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[100].base, 4, g_bytes + 1221), + &grpc_static_metadata_refcounts[100].base, 16, g_bytes + 1270), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[101].base, 3, g_bytes + 1225), + &grpc_static_metadata_refcounts[101].base, 4, g_bytes + 1286), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[102].base, 11, g_bytes + 1228), + &grpc_static_metadata_refcounts[102].base, 3, g_bytes + 1290), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[103].base, 16, g_bytes + 1239), + &grpc_static_metadata_refcounts[103].base, 11, g_bytes + 1293), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[104].base, 13, g_bytes + 1255), + &grpc_static_metadata_refcounts[104].base, 16, g_bytes + 1304), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[105].base, 12, g_bytes + 1268), + &grpc_static_metadata_refcounts[105].base, 13, g_bytes + 1320), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[106].base, 21, g_bytes + 1280), + &grpc_static_metadata_refcounts[106].base, 12, g_bytes + 1333), + grpc_core::StaticMetadataSlice( + &grpc_static_metadata_refcounts[107].base, 21, g_bytes + 1345), }; /* Warning: the core static metadata currently operates under the soft @@ -892,17 +900,17 @@ uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 4, 6, 6, 8, 8, 2, 4, 4}; static const int8_t elems_r[] = { - 15, 10, -8, 0, 2, -43, -81, -43, 0, 4, -8, 0, 0, 0, 8, - -2, -9, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, -64, 0, -67, -68, -69, -51, -72, - 0, 32, 31, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, - 19, 18, 17, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, - 5, 4, 3, 4, 3, 3, 8, 0, 0, 0, 0, 0, 0, -5, 0}; + 15, 10, -8, 0, 2, -43, -80, -44, 0, 4, -8, 0, 0, 0, 6, -1, + -8, 0, 0, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, -65, 0, -68, -69, -50, -72, -73, 0, 32, 31, + 30, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 17, + 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 4, 3, + 3, 7, 0, 0, 0, 0, 0, 0, -5, 0}; static uint32_t elems_phash(uint32_t i) { - i -= 42; - uint32_t x = i % 105; - uint32_t y = i / 105; + i -= 43; + uint32_t x = i % 106; + uint32_t y = i / 106; uint32_t h = x; if (y < GPR_ARRAY_SIZE(elems_r)) { uint32_t delta = (uint32_t)elems_r[y]; @@ -912,29 +920,29 @@ static uint32_t elems_phash(uint32_t i) { } static const uint16_t elem_keys[] = { - 260, 261, 262, 263, 264, 265, 266, 1107, 1108, 1740, 147, 148, - 472, 473, 1633, 42, 43, 1000, 1001, 1750, 773, 774, 1526, 633, - 1643, 845, 2061, 2168, 5699, 5913, 6020, 6127, 6341, 6448, 6555, 1766, - 6662, 6769, 6876, 6983, 7090, 7197, 7304, 7411, 7518, 7625, 7732, 7839, - 7946, 8053, 8160, 6234, 8267, 8374, 8481, 8588, 8695, 8802, 8909, 9016, - 9123, 9230, 9337, 9444, 9551, 9658, 9765, 1167, 528, 9872, 9979, 208, - 10086, 1173, 1174, 1175, 1176, 1060, 
1809, 10193, 10942, 0, 0, 1702, - 0, 1816, 0, 0, 0, 349, 0, 0, 0, 1597, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0}; + 263, 264, 265, 266, 267, 268, 269, 1118, 1119, 1756, 149, 150, + 477, 478, 1648, 43, 44, 1010, 1011, 1540, 1767, 780, 781, 639, + 853, 1659, 2080, 2188, 5860, 6076, 6184, 6400, 6508, 6616, 6724, 6832, + 1783, 6940, 7048, 7156, 7264, 7372, 7480, 7588, 7696, 7804, 7912, 8020, + 8128, 8236, 8344, 6292, 8452, 8560, 8668, 8776, 8884, 8992, 9100, 9208, + 9316, 9424, 9532, 9640, 9748, 9856, 9964, 1178, 533, 10072, 10180, 210, + 10288, 1184, 1185, 1186, 1187, 1070, 10396, 1826, 11152, 0, 0, 0, + 1718, 0, 1833, 0, 0, 352, 0, 1612, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0}; static const uint8_t elem_idxs[] = { - 7, 8, 9, 10, 11, 12, 13, 76, 78, 71, 1, 2, 5, 6, 25, 3, - 4, 66, 65, 83, 62, 63, 30, 67, 73, 61, 57, 37, 14, 16, 17, 18, - 20, 21, 22, 15, 23, 24, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, - 38, 39, 40, 19, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, - 53, 54, 55, 75, 69, 56, 58, 70, 59, 77, 79, 80, 81, 64, 82, 60, - 74, 255, 255, 72, 255, 84, 255, 255, 255, 0, 255, 255, 255, 68}; + 7, 8, 9, 10, 11, 12, 13, 76, 78, 71, 1, 2, 5, 6, 25, 3, + 4, 66, 65, 30, 83, 62, 63, 67, 61, 73, 57, 37, 14, 16, 17, 19, + 20, 21, 22, 23, 15, 24, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, + 38, 39, 40, 18, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 75, 69, 56, 58, 70, 59, 77, 79, 80, 81, 64, 60, 82, + 74, 255, 255, 255, 72, 255, 84, 255, 255, 0, 255, 68}; grpc_mdelem grpc_static_mdelem_for_static_strings(intptr_t a, intptr_t b) { if (a == -1 || b == -1) return GRPC_MDNULL; - uint32_t k = static_cast(a * 107 + b); + uint32_t k = static_cast(a * 108 + b); uint32_t h = elems_phash(k); return h < GPR_ARRAY_SIZE(elem_keys) && elem_keys[h] == k && elem_idxs[h] != 255 @@ -953,144 +961,144 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[1].base, 7, g_bytes + 5), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[40].base, - 3, g_bytes + 658), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[41].base, + 3, g_bytes + 723), 1), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[1].base, 7, g_bytes + 5), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[41].base, - 4, g_bytes + 661), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[42].base, + 4, g_bytes + 726), 2), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[0].base, 5, g_bytes + 0), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[42].base, - 1, g_bytes + 665), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[43].base, + 1, g_bytes + 730), 3), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[0].base, 5, g_bytes + 0), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[43].base, - 11, g_bytes + 666), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[44].base, + 11, g_bytes + 731), 4), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[4].base, 7, g_bytes + 29), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[44].base, - 4, 
g_bytes + 677), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[45].base, + 4, g_bytes + 742), 5), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[4].base, 7, g_bytes + 29), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[45].base, - 5, g_bytes + 681), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[46].base, + 5, g_bytes + 746), 6), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[46].base, - 3, g_bytes + 686), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[47].base, + 3, g_bytes + 751), 7), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[47].base, - 3, g_bytes + 689), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[48].base, + 3, g_bytes + 754), 8), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[48].base, - 3, g_bytes + 692), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[49].base, + 3, g_bytes + 757), 9), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[49].base, - 3, g_bytes + 695), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[50].base, + 3, g_bytes + 760), 10), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[50].base, - 3, g_bytes + 698), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[51].base, + 3, g_bytes + 763), 11), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[51].base, - 3, g_bytes + 701), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[52].base, + 3, g_bytes + 766), 12), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[2].base, 7, g_bytes + 12), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[52].base, - 3, g_bytes + 704), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[53].base, + 3, g_bytes + 769), 13), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[53].base, - 14, g_bytes + 707), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[54].base, + 14, g_bytes + 772), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 14), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[16].base, 15, g_bytes + 186), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[54].base, - 13, g_bytes + 721), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[55].base, + 13, g_bytes + 786), 15), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[55].base, - 15, g_bytes + 734), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[56].base, + 15, g_bytes + 799), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 
0, g_bytes + 346), 16), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[56].base, - 13, g_bytes + 749), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[57].base, + 13, g_bytes + 814), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 17), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[57].base, - 6, g_bytes + 762), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[58].base, + 6, g_bytes + 827), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 18), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[58].base, - 27, g_bytes + 768), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[59].base, + 27, g_bytes + 833), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 19), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[59].base, - 3, g_bytes + 795), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[60].base, + 3, g_bytes + 860), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 20), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[60].base, - 5, g_bytes + 798), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[61].base, + 5, g_bytes + 863), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 21), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[61].base, - 13, g_bytes + 803), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[62].base, + 13, g_bytes + 868), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 22), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[62].base, - 13, g_bytes + 816), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[63].base, + 13, g_bytes + 881), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 23), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[63].base, - 19, g_bytes + 829), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[64].base, + 19, g_bytes + 894), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 24), @@ -1101,26 +1109,26 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { 0, g_bytes + 346), 25), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[64].base, - 16, g_bytes + 848), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[65].base, + 16, g_bytes + 913), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 26), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[65].base, - 14, g_bytes + 864), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[66].base, + 14, g_bytes + 929), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 27), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[66].base, - 16, g_bytes + 878), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[67].base, + 16, g_bytes + 943), 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 28), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[67].base, - 13, g_bytes + 894), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[68].base, + 13, g_bytes + 959), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 29), @@ -1131,38 +1139,38 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { 0, g_bytes + 346), 30), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[68].base, - 6, g_bytes + 907), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[69].base, + 6, g_bytes + 972), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 31), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[69].base, - 4, g_bytes + 913), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[70].base, + 4, g_bytes + 978), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 32), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[70].base, - 4, g_bytes + 917), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[71].base, + 4, g_bytes + 982), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 33), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[71].base, - 6, g_bytes + 921), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[72].base, + 6, g_bytes + 986), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 34), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[72].base, - 7, g_bytes + 927), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[73].base, + 7, g_bytes + 992), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 35), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[73].base, - 4, g_bytes + 934), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[74].base, + 4, g_bytes + 999), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 36), @@ -1173,116 +1181,116 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { 0, g_bytes + 346), 37), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[74].base, - 8, g_bytes + 938), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[75].base, + 8, g_bytes + 1003), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 38), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[75].base, - 17, g_bytes + 946), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[76].base, + 17, g_bytes + 1011), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 39), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[76].base, - 13, g_bytes + 963), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[77].base, + 13, g_bytes + 1028), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 40), grpc_core::StaticMetadata( - 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[77].base, - 8, g_bytes + 976), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[78].base, + 8, g_bytes + 1041), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 41), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[78].base, - 19, g_bytes + 984), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[79].base, + 19, g_bytes + 1049), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 42), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[79].base, - 13, g_bytes + 1003), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[80].base, + 13, g_bytes + 1068), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 43), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[80].base, - 4, g_bytes + 1016), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[81].base, + 4, g_bytes + 1081), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 44), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[81].base, - 8, g_bytes + 1020), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[82].base, + 8, g_bytes + 1085), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 45), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[82].base, - 12, g_bytes + 1028), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[83].base, + 12, g_bytes + 1093), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 46), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[83].base, - 18, g_bytes + 1040), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[84].base, + 18, g_bytes + 1105), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 47), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[84].base, - 19, g_bytes + 1058), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[85].base, + 19, g_bytes + 1123), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 48), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[85].base, - 5, g_bytes + 1077), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[86].base, + 5, g_bytes + 1142), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 49), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[86].base, - 7, g_bytes + 1082), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[87].base, + 7, g_bytes + 1147), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 50), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[87].base, - 7, g_bytes + 1089), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[88].base, + 7, g_bytes + 1154), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 51), grpc_core::StaticMetadata( - 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[88].base, - 11, g_bytes + 1096), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[89].base, + 11, g_bytes + 1161), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 52), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[89].base, - 6, g_bytes + 1107), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[90].base, + 6, g_bytes + 1172), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 53), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[90].base, - 10, g_bytes + 1113), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[91].base, + 10, g_bytes + 1178), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 54), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[91].base, - 25, g_bytes + 1123), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[92].base, + 25, g_bytes + 1188), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 55), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[92].base, - 17, g_bytes + 1148), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[93].base, + 17, g_bytes + 1213), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 56), @@ -1293,28 +1301,28 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { 0, g_bytes + 346), 57), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[93].base, - 4, g_bytes + 1165), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[94].base, + 4, g_bytes + 1230), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 58), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[94].base, - 3, g_bytes + 1169), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[95].base, + 3, g_bytes + 1234), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 59), grpc_core::StaticMetadata( - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[95].base, - 16, g_bytes + 1172), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[96].base, + 16, g_bytes + 1237), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 60), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[7].base, 11, g_bytes + 50), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[96].base, - 1, g_bytes + 1188), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, + 1, g_bytes + 1253), 61), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[7].base, @@ -1331,44 +1339,44 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[9].base, 13, g_bytes + 77), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, - 8, g_bytes + 1189), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, + 8, g_bytes + 1254), 64), grpc_core::StaticMetadata( 
grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[9].base, 13, g_bytes + 77), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, - 4, g_bytes + 643), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[39].base, + 4, g_bytes + 708), 65), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[9].base, 13, g_bytes + 77), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[37].base, - 7, g_bytes + 636), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, + 7, g_bytes + 701), 66), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[5].base, 2, g_bytes + 36), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, - 8, g_bytes + 1197), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[99].base, + 8, g_bytes + 1262), 67), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[14].base, 12, g_bytes + 158), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[99].base, - 16, g_bytes + 1205), + grpc_core::StaticMetadataSlice( + &grpc_static_metadata_refcounts[100].base, 16, g_bytes + 1270), 68), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[4].base, 7, g_bytes + 29), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[100].base, 4, g_bytes + 1221), + &grpc_static_metadata_refcounts[101].base, 4, g_bytes + 1286), 69), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[1].base, 7, g_bytes + 5), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[101].base, 3, g_bytes + 1225), + &grpc_static_metadata_refcounts[102].base, 3, g_bytes + 1290), 70), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[16].base, @@ -1379,80 +1387,80 @@ grpc_core::StaticMetadata grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = { grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[15].base, 16, g_bytes + 170), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, - 8, g_bytes + 1189), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, + 8, g_bytes + 1254), 72), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[15].base, 16, g_bytes + 170), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, - 4, g_bytes + 643), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[39].base, + 4, g_bytes + 708), 73), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[102].base, 11, g_bytes + 1228), + &grpc_static_metadata_refcounts[103].base, 11, g_bytes + 1293), grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[28].base, 0, g_bytes + 346), 74), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, - 8, g_bytes + 1189), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, + 8, g_bytes + 1254), 75), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[37].base, - 7, g_bytes + 636), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, + 7, 
g_bytes + 701), 76), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[103].base, 16, g_bytes + 1239), + &grpc_static_metadata_refcounts[104].base, 16, g_bytes + 1304), 77), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, - 4, g_bytes + 643), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[39].base, + 4, g_bytes + 708), 78), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[104].base, 13, g_bytes + 1255), + &grpc_static_metadata_refcounts[105].base, 13, g_bytes + 1320), 79), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[105].base, 12, g_bytes + 1268), + &grpc_static_metadata_refcounts[106].base, 12, g_bytes + 1333), 80), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[10].base, 20, g_bytes + 90), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[106].base, 21, g_bytes + 1280), + &grpc_static_metadata_refcounts[107].base, 21, g_bytes + 1345), 81), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[16].base, 15, g_bytes + 186), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[97].base, - 8, g_bytes + 1189), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[98].base, + 8, g_bytes + 1254), 82), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[16].base, 15, g_bytes + 186), - grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[38].base, - 4, g_bytes + 643), + grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[39].base, + 4, g_bytes + 708), 83), grpc_core::StaticMetadata( grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[16].base, 15, g_bytes + 186), grpc_core::StaticMetadataSlice( - &grpc_static_metadata_refcounts[104].base, 13, g_bytes + 1255), + &grpc_static_metadata_refcounts[105].base, 13, g_bytes + 1320), 84), }; const uint8_t grpc_static_accept_encoding_metadata[8] = {0, 75, 76, 77, diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h index 787bae7ede5..7ea000e1066 100644 --- a/src/core/lib/transport/static_metadata.h +++ b/src/core/lib/transport/static_metadata.h @@ -36,7 +36,7 @@ static_assert( std::is_trivially_destructible::value, "grpc_core::StaticMetadataSlice must be trivially destructible."); -#define GRPC_STATIC_MDSTR_COUNT 107 +#define GRPC_STATIC_MDSTR_COUNT 108 extern const grpc_core::StaticMetadataSlice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]; /* ":path" */ @@ -111,157 +111,160 @@ extern const grpc_core::StaticMetadataSlice /* "/grpc.lb.v1.LoadBalancer/BalanceLoad" */ #define GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD \ (grpc_static_slice_table[33]) +/* "/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats" */ +#define GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_LOAD_STATS_DOT_V2_DOT_LOADREPORTINGSERVICE_SLASH_STREAMLOADSTATS \ + (grpc_static_slice_table[34]) /* "/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints" */ 
#define GRPC_MDSTR_SLASH_ENVOY_DOT_API_DOT_V2_DOT_ENDPOINTDISCOVERYSERVICE_SLASH_STREAMENDPOINTS \ - (grpc_static_slice_table[34]) + (grpc_static_slice_table[35]) /* "/grpc.health.v1.Health/Watch" */ #define GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH \ - (grpc_static_slice_table[35]) + (grpc_static_slice_table[36]) /* "/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources" */ #define GRPC_MDSTR_SLASH_ENVOY_DOT_SERVICE_DOT_DISCOVERY_DOT_V2_DOT_AGGREGATEDDISCOVERYSERVICE_SLASH_STREAMAGGREGATEDRESOURCES \ - (grpc_static_slice_table[36]) + (grpc_static_slice_table[37]) /* "deflate" */ -#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[37]) +#define GRPC_MDSTR_DEFLATE (grpc_static_slice_table[38]) /* "gzip" */ -#define GRPC_MDSTR_GZIP (grpc_static_slice_table[38]) +#define GRPC_MDSTR_GZIP (grpc_static_slice_table[39]) /* "stream/gzip" */ -#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[39]) +#define GRPC_MDSTR_STREAM_SLASH_GZIP (grpc_static_slice_table[40]) /* "GET" */ -#define GRPC_MDSTR_GET (grpc_static_slice_table[40]) +#define GRPC_MDSTR_GET (grpc_static_slice_table[41]) /* "POST" */ -#define GRPC_MDSTR_POST (grpc_static_slice_table[41]) +#define GRPC_MDSTR_POST (grpc_static_slice_table[42]) /* "/" */ -#define GRPC_MDSTR_SLASH (grpc_static_slice_table[42]) +#define GRPC_MDSTR_SLASH (grpc_static_slice_table[43]) /* "/index.html" */ -#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[43]) +#define GRPC_MDSTR_SLASH_INDEX_DOT_HTML (grpc_static_slice_table[44]) /* "http" */ -#define GRPC_MDSTR_HTTP (grpc_static_slice_table[44]) +#define GRPC_MDSTR_HTTP (grpc_static_slice_table[45]) /* "https" */ -#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[45]) +#define GRPC_MDSTR_HTTPS (grpc_static_slice_table[46]) /* "200" */ -#define GRPC_MDSTR_200 (grpc_static_slice_table[46]) +#define GRPC_MDSTR_200 (grpc_static_slice_table[47]) /* "204" */ -#define GRPC_MDSTR_204 (grpc_static_slice_table[47]) +#define GRPC_MDSTR_204 (grpc_static_slice_table[48]) /* "206" */ -#define GRPC_MDSTR_206 (grpc_static_slice_table[48]) +#define GRPC_MDSTR_206 (grpc_static_slice_table[49]) /* "304" */ -#define GRPC_MDSTR_304 (grpc_static_slice_table[49]) +#define GRPC_MDSTR_304 (grpc_static_slice_table[50]) /* "400" */ -#define GRPC_MDSTR_400 (grpc_static_slice_table[50]) +#define GRPC_MDSTR_400 (grpc_static_slice_table[51]) /* "404" */ -#define GRPC_MDSTR_404 (grpc_static_slice_table[51]) +#define GRPC_MDSTR_404 (grpc_static_slice_table[52]) /* "500" */ -#define GRPC_MDSTR_500 (grpc_static_slice_table[52]) +#define GRPC_MDSTR_500 (grpc_static_slice_table[53]) /* "accept-charset" */ -#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[53]) +#define GRPC_MDSTR_ACCEPT_CHARSET (grpc_static_slice_table[54]) /* "gzip, deflate" */ -#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[54]) +#define GRPC_MDSTR_GZIP_COMMA_DEFLATE (grpc_static_slice_table[55]) /* "accept-language" */ -#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[55]) +#define GRPC_MDSTR_ACCEPT_LANGUAGE (grpc_static_slice_table[56]) /* "accept-ranges" */ -#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[56]) +#define GRPC_MDSTR_ACCEPT_RANGES (grpc_static_slice_table[57]) /* "accept" */ -#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[57]) +#define GRPC_MDSTR_ACCEPT (grpc_static_slice_table[58]) /* "access-control-allow-origin" */ -#define GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[58]) +#define 
GRPC_MDSTR_ACCESS_CONTROL_ALLOW_ORIGIN (grpc_static_slice_table[59]) /* "age" */ -#define GRPC_MDSTR_AGE (grpc_static_slice_table[59]) +#define GRPC_MDSTR_AGE (grpc_static_slice_table[60]) /* "allow" */ -#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[60]) +#define GRPC_MDSTR_ALLOW (grpc_static_slice_table[61]) /* "authorization" */ -#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[61]) +#define GRPC_MDSTR_AUTHORIZATION (grpc_static_slice_table[62]) /* "cache-control" */ -#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[62]) +#define GRPC_MDSTR_CACHE_CONTROL (grpc_static_slice_table[63]) /* "content-disposition" */ -#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[63]) +#define GRPC_MDSTR_CONTENT_DISPOSITION (grpc_static_slice_table[64]) /* "content-language" */ -#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[64]) +#define GRPC_MDSTR_CONTENT_LANGUAGE (grpc_static_slice_table[65]) /* "content-length" */ -#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[65]) +#define GRPC_MDSTR_CONTENT_LENGTH (grpc_static_slice_table[66]) /* "content-location" */ -#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[66]) +#define GRPC_MDSTR_CONTENT_LOCATION (grpc_static_slice_table[67]) /* "content-range" */ -#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[67]) +#define GRPC_MDSTR_CONTENT_RANGE (grpc_static_slice_table[68]) /* "cookie" */ -#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[68]) +#define GRPC_MDSTR_COOKIE (grpc_static_slice_table[69]) /* "date" */ -#define GRPC_MDSTR_DATE (grpc_static_slice_table[69]) +#define GRPC_MDSTR_DATE (grpc_static_slice_table[70]) /* "etag" */ -#define GRPC_MDSTR_ETAG (grpc_static_slice_table[70]) +#define GRPC_MDSTR_ETAG (grpc_static_slice_table[71]) /* "expect" */ -#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[71]) +#define GRPC_MDSTR_EXPECT (grpc_static_slice_table[72]) /* "expires" */ -#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[72]) +#define GRPC_MDSTR_EXPIRES (grpc_static_slice_table[73]) /* "from" */ -#define GRPC_MDSTR_FROM (grpc_static_slice_table[73]) +#define GRPC_MDSTR_FROM (grpc_static_slice_table[74]) /* "if-match" */ -#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[74]) +#define GRPC_MDSTR_IF_MATCH (grpc_static_slice_table[75]) /* "if-modified-since" */ -#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[75]) +#define GRPC_MDSTR_IF_MODIFIED_SINCE (grpc_static_slice_table[76]) /* "if-none-match" */ -#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[76]) +#define GRPC_MDSTR_IF_NONE_MATCH (grpc_static_slice_table[77]) /* "if-range" */ -#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[77]) +#define GRPC_MDSTR_IF_RANGE (grpc_static_slice_table[78]) /* "if-unmodified-since" */ -#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[78]) +#define GRPC_MDSTR_IF_UNMODIFIED_SINCE (grpc_static_slice_table[79]) /* "last-modified" */ -#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[79]) +#define GRPC_MDSTR_LAST_MODIFIED (grpc_static_slice_table[80]) /* "link" */ -#define GRPC_MDSTR_LINK (grpc_static_slice_table[80]) +#define GRPC_MDSTR_LINK (grpc_static_slice_table[81]) /* "location" */ -#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[81]) +#define GRPC_MDSTR_LOCATION (grpc_static_slice_table[82]) /* "max-forwards" */ -#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[82]) +#define GRPC_MDSTR_MAX_FORWARDS (grpc_static_slice_table[83]) /* "proxy-authenticate" */ -#define GRPC_MDSTR_PROXY_AUTHENTICATE 
(grpc_static_slice_table[83]) +#define GRPC_MDSTR_PROXY_AUTHENTICATE (grpc_static_slice_table[84]) /* "proxy-authorization" */ -#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[84]) +#define GRPC_MDSTR_PROXY_AUTHORIZATION (grpc_static_slice_table[85]) /* "range" */ -#define GRPC_MDSTR_RANGE (grpc_static_slice_table[85]) +#define GRPC_MDSTR_RANGE (grpc_static_slice_table[86]) /* "referer" */ -#define GRPC_MDSTR_REFERER (grpc_static_slice_table[86]) +#define GRPC_MDSTR_REFERER (grpc_static_slice_table[87]) /* "refresh" */ -#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[87]) +#define GRPC_MDSTR_REFRESH (grpc_static_slice_table[88]) /* "retry-after" */ -#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[88]) +#define GRPC_MDSTR_RETRY_AFTER (grpc_static_slice_table[89]) /* "server" */ -#define GRPC_MDSTR_SERVER (grpc_static_slice_table[89]) +#define GRPC_MDSTR_SERVER (grpc_static_slice_table[90]) /* "set-cookie" */ -#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[90]) +#define GRPC_MDSTR_SET_COOKIE (grpc_static_slice_table[91]) /* "strict-transport-security" */ -#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[91]) +#define GRPC_MDSTR_STRICT_TRANSPORT_SECURITY (grpc_static_slice_table[92]) /* "transfer-encoding" */ -#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[92]) +#define GRPC_MDSTR_TRANSFER_ENCODING (grpc_static_slice_table[93]) /* "vary" */ -#define GRPC_MDSTR_VARY (grpc_static_slice_table[93]) +#define GRPC_MDSTR_VARY (grpc_static_slice_table[94]) /* "via" */ -#define GRPC_MDSTR_VIA (grpc_static_slice_table[94]) +#define GRPC_MDSTR_VIA (grpc_static_slice_table[95]) /* "www-authenticate" */ -#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[95]) +#define GRPC_MDSTR_WWW_AUTHENTICATE (grpc_static_slice_table[96]) /* "0" */ -#define GRPC_MDSTR_0 (grpc_static_slice_table[96]) +#define GRPC_MDSTR_0 (grpc_static_slice_table[97]) /* "identity" */ -#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[97]) +#define GRPC_MDSTR_IDENTITY (grpc_static_slice_table[98]) /* "trailers" */ -#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[98]) +#define GRPC_MDSTR_TRAILERS (grpc_static_slice_table[99]) /* "application/grpc" */ -#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[99]) +#define GRPC_MDSTR_APPLICATION_SLASH_GRPC (grpc_static_slice_table[100]) /* "grpc" */ -#define GRPC_MDSTR_GRPC (grpc_static_slice_table[100]) +#define GRPC_MDSTR_GRPC (grpc_static_slice_table[101]) /* "PUT" */ -#define GRPC_MDSTR_PUT (grpc_static_slice_table[101]) +#define GRPC_MDSTR_PUT (grpc_static_slice_table[102]) /* "lb-cost-bin" */ -#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[102]) +#define GRPC_MDSTR_LB_COST_BIN (grpc_static_slice_table[103]) /* "identity,deflate" */ -#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[103]) +#define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE (grpc_static_slice_table[104]) /* "identity,gzip" */ -#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[104]) +#define GRPC_MDSTR_IDENTITY_COMMA_GZIP (grpc_static_slice_table[105]) /* "deflate,gzip" */ -#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[105]) +#define GRPC_MDSTR_DEFLATE_COMMA_GZIP (grpc_static_slice_table[106]) /* "identity,deflate,gzip" */ #define GRPC_MDSTR_IDENTITY_COMMA_DEFLATE_COMMA_GZIP \ - (grpc_static_slice_table[106]) + (grpc_static_slice_table[107]) namespace grpc_core { struct StaticSliceRefcount; diff --git a/src/proto/grpc/lb/v2/BUILD b/src/proto/grpc/lb/v2/BUILD index 
bf0f1a78b1c..0b46b2892ed 100644
--- a/src/proto/grpc/lb/v2/BUILD
+++ b/src/proto/grpc/lb/v2/BUILD
@@ -22,10 +22,20 @@ grpc_package(
 )
 
 grpc_proto_library(
-    name = "xds_for_test_proto",
+    name = "eds_for_test_proto",
     srcs = [
-        "xds_for_test.proto",
+        "eds_for_test.proto",
     ],
     has_services = True,
     well_known_protos = True,
 )
+
+grpc_proto_library(
+    name = "lrs_for_test_proto",
+    srcs = [
+        "lrs_for_test.proto",
+    ],
+    has_services = True,
+    well_known_protos = True,
+    deps = [":eds_for_test_proto"],
+)
diff --git a/src/proto/grpc/lb/v2/xds_for_test.proto b/src/proto/grpc/lb/v2/eds_for_test.proto
similarity index 100%
rename from src/proto/grpc/lb/v2/xds_for_test.proto
rename to src/proto/grpc/lb/v2/eds_for_test.proto
diff --git a/src/proto/grpc/lb/v2/lrs_for_test.proto b/src/proto/grpc/lb/v2/lrs_for_test.proto
new file mode 100644
index 00000000000..76c560a99cf
--- /dev/null
+++ b/src/proto/grpc/lb/v2/lrs_for_test.proto
@@ -0,0 +1,180 @@
+// Copyright 2019 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file contains the lrs protocol and its dependency.
+//
+// TODO(juanlishen): It's a workaround and should be removed once we have a
+// clean solution to the circular dependency between the envoy data plane APIs
+// and gRPC. We can't check in this file due to conflict with internal code.
+
+syntax = "proto3";
+
+package envoy.service.load_stats.v2;
+
+import "google/protobuf/duration.proto";
+import "src/proto/grpc/lb/v2/eds_for_test.proto";
+
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+message EndpointLoadMetricStats {
+  // Name of the metric; may be empty.
+  string metric_name = 1;
+
+  // Number of calls that finished and included this metric.
+  uint64 num_requests_finished_with_metric = 2;
+
+  // Sum of metric values across all calls that finished with this metric for
+  // load_reporting_interval.
+  double total_metric_value = 3;
+}
+
+message UpstreamLocalityStats {
+  // Name of zone, region and optionally endpoint group these metrics were
+  // collected from. Zone and region names could be empty if unknown.
+  envoy.api.v2.Locality locality = 1;
+
+  // The total number of requests successfully completed by the endpoints in
+  // the locality.
+  uint64 total_successful_requests = 2;
+
+  // The total number of unfinished requests.
+  uint64 total_requests_in_progress = 3;
+
+  // The total number of requests that failed due to errors at the endpoint,
+  // aggregated over all endpoints in the locality.
+  uint64 total_error_requests = 4;
+
+  // The total number of requests that were issued by this Envoy since
+  // the last report. This information is aggregated over all the
+  // upstream endpoints in the locality.
+  uint64 total_issued_requests = 8;
+
+  // Stats for multi-dimensional load balancing.
+  repeated EndpointLoadMetricStats load_metric_stats = 5;
+
+//  // Endpoint granularity stats information for this locality. This information
+//  // is populated if the Server requests it by setting
+//  // :ref:`LoadStatsResponse.report_endpoint_granularity`.
+//  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;
+
+  // [#not-implemented-hide:] The priority of the endpoint group these metrics
+  // were collected from.
+  uint32 priority = 6;
+}
+
+// Per cluster load stats. Envoy reports these stats to a management server in
+// a :ref:`LoadStatsRequest`.
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+// Next ID: 7
+message ClusterStats {
+  // The name of the cluster.
+  string cluster_name = 1;
+
+  // The eds_cluster_config service_name of the cluster. It's possible that
+  // two clusters send the same service_name to EDS; in that case, the
+  // management server is supposed to do aggregation on the load reports.
+  string cluster_service_name = 6;
+
+  // Need at least one.
+  repeated UpstreamLocalityStats upstream_locality_stats = 2;
+
+  // Cluster-level stats such as total_successful_requests may be computed by
+  // summing upstream_locality_stats. In addition, below there are additional
+  // cluster-wide stats.
+  //
+  // The total number of dropped requests. This covers requests
+  // deliberately dropped by the drop_overload policy and circuit breaking.
+  uint64 total_dropped_requests = 3;
+
+  message DroppedRequests {
+    // Identifier for the policy specifying the drop.
+    string category = 1;
+    // Total number of deliberately dropped requests for the category.
+    uint64 dropped_count = 2;
+  }
+  // Information about deliberately dropped requests for each category
+  // specified in the DropOverload policy.
+  repeated DroppedRequests dropped_requests = 5;
+
+  // Period over which the actual load report occurred. This will be
+  // guaranteed to include every request reported. Due to system load and
+  // delays between the *LoadStatsRequest* sent from Envoy and the
+  // *LoadStatsResponse* message sent from the management server, this may be
+  // longer than the requested load reporting interval in the
+  // *LoadStatsResponse*.
+  google.protobuf.Duration load_report_interval = 4;
+}
+
+// [#protodoc-title: Load reporting service]
+
+service LoadReportingService {
+  // Advanced API to allow for multi-dimensional load balancing by remote
+  // server. For receiving LB assignments, the steps are:
+  // 1. The management server is configured with per cluster/zone/load metric
+  //    capacity configuration. The capacity configuration definition is
+  //    outside of the scope of this document.
+  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the
+  //    clusters to balance.
+  //
+  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a
+  // management server:
+  // 1. Once a connection is established, the management server publishes a
+  //    LoadStatsResponse for all clusters it is interested in learning load
+  //    stats about.
+  // 2. For each cluster, Envoy load balances incoming traffic to upstream
+  //    hosts based on per-zone weights and/or per-instance weights (if
+  //    specified) based on intra-zone LbPolicy. This information comes from
+  //    the above {Stream,Fetch}Endpoints.
+  // 3. When upstream hosts reply, they optionally add a header with an ASCII
+  //    representation of EndpointLoadMetricStats.
+  // 4. Envoy aggregates load reports over the period of time given to it in
+  //    LoadStatsResponse.load_reporting_interval. This includes aggregation
+  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
+  //    well as load metrics from upstream hosts.
+  // 5. When the timer of load_reporting_interval expires, Envoy sends new
+  //    LoadStatsRequest filled with load reports for each cluster.
+  // 6. The management server uses the load reports from all reported Envoys
+  //    from around the world, computes global assignment and prepares traffic
+  //    assignment destined for each zone Envoys are located in. Goto 2.
+  rpc StreamLoadStats(stream LoadStatsRequest)
+      returns (stream LoadStatsResponse) {
+  }
+}
+
+// A load report Envoy sends to the management server.
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+message LoadStatsRequest {
+  // Node identifier for Envoy instance.
+  envoy.api.v2.Node node = 1;
+
+  // A list of load stats to report.
+  repeated ClusterStats cluster_stats = 2;
+}
+
+// The management server sends Envoy a LoadStatsResponse with all clusters it
+// is interested in learning load stats about.
+// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
+message LoadStatsResponse {
+  // Clusters to report stats for.
+  repeated string clusters = 1;
+
+  // The minimum interval of time to collect stats over. This is only a
+  // minimum for two reasons:
+  // 1. There may be some delay from when the timer fires until stats sampling
+  //    occurs.
+  // 2. For clusters that were already featured in the previous
+  //    *LoadStatsResponse*, any traffic that is observed in between the
+  //    corresponding previous *LoadStatsRequest* and this *LoadStatsResponse*
+  //    will also be accumulated and billed to the cluster. This avoids a
+  //    period of inobservability that might otherwise exist between the
+  //    messages. New clusters are not subject to this consideration.
+  google.protobuf.Duration load_reporting_interval = 2;
+
+  // Set to *true* if the management server supports endpoint granularity
+  // reports.
+  bool report_endpoint_granularity = 3;
+}
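The messages above are exercised in the test changes below through the C++ classes generated from this file. As a point of reference, here is a minimal sketch, not part of the change itself, of how a load report of the expected shape can be assembled; the cluster name, sub-zone, and counter values are invented for illustration:

    #include "src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.h"

    namespace {

    // Builds a one-cluster, one-locality load report of the shape the
    // LoadReportingService expects on its StreamLoadStats stream.
    envoy::service::load_stats::v2::LoadStatsRequest MakeExampleLoadReport() {
      envoy::service::load_stats::v2::LoadStatsRequest request;
      envoy::service::load_stats::v2::ClusterStats* cluster_stats =
          request.add_cluster_stats();
      cluster_stats->set_cluster_name("example_cluster");
      // Per-locality counters, aggregated over load_reporting_interval.
      envoy::service::load_stats::v2::UpstreamLocalityStats* locality_stats =
          cluster_stats->add_upstream_locality_stats();
      locality_stats->mutable_locality()->set_sub_zone("example_sub_zone");
      locality_stats->set_total_successful_requests(98);
      locality_stats->set_total_error_requests(2);
      locality_stats->set_total_issued_requests(100);
      // Drops are reported cluster-wide, not per locality.
      cluster_stats->set_total_dropped_requests(5);
      return request;
    }

    }  // namespace

The sketch relies only on standard protobuf-generated accessors (add_*, mutable_*, set_*) for the message types defined above.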
diff --git a/test/core/end2end/fuzzers/hpack.dictionary b/test/core/end2end/fuzzers/hpack.dictionary
index 24e3320dd71..921018db7f8 100644
--- a/test/core/end2end/fuzzers/hpack.dictionary
+++ b/test/core/end2end/fuzzers/hpack.dictionary
@@ -33,6 +33,7 @@
 "\x1Egrpc.max_request_message_bytes"
 "\x1Fgrpc.max_response_message_bytes"
 "$/grpc.lb.v1.LoadBalancer/BalanceLoad"
+"A/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats"
 "6/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints"
 "\x1C/grpc.health.v1.Health/Watch"
 "P/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources"
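The leading 'A' on the new dictionary entry follows the file's existing convention: each entry is a string prefixed with one byte holding its length, so 'A' (0x41, i.e. 65) is the byte length of the new LRS method path, just as '6' (0x36, i.e. 54) prefixes the 54-byte StreamEndpoints path below it. A self-contained check, for illustration only:

    #include <cassert>
    #include <cstring>

    int main() {
      // The fuzzer dictionary prefixes each string with its byte length;
      // 'A' == 0x41 == 65 matches the length of the new LRS method path.
      const char* kLrsMethod =
          "/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats";
      assert(std::strlen(kLrsMethod) == 'A');
      return 0;
    }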
@@ -150,130 +159,169 @@ class BackendServiceImpl : public BackendService { return status; } + void Start() {} void Shutdown() {} std::set<grpc::string> clients() { - grpc::internal::MutexLock lock(&clients_mu_); + grpc_core::MutexLock lock(&clients_mu_); return clients_; } private: void AddClient(const grpc::string& client) { - grpc::internal::MutexLock lock(&clients_mu_); + grpc_core::MutexLock lock(&clients_mu_); clients_.insert(client); } - grpc::internal::Mutex mu_; - grpc::internal::Mutex clients_mu_; + grpc_core::Mutex mu_; + grpc_core::Mutex clients_mu_; std::set<grpc::string> clients_; }; -struct ClientStats { - size_t num_calls_started = 0; - size_t num_calls_finished = 0; - size_t num_calls_finished_with_client_failed_to_send = 0; - size_t num_calls_finished_known_received = 0; - std::map<grpc::string, size_t> drop_token_counts; - - ClientStats& operator+=(const ClientStats& other) { - num_calls_started += other.num_calls_started; - num_calls_finished += other.num_calls_finished; - num_calls_finished_with_client_failed_to_send += - other.num_calls_finished_with_client_failed_to_send; - num_calls_finished_known_received += - other.num_calls_finished_known_received; - for (const auto& p : other.drop_token_counts) { - drop_token_counts[p.first] += p.second; +class ClientStats { + public: + struct LocalityStats { + // Converts from proto message class. + LocalityStats(const UpstreamLocalityStats& upstream_locality_stats) + : total_successful_requests( + upstream_locality_stats.total_successful_requests()), + total_requests_in_progress( + upstream_locality_stats.total_requests_in_progress()), + total_error_requests(upstream_locality_stats.total_error_requests()), + total_issued_requests( + upstream_locality_stats.total_issued_requests()) {} + + uint64_t total_successful_requests; + uint64_t total_requests_in_progress; + uint64_t total_error_requests; + uint64_t total_issued_requests; + }; + + // Converts from proto message class. + ClientStats(const ClusterStats& cluster_stats) + : total_dropped_requests_(cluster_stats.total_dropped_requests()) { + for (const auto& input_locality_stats : + cluster_stats.upstream_locality_stats()) { + locality_stats_.emplace(input_locality_stats.locality().sub_zone(), + LocalityStats(input_locality_stats)); } - return *this; } - void Reset() { - num_calls_started = 0; - num_calls_finished = 0; - num_calls_finished_with_client_failed_to_send = 0; - num_calls_finished_known_received = 0; - drop_token_counts.clear(); + uint64_t total_successful_requests() const { + uint64_t sum = 0; + for (auto& p : locality_stats_) { + sum += p.second.total_successful_requests; + } + return sum; + } + uint64_t total_requests_in_progress() const { + uint64_t sum = 0; + for (auto& p : locality_stats_) { + sum += p.second.total_requests_in_progress; + } + return sum; + } + uint64_t total_error_requests() const { + uint64_t sum = 0; + for (auto& p : locality_stats_) { + sum += p.second.total_error_requests; + } + return sum; } + uint64_t total_issued_requests() const { + uint64_t sum = 0; + for (auto& p : locality_stats_) { + sum += p.second.total_issued_requests; + } + return sum; + } + uint64_t total_dropped_requests() const { return total_dropped_requests_; } + + private: + std::map<grpc::string, LocalityStats> locality_stats_; + uint64_t total_dropped_requests_; };
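// To make the aggregation above concrete: ClientStats flattens a
// per-locality ClusterStats report into per-cluster totals, keyed by
// sub_zone. A small worked example (sketch only; the values are arbitrary):
void ClientStatsAggregationSketch() {
  ClusterStats cluster_stats;
  cluster_stats.set_total_dropped_requests(2);
  auto* locality_a = cluster_stats.add_upstream_locality_stats();
  locality_a->mutable_locality()->set_sub_zone("sub_zone_a");
  locality_a->set_total_successful_requests(3);
  auto* locality_b = cluster_stats.add_upstream_locality_stats();
  locality_b->mutable_locality()->set_sub_zone("sub_zone_b");
  locality_b->set_total_successful_requests(4);
  ClientStats stats(cluster_stats);
  GPR_ASSERT(stats.total_successful_requests() == 7);  // 3 + 4, summed
  GPR_ASSERT(stats.total_dropped_requests() == 2);     // taken as-is
}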
-class BalancerServiceImpl : public BalancerService { +class EdsServiceImpl : public EdsService { public: using Stream = ServerReaderWriter<DiscoveryResponse, DiscoveryRequest>; using ResponseDelayPair = std::pair<DiscoveryResponse, int>; - explicit BalancerServiceImpl(int client_load_reporting_interval_seconds) - : client_load_reporting_interval_seconds_( - client_load_reporting_interval_seconds) {} - Status StreamEndpoints(ServerContext* context, Stream* stream) override { - // TODO(juanlishen): Clean up the scoping. - gpr_log(GPR_INFO, "LB[%p]: EDS starts", this); - { - grpc::internal::MutexLock lock(&mu_); - if (serverlist_done_) goto done; - } - { + gpr_log(GPR_INFO, "LB[%p]: EDS StreamEndpoints starts", this); + [&]() { + { + grpc_core::MutexLock lock(&eds_mu_); + if (eds_done_) return; + } // Balancer shouldn't receive the call credentials metadata. EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey), context->client_metadata().end()); + // Read request. DiscoveryRequest request; - std::vector<ResponseDelayPair> responses_and_delays; - if (!stream->Read(&request)) goto done; + if (!stream->Read(&request)) return; IncreaseRequestCount(); gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this, request.DebugString().c_str()); + // Send response. + std::vector<ResponseDelayPair> responses_and_delays; { - grpc::internal::MutexLock lock(&mu_); + grpc_core::MutexLock lock(&eds_mu_); responses_and_delays = responses_and_delays_; } for (const auto& response_and_delay : responses_and_delays) { SendResponse(stream, response_and_delay.first, response_and_delay.second); } - { - grpc::internal::MutexLock lock(&mu_); - serverlist_cond_.WaitUntil(&mu_, [this] { return serverlist_done_; }); - } - if (client_load_reporting_interval_seconds_ > 0) { - // TODO(juanlishen): Use LRS to receive load report. - } - } - done: - gpr_log(GPR_INFO, "LB[%p]: done", this); + // Wait until notified done. + grpc_core::MutexLock lock(&eds_mu_); + eds_cond_.WaitUntil(&eds_mu_, [this] { return eds_done_; }); + }(); + gpr_log(GPR_INFO, "LB[%p]: EDS StreamEndpoints done", this); return Status::OK; } void add_response(const DiscoveryResponse& response, int send_after_ms) { - grpc::internal::MutexLock lock(&mu_); + grpc_core::MutexLock lock(&eds_mu_); responses_and_delays_.push_back(std::make_pair(response, send_after_ms)); } - void Shutdown() { - grpc::internal::MutexLock lock(&mu_); - NotifyDoneWithServerlistsLocked(); + void Start() { + grpc_core::MutexLock lock(&eds_mu_); + eds_done_ = false; responses_and_delays_.clear(); + } + + void Shutdown() { + { + grpc_core::MutexLock lock(&eds_mu_); + NotifyDoneWithEdsCallLocked(); + responses_and_delays_.clear(); + } gpr_log(GPR_INFO, "LB[%p]: shut down", this); } static DiscoveryResponse BuildResponseForBackends( - const std::vector<int>& backend_ports, - const std::map<grpc::string, size_t>& drop_token_counts) { + const std::vector<std::vector<int>>& backend_ports) { ClusterLoadAssignment assignment; assignment.set_cluster_name("service name"); - auto* endpoints = assignment.add_endpoints(); - endpoints->mutable_load_balancing_weight()->set_value(3); - endpoints->set_priority(0); - endpoints->mutable_locality()->set_region(kDefaultLocalityRegion); - endpoints->mutable_locality()->set_zone(kDefaultLocalityZone); - endpoints->mutable_locality()->set_sub_zone(kDefaultLocalitySubzone); - for (const int& backend_port : backend_ports) { - auto* lb_endpoints = endpoints->add_lb_endpoints(); - auto* endpoint = lb_endpoints->mutable_endpoint(); - auto* address = endpoint->mutable_address(); - auto* socket_address = address->mutable_socket_address(); - socket_address->set_address("127.0.0.1"); - socket_address->set_port_value(backend_port); + for (size_t i = 0; i < backend_ports.size(); ++i) { + auto* endpoints = assignment.add_endpoints(); + endpoints->mutable_load_balancing_weight()->set_value(3); + endpoints->set_priority(0); + endpoints->mutable_locality()->set_region(kDefaultLocalityRegion); + endpoints->mutable_locality()->set_zone(kDefaultLocalityZone); + std::ostringstream sub_zone; + sub_zone << kDefaultLocalitySubzone << '_' << i; + endpoints->mutable_locality()->set_sub_zone(sub_zone.str()); + for (const int& backend_port : backend_ports[i]) { + auto* lb_endpoints = endpoints->add_lb_endpoints(); + auto* endpoint = lb_endpoints->mutable_endpoint(); + auto* address = endpoint->mutable_address(); + auto* socket_address = address->mutable_socket_address(); + socket_address->set_address("127.0.0.1"); + socket_address->set_port_value(backend_port); + } } DiscoveryResponse response; response.set_type_url(kEdsTypeUrl); @@ -281,15 +329,15 @@ class BalancerServiceImpl : public BalancerService { return response; } - void NotifyDoneWithServerlists() { - grpc::internal::MutexLock lock(&mu_); - NotifyDoneWithServerlistsLocked(); + void NotifyDoneWithEdsCall() { + grpc_core::MutexLock lock(&eds_mu_); + NotifyDoneWithEdsCallLocked(); } - void NotifyDoneWithServerlistsLocked() { - if (!serverlist_done_) { - serverlist_done_ = true; - serverlist_cond_.Broadcast(); + void NotifyDoneWithEdsCallLocked() { + if (!eds_done_) { + eds_done_ = true; + eds_cond_.Broadcast(); } } @@ -306,11 +354,108 @@ class BalancerServiceImpl : public BalancerService { stream->Write(response); } - const int client_load_reporting_interval_seconds_; + grpc_core::CondVar eds_cond_; + // Protect the members below. + grpc_core::Mutex eds_mu_; + bool eds_done_ = false; std::vector<ResponseDelayPair> responses_and_delays_; - grpc::internal::Mutex mu_; - grpc::internal::CondVar serverlist_cond_; - bool serverlist_done_ = false; +};
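// Illustration of the grouped form of BuildResponseForBackends above: each
// inner vector becomes one locality, tagged sub_zone kDefaultLocalitySubzone
// plus a "_<group index>" suffix. Sketch only; the ports are placeholders,
// and it assumes the response packs the ClusterLoadAssignment into
// resources(0), as the EDS type URL suggests:
void BuildResponseForBackendsSketch() {
  DiscoveryResponse response = EdsServiceImpl::BuildResponseForBackends(
      {{1234, 1235}, {1236, 1237}});  // two localities, two backends each
  ClusterLoadAssignment assignment;
  GPR_ASSERT(response.resources(0).UnpackTo(&assignment));
  GPR_ASSERT(assignment.endpoints_size() == 2);  // one entry per group
  GPR_ASSERT(assignment.endpoints(0).lb_endpoints_size() == 2);
}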
+ +class LrsServiceImpl : public LrsService { + public: + using Stream = ServerReaderWriter<LoadStatsResponse, LoadStatsRequest>; + + explicit LrsServiceImpl(int client_load_reporting_interval_seconds) + : client_load_reporting_interval_seconds_( + client_load_reporting_interval_seconds) {} + + Status StreamLoadStats(ServerContext* context, Stream* stream) override { + gpr_log(GPR_INFO, "LB[%p]: LRS StreamLoadStats starts", this); + // Read request. + LoadStatsRequest request; + if (stream->Read(&request)) { + if (client_load_reporting_interval_seconds_ > 0) { + IncreaseRequestCount(); + // Send response. + LoadStatsResponse response; + auto server_name = request.cluster_stats()[0].cluster_name(); + GPR_ASSERT(server_name != ""); + response.add_clusters(server_name); + response.mutable_load_reporting_interval()->set_seconds( + client_load_reporting_interval_seconds_); + stream->Write(response); + IncreaseResponseCount(); + // Wait for report. + request.Clear(); + if (stream->Read(&request)) { + gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'", + this, request.DebugString().c_str()); + GPR_ASSERT(request.cluster_stats().size() == 1); + const ClusterStats& cluster_stats = request.cluster_stats()[0]; + // We need to acquire the lock here in order to prevent the notify_one + // below from firing before its corresponding wait is executed. + grpc_core::MutexLock lock(&load_report_mu_); + GPR_ASSERT(client_stats_ == nullptr); + client_stats_.reset(new ClientStats(cluster_stats)); + load_report_ready_ = true; + load_report_cond_.Signal(); + } + } + // Wait until notified done. + grpc_core::MutexLock lock(&lrs_mu_); + lrs_cv_.WaitUntil(&lrs_mu_, [this] { return lrs_done; }); + } + gpr_log(GPR_INFO, "LB[%p]: LRS done", this); + return Status::OK; + } + + void Start() { + lrs_done = false; + load_report_ready_ = false; + client_stats_.reset(); + } + + void Shutdown() { + { + grpc_core::MutexLock lock(&lrs_mu_); + NotifyDoneWithLrsCallLocked(); + } + gpr_log(GPR_INFO, "LB[%p]: shut down", this); + } + + ClientStats* WaitForLoadReport() { + grpc_core::MutexLock lock(&load_report_mu_); + load_report_cond_.WaitUntil(&load_report_mu_, + [this] { return load_report_ready_; }); + load_report_ready_ = false; + return client_stats_.get(); + } + + void NotifyDoneWithLrsCall() { + grpc_core::MutexLock lock(&lrs_mu_); + NotifyDoneWithLrsCallLocked(); + } + + void NotifyDoneWithLrsCallLocked() { + if (!lrs_done) { + lrs_done = true; + lrs_cv_.Broadcast(); + } + } + + private: + const int client_load_reporting_interval_seconds_; + + grpc_core::CondVar lrs_cv_; + // Protect lrs_done. + grpc_core::Mutex lrs_mu_; + bool lrs_done = false; + + grpc_core::CondVar load_report_cond_; + // Protect the members below. + grpc_core::Mutex load_report_mu_; + std::unique_ptr<ClientStats> client_stats_; + bool load_report_ready_ = false; };
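// The LRS service above hands the parsed ClientStats from the RPC thread to
// the test thread through load_report_mu_/load_report_cond_. The same
// handoff pattern in isolation, using the grpc_core sync primitives this
// change migrates to (sketch only):
namespace handoff_sketch {
grpc_core::Mutex mu;
grpc_core::CondVar cv;
bool ready = false;
int payload = 0;

void Produce(int value) {
  grpc_core::MutexLock lock(&mu);  // hold the lock so the Signal() cannot
  payload = value;                 // fire before the consumer's wait starts
  ready = true;
  cv.Signal();
}

int Consume() {
  grpc_core::MutexLock lock(&mu);
  cv.WaitUntil(&mu, [] { return ready; });
  ready = false;  // rearm for the next handoff, as WaitForLoadReport() does
  return payload;
}
}  // namespace handoff_sketch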
class XdsEnd2endTest : public ::testing::Test { @@ -339,13 +484,13 @@ class XdsEnd2endTest : public ::testing::Test { grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>(); // Start the backends. for (size_t i = 0; i < num_backends_; ++i) { - backends_.emplace_back(new ServerThread<BackendServiceImpl>("backend")); + backends_.emplace_back(new BackendServerThread); backends_.back()->Start(server_host_); } // Start the load balancers. for (size_t i = 0; i < num_balancers_; ++i) { - balancers_.emplace_back(new ServerThread<BalancerServiceImpl>( - "balancer", client_load_reporting_interval_seconds_)); + balancers_.emplace_back( + new BalancerServerThread(client_load_reporting_interval_seconds_)); balancers_.back()->Start(server_host_); } ResetStub(); @@ -398,19 +543,13 @@ class XdsEnd2endTest : public ::testing::Test { } void ResetBackendCounters() { - for (auto& backend : backends_) backend->service_.ResetCounters(); - } - - ClientStats WaitForLoadReports() { - ClientStats client_stats; - // TODO(juanlishen): Wait in LRS. - return client_stats; + for (auto& backend : backends_) backend->backend_service()->ResetCounters(); } bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0) { if (stop_index == 0) stop_index = backends_.size(); for (size_t i = start_index; i < stop_index; ++i) { - if (backends_[i]->service_.request_count() == 0) return false; + if (backends_[i]->backend_service()->request_count() == 0) return false; } return true; } @@ -455,7 +594,7 @@ class XdsEnd2endTest : public ::testing::Test { void WaitForBackend(size_t backend_idx) { do { (void)SendRpc(); - } while (backends_[backend_idx]->service_.request_count() == 0); + } while (backends_[backend_idx]->backend_service()->request_count() == 0); ResetBackendCounters(); } @@ -506,7 +645,7 @@ class XdsEnd2endTest : public ::testing::Test { nullptr) { std::vector<int> ports; for (size_t i = 0; i < balancers_.size(); ++i) { - ports.emplace_back(balancers_[i]->port_); + ports.emplace_back(balancers_[i]->port()); } SetNextResolutionForLbChannel(ports, service_config_json, lb_channel_response_generator); @@ -543,14 +682,32 @@ class XdsEnd2endTest : public ::testing::Test { if (stop_index == 0) stop_index = backends_.size(); std::vector<int> backend_ports; for (size_t i = start_index; i < stop_index; ++i) { - backend_ports.push_back(backends_[i]->port_); + backend_ports.push_back(backends_[i]->port()); + } + return backend_ports; + } + + const std::vector<std::vector<int>> GetBackendPortsInGroups( + size_t start_index = 0, size_t stop_index = 0, + size_t num_group = 1) const { + if (stop_index == 0) stop_index = backends_.size(); + size_t group_size = (stop_index - start_index) / num_group; + std::vector<std::vector<int>> backend_ports; + for (size_t i = 0; i < num_group; ++i) { + backend_ports.emplace_back(); + size_t group_start = group_size * i + start_index; + size_t group_stop = + i == num_group - 1 ? stop_index : group_start + group_size; + for (size_t j = group_start; j < group_stop; ++j) { + backend_ports[i].push_back(backends_[j]->port()); + } } return backend_ports; }
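  // Worked example of the partitioning above: five backends and
  // num_group = 2 give group_size = 5 / 2 = 2, so the groups come out as
  // {p0, p1} and {p2, p3, p4} -- only the last group absorbs the remainder.
  // With the default num_group = 1 every port lands in a single group, which
  // is why plain GetBackendPortsInGroups() can be fed directly to
  // EdsServiceImpl::BuildResponseForBackends().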
void ScheduleResponseForBalancer(size_t i, const DiscoveryResponse& response, int delay_ms) { - balancers_[i]->service_.add_response(response, delay_ms); + balancers_[i]->eds_service()->add_response(response, delay_ms); } Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000, @@ -583,71 +740,124 @@ class XdsEnd2endTest : public ::testing::Test { EXPECT_FALSE(status.ok()); } - template <typename T> - struct ServerThread { - template <typename... Args> - explicit ServerThread(const grpc::string& type, Args&&... args) - : port_(grpc_pick_unused_port_or_die()), - type_(type), - service_(std::forward<Args>(args)...) {} + class ServerThread { + public: + ServerThread() : port_(grpc_pick_unused_port_or_die()) {} + virtual ~ServerThread(){}; void Start(const grpc::string& server_host) { - gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_); + gpr_log(GPR_INFO, "starting %s server on port %d", Type(), port_); GPR_ASSERT(!running_); running_ = true; - grpc::internal::Mutex mu; + StartAllServices(); + grpc_core::Mutex mu; // We need to acquire the lock here in order to prevent the notify_one // by ServerThread::Serve from firing before the wait below is hit. - grpc::internal::MutexLock lock(&mu); - grpc::internal::CondVar cond; + grpc_core::MutexLock lock(&mu); + grpc_core::CondVar cond; thread_.reset(new std::thread( std::bind(&ServerThread::Serve, this, server_host, &mu, &cond))); cond.Wait(&mu); - gpr_log(GPR_INFO, "%s server startup complete", type_.c_str()); + gpr_log(GPR_INFO, "%s server startup complete", Type()); } - void Serve(const grpc::string& server_host, grpc::internal::Mutex* mu, - grpc::internal::CondVar* cond) { + void Serve(const grpc::string& server_host, grpc_core::Mutex* mu, + grpc_core::CondVar* cond) { // We need to acquire the lock here in order to prevent the notify_one // below from firing before its corresponding wait is executed. - grpc::internal::MutexLock lock(mu); + grpc_core::MutexLock lock(mu); std::ostringstream server_address; server_address << server_host << ":" << port_; ServerBuilder builder; std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials( grpc_fake_transport_security_server_credentials_create())); builder.AddListeningPort(server_address.str(), creds); - builder.RegisterService(&service_); + RegisterAllServices(&builder); server_ = builder.BuildAndStart(); cond->Signal(); } void Shutdown() { if (!running_) return; - gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str()); - service_.Shutdown(); + gpr_log(GPR_INFO, "%s about to shutdown", Type()); + ShutdownAllServices(); server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0)); thread_->join(); - gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str()); + gpr_log(GPR_INFO, "%s shutdown completed", Type()); running_ = false; } + int port() const { return port_; } + + private: + virtual void RegisterAllServices(ServerBuilder* builder) = 0; + virtual void StartAllServices() = 0; + virtual void ShutdownAllServices() = 0; + + virtual const char* Type() = 0; + const int port_; - grpc::string type_; - T service_; std::unique_ptr<Server> server_; std::unique_ptr<std::thread> thread_; bool running_ = false; };
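  // With the template parameter gone, adding a new kind of server is just a
  // matter of overriding the four virtual hooks above. A hypothetical
  // example (HealthCheckServiceImpl does not exist in this test):
  //
  //   class HealthCheckServerThread : public ServerThread {
  //    private:
  //     void RegisterAllServices(ServerBuilder* builder) override {
  //       builder->RegisterService(&health_check_service_);
  //     }
  //     void StartAllServices() override {}
  //     void ShutdownAllServices() override {}
  //     const char* Type() override { return "HealthCheck"; }
  //     HealthCheckServiceImpl health_check_service_;  // hypothetical
  //   };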
+ class BackendServerThread : public ServerThread { + public: + BackendServiceImpl* backend_service() { return &backend_service_; } + + private: + void RegisterAllServices(ServerBuilder* builder) override { + builder->RegisterService(&backend_service_); + } + + void StartAllServices() override { backend_service_.Start(); } + + void ShutdownAllServices() override { backend_service_.Shutdown(); } + + const char* Type() override { return "Backend"; } + + BackendServiceImpl backend_service_; + }; + + class BalancerServerThread : public ServerThread { + public: + explicit BalancerServerThread(int client_load_reporting_interval = 0) + : lrs_service_(client_load_reporting_interval) {} + + EdsServiceImpl* eds_service() { return &eds_service_; } + LrsServiceImpl* lrs_service() { return &lrs_service_; } + + private: + void RegisterAllServices(ServerBuilder* builder) override { + builder->RegisterService(&eds_service_); + builder->RegisterService(&lrs_service_); + } + + void StartAllServices() override { + eds_service_.Start(); + lrs_service_.Start(); + } + + void ShutdownAllServices() override { + eds_service_.Shutdown(); + lrs_service_.Shutdown(); + } + + const char* Type() override { return "Balancer"; } + + EdsServiceImpl eds_service_; + LrsServiceImpl lrs_service_; + };
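  // Note that BalancerServerThread registers the EDS and LRS services on the
  // same listening port: ServerBuilder multiplexes any number of services on
  // one server, so a single fake balancer address serves both
  // /envoy.api.v2.EndpointDiscoveryService/StreamEndpoints and
  // /envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats, much
  // as a real management server would.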
+ const grpc::string server_host_; const size_t num_backends_; const size_t num_balancers_; const int client_load_reporting_interval_seconds_; std::shared_ptr<Channel> channel_; std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_; - std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_; - std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_; + std::vector<std::unique_ptr<BackendServerThread>> backends_; + std::vector<std::unique_ptr<BalancerServerThread>> balancers_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> response_generator_; grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator> @@ -673,7 +883,7 @@ TEST_F(SingleBalancerTest, Vanilla) { SetNextResolutionForLbChannelAllBalancers(); const size_t kNumRpcsPerAddress = 100; ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()), 0); // Make sure that trying to connect works without a call. channel_->GetState(true /* try_to_connect */); @@ -683,14 +893,13 @@ TEST_F(SingleBalancerTest, Vanilla) { CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); // Each backend should have gotten 100 requests. for (size_t i = 0; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count()); + EXPECT_EQ(kNumRpcsPerAddress, + backends_[i]->backend_service()->request_count()); } - balancers_[0]->service_.NotifyDoneWithServerlists(); - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - + balancers_[0]->eds_service()->NotifyDoneWithEdsCall(); + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); // Check LB policy name for the channel. EXPECT_EQ("xds_experimental", channel_->GetLoadBalancingPolicyName()); } @@ -700,31 +909,32 @@ TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) { SetNextResolutionForLbChannelAllBalancers(); // Same backend listed twice. std::vector<int> ports; - ports.push_back(backends_[0]->port_); - ports.push_back(backends_[0]->port_); + ports.push_back(backends_[0]->port()); + ports.push_back(backends_[0]->port()); const size_t kNumRpcsPerAddress = 10; ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends({ports}), 0); // We need to wait for the backend to come online. WaitForBackend(0); // Send kNumRpcsPerAddress RPCs per server. CheckRpcSendOk(kNumRpcsPerAddress * ports.size()); // Backend should have gotten 20 requests. - EXPECT_EQ(kNumRpcsPerAddress * 2, backends_[0]->service_.request_count()); + EXPECT_EQ(kNumRpcsPerAddress * 2, + backends_[0]->backend_service()->request_count()); // And they should have come from a single client port, because of // subchannel sharing. - EXPECT_EQ(1UL, backends_[0]->service_.clients().size()); - balancers_[0]->service_.NotifyDoneWithServerlists(); + EXPECT_EQ(1UL, backends_[0]->backend_service()->clients().size()); + balancers_[0]->eds_service()->NotifyDoneWithEdsCall(); } TEST_F(SingleBalancerTest, SecureNaming) { // TODO(juanlishen): Use separate fake creds for the balancer channel.
ResetStub(0, kApplicationTargetName_ + ";lb"); SetNextResolution({}, kDefaultServiceConfig_.c_str()); - SetNextResolutionForLbChannel({balancers_[0]->port_}); + SetNextResolutionForLbChannel({balancers_[0]->port()}); const size_t kNumRpcsPerAddress = 100; ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()), 0); // Make sure that trying to connect works without a call. channel_->GetState(true /* try_to_connect */); @@ -735,12 +945,12 @@ TEST_F(SingleBalancerTest, SecureNaming) { // Each backend should have gotten 100 requests. for (size_t i = 0; i < backends_.size(); ++i) { - EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count()); + EXPECT_EQ(kNumRpcsPerAddress, + backends_[i]->backend_service()->request_count()); } - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); } TEST_F(SingleBalancerTest, SecureNamingDeathTest) { @@ -758,7 +968,7 @@ TEST_F(SingleBalancerTest, SecureNamingDeathTest) { "\"fake:///wrong_lb\" } }\n" " ]\n" "}"); - SetNextResolutionForLbChannel({balancers_[0]->port_}); + SetNextResolutionForLbChannel({balancers_[0]->port()}); channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1)); }, ""); @@ -770,11 +980,11 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) { const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor(); const int kCallDeadlineMs = kServerlistDelayMs * 2; // First response is an empty serverlist, sent right away. - ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends({}, {}), 0); + ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponseForBackends({{}}), + 0); // Send non-empty serverlist only after kServerlistDelayMs ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()), kServerlistDelayMs); const auto t0 = system_clock::now(); // Client will block: LB will initially send empty serverlist. @@ -787,11 +997,11 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) { // populated serverlist but under the call's deadline (which is enforced by // the call's deadline). EXPECT_GT(ellapsed_ms.count(), kServerlistDelayMs); - balancers_[0]->service_.NotifyDoneWithServerlists(); - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); + balancers_[0]->eds_service()->NotifyDoneWithEdsCall(); + // The EDS service got a single request. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); // and sent two responses. - EXPECT_EQ(2U, balancers_[0]->service_.response_count()); + EXPECT_EQ(2U, balancers_[0]->eds_service()->response_count()); } TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) { @@ -803,15 +1013,14 @@ TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) { ports.push_back(grpc_pick_unused_port_or_die()); } ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends({ports}), 0); const Status status = SendRpc(); // The error shouldn't be DEADLINE_EXCEEDED. 
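// (With every serverlist entry unreachable, the pick fails fast and the RPC
// surfaces UNAVAILABLE; seeing DEADLINE_EXCEEDED instead would mean the call
// sat queued on the channel until its deadline rather than failing fast.)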
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code()); - balancers_[0]->service_.NotifyDoneWithServerlists(); - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); + balancers_[0]->eds_service()->NotifyDoneWithEdsCall(); + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); } TEST_F(SingleBalancerTest, Fallback) { @@ -825,8 +1034,8 @@ TEST_F(SingleBalancerTest, Fallback) { // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( 0, - BalancerServiceImpl::BuildResponseForBackends( - GetBackendPorts(kNumBackendsInResolution /* start_index */), {}), + EdsServiceImpl::BuildResponseForBackends( + GetBackendPortsInGroups(kNumBackendsInResolution /* start_index */)), kServerlistDelayMs); // Wait until all the fallback backends are reachable. WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */, @@ -837,10 +1046,10 @@ TEST_F(SingleBalancerTest, Fallback) { // Fallback is used: each backend returned by the resolver should have // gotten one request. for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - EXPECT_EQ(1U, backends_[i]->service_.request_count()); + EXPECT_EQ(1U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } // Wait until the serverlist reception has been processed and all backends // in the serverlist are reachable. @@ -852,15 +1061,14 @@ TEST_F(SingleBalancerTest, Fallback) { // Serverlist is used: each backend returned by the balancer should // have gotten one request. for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) { - EXPECT_EQ(1U, backends_[i]->service_.request_count()); + EXPECT_EQ(1U, backends_[i]->backend_service()->request_count()); } - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); } TEST_F(SingleBalancerTest, FallbackUpdate) { @@ -875,10 +1083,9 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { // Send non-empty serverlist only after kServerlistDelayMs. ScheduleResponseForBalancer( 0, - BalancerServiceImpl::BuildResponseForBackends( - GetBackendPorts(kNumBackendsInResolution + - kNumBackendsInResolutionUpdate /* start_index */), - {}), + EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups( + kNumBackendsInResolution + + kNumBackendsInResolutionUpdate /* start_index */)), kServerlistDelayMs); // Wait until all the fallback backends are reachable. WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */, @@ -889,10 +1096,10 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { // Fallback is used: each backend returned by the resolver should have // gotten one request. 
for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - EXPECT_EQ(1U, backends_[i]->service_.request_count()); + EXPECT_EQ(1U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution; i < backends_.size(); ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } SetNextResolution(GetBackendPorts(kNumBackendsInResolution, kNumBackendsInResolution + @@ -910,15 +1117,15 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { // The resolution update is used: each backend in the resolution update should // have gotten one request. for (size_t i = 0; i < kNumBackendsInResolution; ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution; i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) { - EXPECT_EQ(1U, backends_[i]->service_.request_count()); + EXPECT_EQ(1U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate; i < backends_.size(); ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } // Wait until the serverlist reception has been processed and all backends // in the serverlist are reachable. @@ -933,23 +1140,22 @@ TEST_F(SingleBalancerTest, FallbackUpdate) { // have gotten one request. for (size_t i = 0; i < kNumBackendsInResolution + kNumBackendsInResolutionUpdate; ++i) { - EXPECT_EQ(0U, backends_[i]->service_.request_count()); + EXPECT_EQ(0U, backends_[i]->backend_service()->request_count()); } for (size_t i = kNumBackendsInResolution + kNumBackendsInResolutionUpdate; i < backends_.size(); ++i) { - EXPECT_EQ(1U, backends_[i]->service_.request_count()); + EXPECT_EQ(1U, backends_[i]->backend_service()->request_count()); } - // The balancer got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); } TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return an unreachable balancer and one fallback backend. - SetNextResolution({backends_[0]->port_}, kDefaultServiceConfig_.c_str()); + SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()}); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. @@ -961,10 +1167,10 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) { const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); // Return one balancer and one fallback backend. - SetNextResolution({backends_[0]->port_}, kDefaultServiceConfig_.c_str()); + SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); // Balancer drops call without sending a serverlist. - balancers_[0]->service_.NotifyDoneWithServerlists(); + balancers_[0]->eds_service()->NotifyDoneWithEdsCall(); // Send RPC with deadline less than the fallback timeout and make sure it // succeeds. 
CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000, @@ -974,13 +1180,13 @@ TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) { TEST_F(SingleBalancerTest, FallbackIfResponseReceivedButChildNotReady) { const int kFallbackTimeoutMs = 500 * grpc_test_slowdown_factor(); ResetStub(kFallbackTimeoutMs); - SetNextResolution({backends_[0]->port_}, kDefaultServiceConfig_.c_str()); + SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); // Send a serverlist that only contains an unreachable backend before fallback // timeout. ScheduleResponseForBalancer(0, - BalancerServiceImpl::BuildResponseForBackends( - {grpc_pick_unused_port_or_die()}, {}), + EdsServiceImpl::BuildResponseForBackends( + {{grpc_pick_unused_port_or_die()}}), 0); // Because no child policy is ready before fallback timeout, we enter fallback // mode. @@ -989,13 +1195,13 @@ TEST_F(SingleBalancerTest, FallbackIfResponseReceivedButChildNotReady) { TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) { // Return an unreachable balancer and one fallback backend. - SetNextResolution({backends_[0]->port_}, kDefaultServiceConfig_.c_str()); + SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()}); // Enter fallback mode because the LB channel fails to connect. WaitForBackend(0); // Return a new balancer that sends an empty serverlist. - ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends({}, {}), 0); + ScheduleResponseForBalancer(0, EdsServiceImpl::BuildResponseForBackends({{}}), + 0); SetNextResolutionForLbChannelAllBalancers(); // Send RPCs until failure. gpr_timespec deadline = gpr_time_add( @@ -1009,16 +1215,14 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedWhenBalancerSaysToDropAllCalls) { TEST_F(SingleBalancerTest, FallbackModeIsExitedAfterChildRready) { // Return an unreachable balancer and one fallback backend. - SetNextResolution({backends_[0]->port_}, kDefaultServiceConfig_.c_str()); + SetNextResolution({backends_[0]->port()}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannel({grpc_pick_unused_port_or_die()}); // Enter fallback mode because the LB channel fails to connect. WaitForBackend(0); // Return a new balancer that sends a dead backend. ShutdownBackend(1); ScheduleResponseForBalancer( - 0, - BalancerServiceImpl::BuildResponseForBackends({backends_[1]->port_}, {}), - 0); + 0, EdsServiceImpl::BuildResponseForBackends({{backends_[1]->port()}}), 0); SetNextResolutionForLbChannelAllBalancers(); // The state (TRANSIENT_FAILURE) update from the child policy will be ignored // because we are still in fallback mode. @@ -1035,15 +1239,15 @@ TEST_F(SingleBalancerTest, FallbackModeIsExitedAfterChildRready) { // We have exited fallback mode, so calls will go to the child policy // exclusively. 
CheckRpcSendOk(100); - EXPECT_EQ(0U, backends_[0]->service_.request_count()); - EXPECT_EQ(100U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(100U, backends_[1]->backend_service()->request_count()); } TEST_F(SingleBalancerTest, BackendsRestart) { SetNextResolution({}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}), + 0, EdsServiceImpl::BuildResponseForBackends(GetBackendPortsInGroups()), 0); WaitForAllBackends(); // Stop backends. RPCs should fail. @@ -1063,12 +1267,12 @@ class UpdatesTest : public XdsEnd2endTest { TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { SetNextResolution({}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); - const std::vector first_backend{GetBackendPorts()[0]}; - const std::vector second_backend{GetBackendPorts()[1]}; + auto first_backend = GetBackendPortsInGroups(0, 1); + auto second_backend = GetBackendPortsInGroups(1, 2); ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0); ScheduleResponseForBalancer( - 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + 1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0); // Wait until the first backend is ready. WaitForBackend(0); @@ -1079,22 +1283,22 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. - EXPECT_EQ(10U, backends_[0]->service_.request_count()); + EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // Balancer 0 got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(0U, balancers_[1]->service_.request_count()); - EXPECT_EQ(0U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + // The EDS service of balancer 0 got a single request, and sent a single + // response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); - SetNextResolutionForLbChannel({balancers_[1]->port_}); + SetNextResolutionForLbChannel({balancers_[1]->port()}); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); gpr_timespec deadline = gpr_time_add( gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN)); // Send 10 seconds worth of RPCs @@ -1103,25 +1307,25 @@ TEST_F(UpdatesTest, UpdateBalancersButKeepUsingOriginalBalancer) { } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); // The current LB call is still working, so xds continued using it to the // first balancer, which doesn't assign the second backend. 
- EXPECT_EQ(0U, backends_[1]->service_.request_count()); - - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(0U, balancers_[1]->service_.request_count()); - EXPECT_EQ(0U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); + + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); } TEST_F(UpdatesTest, UpdateBalancerName) { SetNextResolution({}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); - const std::vector first_backend{GetBackendPorts()[0]}; - const std::vector second_backend{GetBackendPorts()[1]}; + auto first_backend = GetBackendPortsInGroups(0, 1); + auto second_backend = GetBackendPortsInGroups(1, 2); ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0); ScheduleResponseForBalancer( - 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + 1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0); // Wait until the first backend is ready. WaitForBackend(0); @@ -1132,19 +1336,19 @@ TEST_F(UpdatesTest, UpdateBalancerName) { gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. - EXPECT_EQ(10U, backends_[0]->service_.request_count()); + EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // Balancer 0 got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(0U, balancers_[1]->service_.request_count()); - EXPECT_EQ(0U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + // The EDS service of balancer 0 got a single request, and sent a single + // response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); std::vector ports; - ports.emplace_back(balancers_[1]->port_); + ports.emplace_back(balancers_[1]->port()); auto new_lb_channel_response_generator = grpc_core::MakeRefCounted(); SetNextResolutionForLbChannel(ports, nullptr, @@ -1163,22 +1367,22 @@ TEST_F(UpdatesTest, UpdateBalancerName) { // Wait until update has been processed, as signaled by the second backend // receiving a request. 
- EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); WaitForBackend(1); - backends_[1]->service_.ResetCounters(); + backends_[1]->backend_service()->ResetCounters(); gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); CheckRpcSendOk(10); gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); // All 10 requests should have gone to the second backend. - EXPECT_EQ(10U, backends_[1]->service_.request_count()); - - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(1U, balancers_[1]->service_.request_count()); - EXPECT_EQ(1U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + EXPECT_EQ(10U, backends_[1]->backend_service()->request_count()); + + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(1U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); } // Send an update with the same set of LBs as the one in SetUp() in order to @@ -1187,13 +1391,12 @@ TEST_F(UpdatesTest, UpdateBalancerName) { TEST_F(UpdatesTest, UpdateBalancersRepeated) { SetNextResolution({}, kDefaultServiceConfig_.c_str()); SetNextResolutionForLbChannelAllBalancers(); - const std::vector first_backend{GetBackendPorts()[0]}; - const std::vector second_backend{GetBackendPorts()[0]}; - + auto first_backend = GetBackendPortsInGroups(0, 1); + auto second_backend = GetBackendPortsInGroups(1, 2); ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0); ScheduleResponseForBalancer( - 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + 1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0); // Wait until the first backend is ready. WaitForBackend(0); @@ -1204,26 +1407,26 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. - EXPECT_EQ(10U, backends_[0]->service_.request_count()); + EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); - // Balancer 0 got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(0U, balancers_[1]->service_.request_count()); - EXPECT_EQ(0U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + // The EDS service of balancer 0 got a single request, and sent a single + // response. 
+ EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); std::vector ports; - ports.emplace_back(balancers_[0]->port_); - ports.emplace_back(balancers_[1]->port_); - ports.emplace_back(balancers_[2]->port_); + ports.emplace_back(balancers_[0]->port()); + ports.emplace_back(balancers_[1]->port()); + ports.emplace_back(balancers_[2]->port()); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); SetNextResolutionForLbChannel(ports); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); gpr_timespec deadline = gpr_time_add( gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN)); // Send 10 seconds worth of RPCs @@ -1232,16 +1435,16 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); // xds continued using the original LB call to the first balancer, which // doesn't assign the second backend. - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); ports.clear(); - ports.emplace_back(balancers_[0]->port_); - ports.emplace_back(balancers_[1]->port_); + ports.emplace_back(balancers_[0]->port()); + ports.emplace_back(balancers_[1]->port()); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 =========="); SetNextResolutionForLbChannel(ports); gpr_log(GPR_INFO, "========= UPDATE 2 DONE =========="); - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN)); // Send 10 seconds worth of RPCs @@ -1250,26 +1453,25 @@ TEST_F(UpdatesTest, UpdateBalancersRepeated) { } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0); // xds continued using the original LB call to the first balancer, which // doesn't assign the second backend. - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); } TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { SetNextResolution({}, kDefaultServiceConfig_.c_str()); - SetNextResolutionForLbChannel({balancers_[0]->port_}); - const std::vector first_backend{GetBackendPorts()[0]}; - const std::vector second_backend{GetBackendPorts()[1]}; - + SetNextResolutionForLbChannel({balancers_[0]->port()}); + auto first_backend = GetBackendPortsInGroups(0, 1); + auto second_backend = GetBackendPortsInGroups(1, 2); ScheduleResponseForBalancer( - 0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0); + 0, EdsServiceImpl::BuildResponseForBackends(first_backend), 0); ScheduleResponseForBalancer( - 1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0); + 1, EdsServiceImpl::BuildResponseForBackends(second_backend), 0); // Start servers and send 10 RPCs per server. gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); CheckRpcSendOk(10); gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); // All 10 requests should have gone to the first backend. 
- EXPECT_EQ(10U, backends_[0]->service_.request_count()); + EXPECT_EQ(10U, backends_[0]->backend_service()->request_count()); // Kill balancer 0 gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************"); @@ -1281,48 +1483,48 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) { CheckRpcSendOk(10); gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); // All 10 requests should again have gone to the first backend. - EXPECT_EQ(20U, backends_[0]->service_.request_count()); - EXPECT_EQ(0U, backends_[1]->service_.request_count()); - - // Balancer 0 got a single request. - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - // and sent a single response. - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); - EXPECT_EQ(0U, balancers_[1]->service_.request_count()); - EXPECT_EQ(0U, balancers_[1]->service_.response_count()); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + EXPECT_EQ(20U, backends_[0]->backend_service()->request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); + + // The EDS service of balancer 0 got a single request, and sent a single + // response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[1]->eds_service()->response_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 =========="); - SetNextResolutionForLbChannel({balancers_[1]->port_}); + SetNextResolutionForLbChannel({balancers_[1]->port()}); gpr_log(GPR_INFO, "========= UPDATE 1 DONE =========="); // Wait until update has been processed, as signaled by the second backend // receiving a request. In the meantime, the client continues to be serviced // (by the first backend) without interruption. - EXPECT_EQ(0U, backends_[1]->service_.request_count()); + EXPECT_EQ(0U, backends_[1]->backend_service()->request_count()); WaitForBackend(1); // This is serviced by the updated RR policy - backends_[1]->service_.ResetCounters(); + backends_[1]->backend_service()->ResetCounters(); gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH =========="); CheckRpcSendOk(10); gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH =========="); // All 10 requests should have gone to the second backend. - EXPECT_EQ(10U, backends_[1]->service_.request_count()); + EXPECT_EQ(10U, backends_[1]->backend_service()->request_count()); - EXPECT_EQ(1U, balancers_[0]->service_.request_count()); - EXPECT_EQ(1U, balancers_[0]->service_.response_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); // The second balancer, published as part of the first update, may end up // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer // firing races with the arrival of the update containing the second // balancer. 
- EXPECT_GE(balancers_[1]->service_.request_count(), 1U); - EXPECT_GE(balancers_[1]->service_.response_count(), 1U); - EXPECT_LE(balancers_[1]->service_.request_count(), 2U); - EXPECT_LE(balancers_[1]->service_.response_count(), 2U); - EXPECT_EQ(0U, balancers_[2]->service_.request_count()); - EXPECT_EQ(0U, balancers_[2]->service_.response_count()); + EXPECT_GE(balancers_[1]->eds_service()->request_count(), 1U); + EXPECT_GE(balancers_[1]->eds_service()->response_count(), 1U); + EXPECT_LE(balancers_[1]->eds_service()->request_count(), 2U); + EXPECT_LE(balancers_[1]->eds_service()->response_count(), 2U); + EXPECT_EQ(0U, balancers_[2]->eds_service()->request_count()); + EXPECT_EQ(0U, balancers_[2]->eds_service()->response_count()); } // The re-resolution tests are deferred because they rely on the fallback mode, @@ -1346,15 +1548,105 @@ class SingleBalancerWithClientLoadReportingTest : public XdsEnd2endTest { SingleBalancerWithClientLoadReportingTest() : XdsEnd2endTest(4, 1, 3) {} }; -// The client load reporting tests are deferred because the client load -// reporting hasn't been supported yet. - -// TODO(vpowar): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) +TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannel({balancers_[0]->port()}); + const size_t kNumRpcsPerAddress = 100; + // TODO(juanlishen): Partition the backends after multiple localities is + // tested. + ScheduleResponseForBalancer(0, + EdsServiceImpl::BuildResponseForBackends( + GetBackendPortsInGroups(0, backends_.size())), + 0); + // Wait until all backends are ready. + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends(); + // Send kNumRpcsPerAddress RPCs per server. + CheckRpcSendOk(kNumRpcsPerAddress * num_backends_); + // Each backend should have gotten 100 requests. + for (size_t i = 0; i < backends_.size(); ++i) { + EXPECT_EQ(kNumRpcsPerAddress, + backends_[i]->backend_service()->request_count()); + } + // The EDS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->eds_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->eds_service()->response_count()); + // The LRS service got a single request, and sent a single response. + EXPECT_EQ(1U, balancers_[0]->lrs_service()->request_count()); + EXPECT_EQ(1U, balancers_[0]->lrs_service()->response_count()); + // The load report received at the balancer should be correct. 
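  // Worked example with this fixture's numbers (4 backends,
  // kNumRpcsPerAddress = 100): WaitForAllBackends() above issued num_ok
  // successful warm-up RPCs, and the batch added 4 * 100 = 400 more, so the
  // report should show exactly 400 + num_ok successes and no in-flight,
  // errored, or dropped requests.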
+ ClientStats* client_stats = balancers_[0]->lrs_service()->WaitForLoadReport(); + EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, + client_stats->total_successful_requests()); + EXPECT_EQ(0U, client_stats->total_requests_in_progress()); + EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok, + client_stats->total_issued_requests()); + EXPECT_EQ(0U, client_stats->total_error_requests()); + EXPECT_EQ(0U, client_stats->total_dropped_requests()); +} -// TODO(roth): Add TEST_F(SingleBalancerWithClientLoadReportingTest, -// BalancerRestart) +TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) { + SetNextResolution({}, kDefaultServiceConfig_.c_str()); + SetNextResolutionForLbChannel({balancers_[0]->port()}); + const size_t kNumBackendsFirstPass = backends_.size() / 2; + const size_t kNumBackendsSecondPass = + backends_.size() - kNumBackendsFirstPass; + ScheduleResponseForBalancer( + 0, + EdsServiceImpl::BuildResponseForBackends( + GetBackendPortsInGroups(0, kNumBackendsFirstPass)), + 0); + // Wait until all backends returned by the balancer are ready. + int num_ok = 0; + int num_failure = 0; + int num_drops = 0; + std::tie(num_ok, num_failure, num_drops) = + WaitForAllBackends(/* num_requests_multiple_of */ 1, /* start_index */ 0, + /* stop_index */ kNumBackendsFirstPass); + ClientStats* client_stats = balancers_[0]->lrs_service()->WaitForLoadReport(); + EXPECT_EQ(static_cast(num_ok), + client_stats->total_successful_requests()); + EXPECT_EQ(0U, client_stats->total_requests_in_progress()); + EXPECT_EQ(0U, client_stats->total_error_requests()); + EXPECT_EQ(0U, client_stats->total_dropped_requests()); + // Shut down the balancer. + balancers_[0]->Shutdown(); + // Send 1 more request per backend. This will continue using the + // last serverlist we received from the balancer before it was shut down. + ResetBackendCounters(); + CheckRpcSendOk(kNumBackendsFirstPass); + int num_started = kNumBackendsFirstPass; + // Each backend should have gotten 1 request. + for (size_t i = 0; i < kNumBackendsFirstPass; ++i) { + EXPECT_EQ(1UL, backends_[i]->backend_service()->request_count()); + } + // Now restart the balancer, this time pointing to the new backends. + balancers_[0]->Start(server_host_); + ScheduleResponseForBalancer( + 0, + EdsServiceImpl::BuildResponseForBackends( + GetBackendPortsInGroups(kNumBackendsFirstPass)), + 0); + // Wait for queries to start going to one of the new backends. + // This tells us that we're now using the new serverlist. + std::tie(num_ok, num_failure, num_drops) = + WaitForAllBackends(/* num_requests_multiple_of */ 1, + /* start_index */ kNumBackendsFirstPass); + num_started += num_ok + num_failure + num_drops; + // Send one RPC per backend. + CheckRpcSendOk(kNumBackendsSecondPass); + num_started += kNumBackendsSecondPass; + // Check client stats. 
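  // The second report covers everything sent after the first report was
  // collected: the kNumBackendsFirstPass RPCs issued while the balancer was
  // down, the warm-up RPCs counted by the second WaitForAllBackends(), and
  // the final kNumBackendsSecondPass batch -- exactly the components that
  // num_started accumulated above.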
+ client_stats = balancers_[0]->lrs_service()->WaitForLoadReport(); + EXPECT_EQ(num_started, client_stats->total_successful_requests()); + EXPECT_EQ(0U, client_stats->total_requests_in_progress()); + EXPECT_EQ(0U, client_stats->total_error_requests()); + EXPECT_EQ(0U, client_stats->total_dropped_requests()); +} -// TODO(roth): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) +// TODO(juanlishen): Add TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) } // namespace } // namespace testing diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py index f0f94c00466..fc1bfe99934 100755 --- a/tools/codegen/core/gen_static_metadata.py +++ b/tools/codegen/core/gen_static_metadata.py @@ -63,6 +63,7 @@ CONFIG = [ 'grpc.max_response_message_bytes', # well known method names '/grpc.lb.v1.LoadBalancer/BalanceLoad', + '/envoy.service.load_stats.v2.LoadReportingService/StreamLoadStats', '/envoy.api.v2.EndpointDiscoveryService/StreamEndpoints', '/grpc.health.v1.Health/Watch', '/envoy.service.discovery.v2.AggregatedDiscoveryService/StreamAggregatedResources', diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 36e6ef1d48c..f39c72c4d5e 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -5307,9 +5307,12 @@ "grpc_test_util" ], "headers": [ - "src/proto/grpc/lb/v2/xds_for_test.grpc.pb.h", - "src/proto/grpc/lb/v2/xds_for_test.pb.h", - "src/proto/grpc/lb/v2/xds_for_test_mock.grpc.pb.h" + "src/proto/grpc/lb/v2/eds_for_test.grpc.pb.h", + "src/proto/grpc/lb/v2/eds_for_test.pb.h", + "src/proto/grpc/lb/v2/eds_for_test_mock.grpc.pb.h", + "src/proto/grpc/lb/v2/lrs_for_test.grpc.pb.h", + "src/proto/grpc/lb/v2/lrs_for_test.pb.h", + "src/proto/grpc/lb/v2/lrs_for_test_mock.grpc.pb.h" ], "is_filegroup": false, "language": "c++", @@ -9467,7 +9470,6 @@ "gpr", "grpc_base", "grpc_client_channel", - "grpc_lb_upb", "grpc_resolver_fake" ], "headers": [ @@ -9498,7 +9500,6 @@ "gpr", "grpc_base", "grpc_client_channel", - "grpc_lb_upb", "grpc_resolver_fake", "grpc_secure" ],