diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index ad00855be71..891df3baf6b 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -94,7 +94,7 @@ grpc_core::TraceFlag grpc_client_channel_routing_trace(
 struct external_connectivity_watcher;
 
 struct QueuedPick {
-  LoadBalancingPolicy::PickState pick;
+  LoadBalancingPolicy::PickArgs pick;
   grpc_call_element* elem;
   QueuedPick* next = nullptr;
 };
@@ -298,7 +298,7 @@ static grpc_error* do_ping_locked(channel_data* chand, grpc_transport_op* op) {
     GRPC_ERROR_UNREF(error);
     return new_error;
   }
-  LoadBalancingPolicy::PickState pick;
+  LoadBalancingPolicy::PickArgs pick;
   chand->picker->Pick(&pick, &error);
   if (pick.connected_subchannel != nullptr) {
     pick.connected_subchannel->Ping(op->send_ping.on_initiate,
@@ -931,7 +931,7 @@ static void free_cached_send_op_data_for_completed_batch(
 //
 
 void maybe_inject_recv_trailing_metadata_ready_for_lb(
-    const LoadBalancingPolicy::PickState& pick,
+    const LoadBalancingPolicy::PickArgs& pick,
     grpc_transport_stream_op_batch* batch) {
   if (pick.recv_trailing_metadata_ready != nullptr) {
     *pick.original_recv_trailing_metadata_ready =
@@ -2635,14 +2635,13 @@ static void maybe_apply_service_config_to_call_locked(grpc_call_element* elem) {
   }
 }
 
-static const char* pick_result_name(
-    LoadBalancingPolicy::SubchannelPicker::PickResult result) {
+static const char* pick_result_name(LoadBalancingPolicy::PickResult result) {
   switch (result) {
-    case LoadBalancingPolicy::SubchannelPicker::PICK_COMPLETE:
+    case LoadBalancingPolicy::PICK_COMPLETE:
       return "COMPLETE";
-    case LoadBalancingPolicy::SubchannelPicker::PICK_QUEUE:
+    case LoadBalancingPolicy::PICK_QUEUE:
       return "QUEUE";
-    case LoadBalancingPolicy::SubchannelPicker::PICK_TRANSIENT_FAILURE:
+    case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE:
       return "TRANSIENT_FAILURE";
   }
   GPR_UNREACHABLE_CODE(return "UNKNOWN");
@@ -2692,7 +2691,7 @@ static void start_pick_locked(void* arg, grpc_error* error) {
             grpc_error_string(error));
   }
   switch (pick_result) {
-    case LoadBalancingPolicy::SubchannelPicker::PICK_TRANSIENT_FAILURE:
+    case LoadBalancingPolicy::PICK_TRANSIENT_FAILURE:
       // If we're shutting down, fail all RPCs.
       if (chand->disconnect_error != GRPC_ERROR_NONE) {
         GRPC_ERROR_UNREF(error);
@@ -2724,7 +2723,7 @@ static void start_pick_locked(void* arg, grpc_error* error) {
       // picker.
       GRPC_ERROR_UNREF(error);
     // Fallthrough
-    case LoadBalancingPolicy::SubchannelPicker::PICK_QUEUE:
+    case LoadBalancingPolicy::PICK_QUEUE:
       if (!calld->pick_queued) add_call_to_queued_picks_locked(elem);
       break;
     default:  // PICK_COMPLETE
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 527b241eb6b..f370d745bb1 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -28,6 +28,10 @@ grpc_core::DebugOnlyTraceFlag grpc_trace_lb_policy_refcount(
 
 namespace grpc_core {
 
+//
+// LoadBalancingPolicy
+//
+
 LoadBalancingPolicy::LoadBalancingPolicy(Args args, intptr_t initial_refcount)
     : InternallyRefCounted(&grpc_trace_lb_policy_refcount, initial_refcount),
       combiner_(GRPC_COMBINER_REF(args.combiner, "lb_policy")),
@@ -39,6 +43,26 @@ LoadBalancingPolicy::~LoadBalancingPolicy() {
   GRPC_COMBINER_UNREF(combiner_, "lb_policy");
 }
 
+void LoadBalancingPolicy::Orphan() {
+  // Invoke ShutdownAndUnrefLocked() inside of the combiner.
+  // TODO(roth): Is this actually needed? We should already be in the
+  // combiner here. Note that if we directly call ShutdownLocked(),
+  // then we can probably remove the hack whereby the helper is
+  // destroyed at shutdown instead of at destruction.
+  GRPC_CLOSURE_SCHED(
+      GRPC_CLOSURE_CREATE(&LoadBalancingPolicy::ShutdownAndUnrefLocked, this,
+                          grpc_combiner_scheduler(combiner_)),
+      GRPC_ERROR_NONE);
+}
+
+void LoadBalancingPolicy::ShutdownAndUnrefLocked(void* arg,
+                                                 grpc_error* ignored) {
+  LoadBalancingPolicy* policy = static_cast<LoadBalancingPolicy*>(arg);
+  policy->ShutdownLocked();
+  policy->channel_control_helper_.reset();
+  policy->Unref();
+}
+
 grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
     const grpc_json* lb_config_array) {
   if (lb_config_array == nullptr || lb_config_array->type != GRPC_JSON_ARRAY) {
@@ -65,4 +89,40 @@ grpc_json* LoadBalancingPolicy::ParseLoadBalancingConfig(
   return nullptr;
 }
 
+//
+// LoadBalancingPolicy::QueuePicker
+//
+
+LoadBalancingPolicy::PickResult LoadBalancingPolicy::QueuePicker::Pick(
+    PickArgs* pick, grpc_error** error) {
+  // We invoke the parent's ExitIdleLocked() via a closure instead
+  // of doing it directly here, for two reasons:
+  // 1. ExitIdleLocked() may cause the policy's state to change and
+  //    a new picker to be delivered to the channel. If that new
+  //    picker is delivered before ExitIdleLocked() returns, then by
+  //    the time this function returns, the pick will already have
+  //    been processed, and we'll be trying to re-process the same
+  //    pick again, leading to a crash.
+  // 2. In a subsequent PR, we will split the data plane and control
+  //    plane synchronization into separate combiners, at which
+  //    point this will need to hop from the data plane combiner into
+  //    the control plane combiner.
+  if (!exit_idle_called_) {
+    exit_idle_called_ = true;
+    parent_->Ref().release();  // ref held by closure.
+    GRPC_CLOSURE_SCHED(
+        GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(),
+                            grpc_combiner_scheduler(parent_->combiner())),
+        GRPC_ERROR_NONE);
+  }
+  return PICK_QUEUE;
+}
+
+void LoadBalancingPolicy::QueuePicker::CallExitIdle(void* arg,
+                                                    grpc_error* error) {
+  LoadBalancingPolicy* parent = static_cast<LoadBalancingPolicy*>(arg);
+  parent->ExitIdleLocked();
+  parent->Unref();
+}
+
 }  // namespace grpc_core
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 785458cbe67..30ff9c3fc95 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -37,6 +37,36 @@ namespace grpc_core {
 
 /// Interface for load balancing policies.
 ///
+/// The following concepts are used here:
+///
+/// Channel: An abstraction that manages connections to backend servers
+/// on behalf of a client application. The application creates a channel
+/// for a given server name and then sends RPCs on it, and the channel
+/// figures out which backend server to send each RPC to. A channel
+/// contains a resolver, a load balancing policy (or a tree of LB policies),
+/// and a set of one or more subchannels.
+///
+/// Subchannel: A subchannel represents a connection to one backend server.
+/// The LB policy decides which subchannels to create, manages the
+/// connectivity state of those subchannels, and decides which subchannel
+/// to send any given RPC to.
+///
+/// Resolver: A plugin that takes a gRPC server URI and resolves it to a
+/// list of one or more addresses and a service config, as described
+/// in https://github.com/grpc/grpc/blob/master/doc/naming.md. See
+/// resolver.h for the resolver API.
+///
+/// Load Balancing (LB) Policy: A plugin that takes a list of addresses
+/// from the resolver, maintains and manages a subchannel for each
+/// backend address, and decides which subchannel to send each RPC on.
+/// An LB policy has two parts:
+/// - A LoadBalancingPolicy, which deals with the control plane work of
+///   managing subchannels.
+/// - A SubchannelPicker, which handles the data plane work of
+///   determining which subchannel a given RPC should be sent on.
+
+/// LoadBalancingPolicy API.
+///
 /// Note: All methods with a "Locked" suffix must be called from the
 /// combiner passed to the constructor.
 ///
@@ -46,36 +76,70 @@ namespace grpc_core {
 // interested_parties() hooks from the API.
 class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
  public:
-  /// State used for an LB pick.
-  struct PickState {
+  /// Arguments used when picking a subchannel for an RPC.
+  struct PickArgs {
+    ///
+    /// Input parameters.
+    ///
     /// Initial metadata associated with the picking call.
-    /// This is both an input and output parameter; the LB policy may
-    /// use metadata here to influence its routing decision, and it may
-    /// add new metadata here to be sent with the call to the chosen backend.
+    /// The LB policy may use the existing metadata to influence its routing
+    /// decision, and it may add new metadata elements to be sent with the
+    /// call to the chosen backend.
+    // TODO(roth): Provide a more generic metadata API here.
     grpc_metadata_batch* initial_metadata = nullptr;
     /// Storage for LB token in \a initial_metadata, or nullptr if not used.
     // TODO(roth): Remove this from the API. Maybe have the LB policy
     // allocate this on the arena instead?
     grpc_linked_mdelem lb_token_mdelem_storage;
+    ///
+    /// Output parameters.
+    ///
+    /// Will be set to the selected subchannel, or nullptr on failure or when
+    /// the LB policy decides to drop the call.
+    RefCountedPtr<ConnectedSubchannel> connected_subchannel;
     /// Callback set by lb policy to be notified of trailing metadata.
     /// The callback must be scheduled on grpc_schedule_on_exec_ctx.
+    // TODO(roth): Provide a cleaner callback API.
     grpc_closure* recv_trailing_metadata_ready = nullptr;
     /// The address that will be set to point to the original
     /// recv_trailing_metadata_ready callback, to be invoked by the LB
     /// policy's recv_trailing_metadata_ready callback when complete.
     /// Must be non-null if recv_trailing_metadata_ready is non-null.
+    // TODO(roth): Consider making the recv_trailing_metadata closure a
+    // synchronous callback, in which case it is not responsible for
+    // chaining to the next callback, so this can be removed from the API.
     grpc_closure** original_recv_trailing_metadata_ready = nullptr;
     /// If this is not nullptr, then the client channel will point it to the
     /// call's trailing metadata before invoking recv_trailing_metadata_ready.
     /// If this is nullptr, then the callback will still be called.
     /// The lb does not have ownership of the metadata.
+    // TODO(roth): If we make this a synchronous callback, then this can
+    // be passed to the callback as a parameter and can be removed from
+    // the API here.
     grpc_metadata_batch** recv_trailing_metadata = nullptr;
-    /// Will be set to the selected subchannel, or nullptr on failure or when
-    /// the LB policy decides to drop the call.
-    RefCountedPtr<ConnectedSubchannel> connected_subchannel;
   };
 
-  /// A picker is the object used to actual perform picks.
+  /// The result of picking a subchannel for an RPC.
+  enum PickResult {
+    // Pick complete. If connected_subchannel is non-null, client channel
+    // can immediately proceed with the call on connected_subchannel;
+    // otherwise, call should be dropped.
+    PICK_COMPLETE,
+    // Pick cannot be completed until something changes on the control
+    // plane. Client channel will queue the pick and try again the
+    // next time the picker is updated.
+    PICK_QUEUE,
+    // LB policy is in transient failure. If the pick is wait_for_ready,
+    // client channel will wait for the next picker and try again;
+    // otherwise, the call will be failed immediately (although it may
+    // be retried if the client channel is configured to do so).
+    // The Pick() method will set its error parameter if this value is
+    // returned.
+    PICK_TRANSIENT_FAILURE,
+  };
+
+  /// A subchannel picker is the object used to pick the subchannel to
+  /// use for a given RPC.
   ///
   /// Pickers are intended to encapsulate all of the state and logic
   /// needed on the data plane (i.e., to actually process picks for
@@ -92,90 +156,14 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
   // synchronization mechanisms, to avoid lock contention between the two.
   class SubchannelPicker {
    public:
-    enum PickResult {
-      // Pick complete. If connected_subchannel is non-null, client channel
-      // can immediately proceed with the call on connected_subchannel;
-      // otherwise, call should be dropped.
-      PICK_COMPLETE,
-      // Pick cannot be completed until something changes on the control
-      // plane. Client channel will queue the pick and try again the
-      // next time the picker is updated.
-      PICK_QUEUE,
-      // LB policy is in transient failure. If the pick is wait_for_ready,
-      // client channel will wait for the next picker and try again;
-      // otherwise, the call will be failed immediately (although it may
-      // be retried if the client channel is configured to do so).
-      // The Pick() method will set its error parameter if this value is
-      // returned.
-      PICK_TRANSIENT_FAILURE,
-    };
-
     SubchannelPicker() = default;
     virtual ~SubchannelPicker() = default;
 
-    virtual PickResult Pick(PickState* pick, grpc_error** error) GRPC_ABSTRACT;
+    virtual PickResult Pick(PickArgs* pick, grpc_error** error) GRPC_ABSTRACT;
 
     GRPC_ABSTRACT_BASE_CLASS
   };
 
-  // A picker that returns PICK_QUEUE for all picks.
-  // Also calls the parent LB policy's ExitIdleLocked() method when the
-  // first pick is seen.
-  class QueuePicker : public SubchannelPicker {
-   public:
-    explicit QueuePicker(RefCountedPtr<LoadBalancingPolicy> parent)
-        : parent_(std::move(parent)) {}
-
-    PickResult Pick(PickState* pick, grpc_error** error) override {
-      // We invoke the parent's ExitIdleLocked() via a closure instead
-      // of doing it directly here, for two reasons:
-      // 1. ExitIdleLocked() may cause the policy's state to change and
-      //    a new picker to be delivered to the channel. If that new
-      //    picker is delivered before ExitIdleLocked() returns, then by
-      //    the time this function returns, the pick will already have
-      //    been processed, and we'll be trying to re-process the same
-      //    pick again, leading to a crash.
-      // 2. In a subsequent PR, we will split the data plane and control
-      //    plane synchronization into separate combiners, at which
-      //    point this will need to hop from the data plane combiner into
-      //    the control plane combiner.
-      if (!exit_idle_called_) {
-        exit_idle_called_ = true;
-        parent_->Ref().release();  // ref held by closure.
-        GRPC_CLOSURE_SCHED(
-            GRPC_CLOSURE_CREATE(&CallExitIdle, parent_.get(),
-                                grpc_combiner_scheduler(parent_->combiner())),
-            GRPC_ERROR_NONE);
-      }
-      return PICK_QUEUE;
-    }
-
-   private:
-    static void CallExitIdle(void* arg, grpc_error* error) {
-      LoadBalancingPolicy* parent = static_cast<LoadBalancingPolicy*>(arg);
-      parent->ExitIdleLocked();
-      parent->Unref();
-    }
-
-    RefCountedPtr<LoadBalancingPolicy> parent_;
-    bool exit_idle_called_ = false;
-  };
-
-  // A picker that returns PICK_TRANSIENT_FAILURE for all picks.
-  class TransientFailurePicker : public SubchannelPicker {
-   public:
-    explicit TransientFailurePicker(grpc_error* error) : error_(error) {}
-    ~TransientFailurePicker() { GRPC_ERROR_UNREF(error_); }
-
-    PickResult Pick(PickState* pick, grpc_error** error) override {
-      *error = GRPC_ERROR_REF(error_);
-      return PICK_TRANSIENT_FAILURE;
-    }
-
-   private:
-    grpc_error* error_;
-  };
-
   /// A proxy object used by the LB policy to communicate with the client
   /// channel.
   class ChannelControlHelper {
@@ -188,6 +176,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
         GRPC_ABSTRACT;
 
     /// Creates a channel with the specified target and channel args.
+    /// This can be used in cases where the LB policy needs to create a
+    /// channel for its own use (e.g., to talk to an external load balancer).
     virtual grpc_channel* CreateChannel(
         const char* target, const grpc_channel_args& args) GRPC_ABSTRACT;
 
@@ -203,7 +193,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     GRPC_ABSTRACT_BASE_CLASS
   };
 
-  // Configuration for an LB policy instance.
+  /// Configuration for an LB policy instance.
+  // TODO(roth): Find a better JSON representation for this API.
   class Config : public RefCounted<Config> {
    public:
     Config(const grpc_json* lb_config,
@@ -234,9 +225,13 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     /// their constructor.
     UniquePtr<ChannelControlHelper> channel_control_helper;
     /// Channel args.
+    // TODO(roth): Find a better channel args representation for this API.
     const grpc_channel_args* args = nullptr;
   };
 
+  explicit LoadBalancingPolicy(Args args, intptr_t initial_refcount = 1);
+  virtual ~LoadBalancingPolicy();
+
   // Not copyable nor movable.
   LoadBalancingPolicy(const LoadBalancingPolicy&) = delete;
   LoadBalancingPolicy& operator=(const LoadBalancingPolicy&) = delete;
@@ -262,40 +257,62 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
   virtual void ResetBackoffLocked() GRPC_ABSTRACT;
 
   /// Populates child_subchannels and child_channels with the uuids of this
-  /// LB policy's referenced children. This is not invoked from the
-  /// client_channel's combiner. The implementation is responsible for
-  /// providing its own synchronization.
+  /// LB policy's referenced children.
+  ///
+  /// This is not invoked from the client_channel's combiner. The
+  /// implementation is responsible for providing its own synchronization.
   virtual void FillChildRefsForChannelz(
       channelz::ChildRefsList* child_subchannels,
      channelz::ChildRefsList* child_channels) GRPC_ABSTRACT;
 
-  void Orphan() override {
-    // Invoke ShutdownAndUnrefLocked() inside of the combiner.
-    GRPC_CLOSURE_SCHED(
-        GRPC_CLOSURE_CREATE(&LoadBalancingPolicy::ShutdownAndUnrefLocked, this,
-                            grpc_combiner_scheduler(combiner_)),
-        GRPC_ERROR_NONE);
+  void set_channelz_node(
+      RefCountedPtr<channelz::ClientChannelNode> channelz_node) {
+    channelz_node_ = std::move(channelz_node);
   }
 
+  grpc_pollset_set* interested_parties() const { return interested_parties_; }
+
+  void Orphan() override;
+
   /// Returns the JSON node of policy (with both policy name and config content)
   /// given the JSON node of a LoadBalancingConfig array.
   static grpc_json* ParseLoadBalancingConfig(const grpc_json* lb_config_array);
 
-  grpc_pollset_set* interested_parties() const { return interested_parties_; }
+  // A picker that returns PICK_QUEUE for all picks.
+  // Also calls the parent LB policy's ExitIdleLocked() method when the
+  // first pick is seen.
+  class QueuePicker : public SubchannelPicker {
+   public:
+    explicit QueuePicker(RefCountedPtr<LoadBalancingPolicy> parent)
+        : parent_(std::move(parent)) {}
 
-  void set_channelz_node(
-      RefCountedPtr<channelz::ClientChannelNode> channelz_node) {
-    channelz_node_ = std::move(channelz_node);
-  }
+    PickResult Pick(PickArgs* pick, grpc_error** error) override;
 
-  GRPC_ABSTRACT_BASE_CLASS
+   private:
+    static void CallExitIdle(void* arg, grpc_error* error);
 
- protected:
-  GPRC_ALLOW_CLASS_TO_USE_NON_PUBLIC_DELETE
+    RefCountedPtr<LoadBalancingPolicy> parent_;
+    bool exit_idle_called_ = false;
+  };
 
-  explicit LoadBalancingPolicy(Args args, intptr_t initial_refcount = 1);
-  virtual ~LoadBalancingPolicy();
-
+  // A picker that returns PICK_TRANSIENT_FAILURE for all picks.
+  class TransientFailurePicker : public SubchannelPicker {
+   public:
+    explicit TransientFailurePicker(grpc_error* error) : error_(error) {}
+    ~TransientFailurePicker() override { GRPC_ERROR_UNREF(error_); }
+
+    PickResult Pick(PickArgs* pick, grpc_error** error) override {
+      *error = GRPC_ERROR_REF(error_);
+      return PICK_TRANSIENT_FAILURE;
+    }
+
+   private:
+    grpc_error* error_;
+  };
+
+  GRPC_ABSTRACT_BASE_CLASS
+
+ protected:
   grpc_combiner* combiner() const { return combiner_; }
 
   // Note: LB policies MUST NOT call any method on the helper from their
@@ -309,18 +326,11 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
     return channelz_node_.get();
   }
 
-  /// Shuts down the policy. Any pending picks that have not been
-  /// handed off to a new policy via HandOffPendingPicksLocked() will be
-  /// failed.
+  /// Shuts down the policy.
   virtual void ShutdownLocked() GRPC_ABSTRACT;
 
  private:
-  static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored) {
-    LoadBalancingPolicy* policy = static_cast<LoadBalancingPolicy*>(arg);
-    policy->ShutdownLocked();
-    policy->channel_control_helper_.reset();
-    policy->Unref();
-  }
+  static void ShutdownAndUnrefLocked(void* arg, grpc_error* ignored);
 
   /// Combiner under which LB policy actions take place.
   grpc_combiner* combiner_;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 5906ecafc2a..7d143dfe1aa 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -254,7 +254,7 @@ class GrpcLb : public LoadBalancingPolicy {
           child_picker_(std::move(child_picker)),
           client_stats_(std::move(client_stats)) {}
 
-    PickResult Pick(PickState* pick, grpc_error** error) override;
+    PickResult Pick(PickArgs* pick, grpc_error** error) override;
 
    private:
     // Storing the address for logging, but not holding a ref.
@@ -540,8 +540,7 @@ const char* GrpcLb::Serverlist::ShouldDrop() {
 // GrpcLb::Picker
 //
 
-GrpcLb::Picker::PickResult GrpcLb::Picker::Pick(PickState* pick,
-                                                grpc_error** error) {
+GrpcLb::PickResult GrpcLb::Picker::Pick(PickArgs* pick, grpc_error** error) {
   // Check if we should drop the call.
   const char* drop_token = serverlist_->ShouldDrop();
   if (drop_token != nullptr) {
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index 15d953cd92f..c1dd9478044 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -117,7 +117,7 @@ class PickFirst : public LoadBalancingPolicy {
     explicit Picker(RefCountedPtr<ConnectedSubchannel> connected_subchannel)
         : connected_subchannel_(std::move(connected_subchannel)) {}
 
-    PickResult Pick(PickState* pick, grpc_error** error) override {
+    PickResult Pick(PickArgs* pick, grpc_error** error) override {
       pick->connected_subchannel = connected_subchannel_;
       return PICK_COMPLETE;
     }
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 704a5c28c9e..01068c6dc49 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -156,7 +156,7 @@ class RoundRobin : public LoadBalancingPolicy {
    public:
     Picker(RoundRobin* parent, RoundRobinSubchannelList* subchannel_list);
 
-    PickResult Pick(PickState* pick, grpc_error** error) override;
+    PickResult Pick(PickArgs* pick, grpc_error** error) override;
 
    private:
     // Using pointer value only, no ref held -- do not dereference!
@@ -227,8 +227,8 @@ RoundRobin::Picker::Picker(RoundRobin* parent,
   }
 }
 
-RoundRobin::Picker::PickResult RoundRobin::Picker::Pick(PickState* pick,
-                                                        grpc_error** error) {
+RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs* pick,
+                                                grpc_error** error) {
   last_picked_index_ = (last_picked_index_ + 1) % subchannels_.size();
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_INFO,
diff --git a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
index eca41bf3a2e..1711c1ab28d 100644
--- a/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/xds/xds.cc
@@ -260,7 +260,7 @@ class XdsLb : public LoadBalancingPolicy {
         : child_picker_(std::move(child_picker)),
          client_stats_(std::move(client_stats)) {}
 
-    PickResult Pick(PickState* pick, grpc_error** error) override;
+    PickResult Pick(PickArgs* pick, grpc_error** error) override;
 
    private:
     UniquePtr<SubchannelPicker> child_picker_;
@@ -366,8 +366,7 @@ class XdsLb : public LoadBalancingPolicy {
 // XdsLb::Picker
 //
 
-XdsLb::Picker::PickResult XdsLb::Picker::Pick(PickState* pick,
-                                              grpc_error** error) {
+XdsLb::PickResult XdsLb::Picker::Pick(PickArgs* pick, grpc_error** error) {
   // TODO(roth): Add support for drop handling.
   // Forward pick to child policy.
   PickResult result = child_picker_->Pick(pick, error);
diff --git a/test/core/util/test_lb_policies.cc b/test/core/util/test_lb_policies.cc
index 745162f637f..a8657b02546 100644
--- a/test/core/util/test_lb_policies.cc
+++ b/test/core/util/test_lb_policies.cc
@@ -121,7 +121,7 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
           cb_(cb),
           user_data_(user_data) {}
 
-    PickResult Pick(PickState* pick, grpc_error** error) override {
+    PickResult Pick(PickArgs* pick, grpc_error** error) override {
       PickResult result = delegate_picker_->Pick(pick, error);
       if (result == PICK_COMPLETE && pick->connected_subchannel != nullptr) {
         New<TrailingMetadataHandler>(pick, cb_, user_data_);  // deletes itself
@@ -171,7 +171,7 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
 
   class TrailingMetadataHandler {
    public:
-    TrailingMetadataHandler(PickState* pick,
+    TrailingMetadataHandler(PickArgs* pick,
                            InterceptRecvTrailingMetadataCallback cb,
                            void* user_data)
        : cb_(cb), user_data_(user_data) {
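
Note for reviewers (not part of the patch): the sketch below shows what a policy-defined picker looks like against the reworked surface, where PickArgs and PickResult now live directly on LoadBalancingPolicy instead of on SubchannelPicker. It simply mirrors the pick_first Picker already shown in this diff; the class name FixedSubchannelPicker is hypothetical, and the snippet assumes the in-tree gRPC core headers.

// Illustrative sketch only -- not part of this change. Mirrors the
// pick_first Picker above, using the relocated PickArgs/PickResult names.
#include <grpc/support/port_platform.h>

#include <utility>

#include "src/core/ext/filters/client_channel/lb_policy.h"

namespace grpc_core {

// Hypothetical picker that always hands back one already-connected
// subchannel. Because this class is not nested inside an LB policy, the
// pick types are qualified with LoadBalancingPolicy:: explicitly.
class FixedSubchannelPicker : public LoadBalancingPolicy::SubchannelPicker {
 public:
  explicit FixedSubchannelPicker(
      RefCountedPtr<ConnectedSubchannel> connected_subchannel)
      : connected_subchannel_(std::move(connected_subchannel)) {}

  LoadBalancingPolicy::PickResult Pick(LoadBalancingPolicy::PickArgs* pick,
                                       grpc_error** error) override {
    // Output parameter: the subchannel the call should be sent on.
    pick->connected_subchannel = connected_subchannel_;
    return LoadBalancingPolicy::PICK_COMPLETE;
  }

 private:
  RefCountedPtr<ConnectedSubchannel> connected_subchannel_;
};

}  // namespace grpc_core

Data-plane-only state lives in the picker itself; anything that needs the combiner stays on the LoadBalancingPolicy, which is exactly the control-plane/data-plane split the new header comment describes.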