Merge pull request #20090 from markdroth/lb_policy_api_improvements

LB policy API changes suggested by Sanjay.
Mark D. Roth authored 5 years ago; committed by GitHub
commit 76ac4595e3
  1. src/core/ext/filters/client_channel/client_channel.cc (10 changed lines)
  2. src/core/ext/filters/client_channel/lb_policy.cc (2 changed lines)
  3. src/core/ext/filters/client_channel/lb_policy.h (55 changed lines)
  4. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (5 changed lines)
  5. src/core/ext/filters/client_channel/lb_policy/xds/xds.cc (9 changed lines)
  6. src/core/ext/filters/client_channel/resolving_lb_policy.cc (4 changed lines)
  7. test/core/util/test_lb_policies.cc (2 changed lines)

@@ -1340,11 +1340,11 @@ class ChannelData::ClientChannelControlHelper
// No-op -- we should never get this from ResolvingLoadBalancingPolicy.
void RequestReresolution() override {}
-void AddTraceEvent(TraceSeverity severity, const char* message) override {
+void AddTraceEvent(TraceSeverity severity, StringView message) override {
if (chand_->channelz_node_ != nullptr) {
chand_->channelz_node_->AddTraceEvent(
ConvertSeverityEnum(severity),
-grpc_slice_from_copied_string(message));
+grpc_slice_from_copied_buffer(message.data(), message.size()));
}
}
@@ -3725,8 +3725,8 @@ const char* PickResultTypeName(
return "COMPLETE";
case LoadBalancingPolicy::PickResult::PICK_QUEUE:
return "QUEUE";
-case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE:
-return "TRANSIENT_FAILURE";
+case LoadBalancingPolicy::PickResult::PICK_FAILED:
+return "FAILED";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
@@ -3787,7 +3787,7 @@ void CallData::StartPickLocked(void* arg, grpc_error* error) {
result.subchannel.get(), grpc_error_string(result.error));
}
switch (result.type) {
-case LoadBalancingPolicy::PickResult::PICK_TRANSIENT_FAILURE: {
+case LoadBalancingPolicy::PickResult::PICK_FAILED: {
// If we're shutting down, fail all RPCs.
grpc_error* disconnect_error = chand->disconnect_error();
if (disconnect_error != GRPC_ERROR_NONE) {

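The client_channel.cc hunk above is the motivating case for the new signature: a StringView is not guaranteed to be null-terminated, so the channelz slice has to be built from an explicit pointer/length pair (grpc_slice_from_copied_buffer) instead of a strlen-based copy (grpc_slice_from_copied_string). A minimal standalone sketch of that distinction, using std::string_view and std::string as stand-ins for the gRPC types (they are not the real StringView or slice):

```cpp
#include <iostream>
#include <string>
#include <string_view>

// Mirrors the shape of grpc_slice_from_copied_buffer(data, size): copy exactly
// `size` bytes, without assuming a trailing '\0' at data[size].
std::string CopyBuffer(const char* data, size_t size) {
  return std::string(data, size);
}

int main() {
  std::string full = "resolver returned 3 addresses; updating child policy";
  // A view over a prefix of an existing buffer: view.data() is not
  // null-terminated at view.size(), so a strlen-based copy would overrun.
  std::string_view message(full.data(), 29);  // "resolver returned 3 addresses"
  std::cout << CopyBuffer(message.data(), message.size()) << "\n";
}
```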
@@ -129,7 +129,7 @@ void LoadBalancingPolicy::QueuePicker::CallExitIdle(void* arg,
LoadBalancingPolicy::PickResult
LoadBalancingPolicy::TransientFailurePicker::Pick(PickArgs args) {
PickResult result;
-result.type = PickResult::PICK_TRANSIENT_FAILURE;
+result.type = PickResult::PICK_FAILED;
result.error = GRPC_ERROR_REF(error_);
return result;
}

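The rename shows up most clearly in this picker: the connectivity state is still TRANSIENT_FAILURE, but the pick outcome is now simply "failed". A self-contained toy sketch of the same shape as TransientFailurePicker::Pick above, with assumed simplifications (a plain std::string instead of grpc_error*, and no PickArgs):

```cpp
#include <iostream>
#include <string>
#include <utility>

// Toy mirror of the result type in lb_policy.h, trimmed down for illustration.
struct PickResult {
  enum ResultType { PICK_COMPLETE, PICK_QUEUE, PICK_FAILED };
  ResultType type = PICK_COMPLETE;
  std::string error;  // stand-in for grpc_error* error
};

class FailurePicker {
 public:
  explicit FailurePicker(std::string error) : error_(std::move(error)) {}
  // Every pick fails until the LB policy swaps in a healthier picker.
  PickResult Pick() const {
    PickResult result;
    result.type = PickResult::PICK_FAILED;
    result.error = error_;  // the real picker refs its grpc_error here
    return result;
  }

 private:
  std::string error_;
};

int main() {
  FailurePicker picker("all subchannels in TRANSIENT_FAILURE");
  PickResult r = picker.Pick();
  std::cout << "pick result: "
            << (r.type == PickResult::PICK_FAILED ? "FAILED" : "other")
            << " (" << r.error << ")\n";
}
```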
@@ -42,15 +42,15 @@ extern DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
///
/// Channel: An abstraction that manages connections to backend servers
/// on behalf of a client application. The application creates a channel
-/// for a given server name and then sends RPCs on it, and the channel
-/// figures out which backend server to send each RPC to. A channel
+/// for a given server name and then sends calls (RPCs) on it, and the
+/// channel figures out which backend server to send each call to. A channel
/// contains a resolver, a load balancing policy (or a tree of LB policies),
/// and a set of one or more subchannels.
///
/// Subchannel: A subchannel represents a connection to one backend server.
/// The LB policy decides which subchannels to create, manages the
/// connectivity state of those subchannels, and decides which subchannel
-/// to send any given RPC to.
+/// to send any given call to.
///
/// Resolver: A plugin that takes a gRPC server URI and resolves it to a
/// list of one or more addresses and a service config, as described
@@ -59,12 +59,12 @@ extern DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
///
/// Load Balancing (LB) Policy: A plugin that takes a list of addresses
/// from the resolver, maintains and manages a subchannel for each
-/// backend address, and decides which subchannel to send each RPC on.
+/// backend address, and decides which subchannel to send each call on.
/// An LB policy has two parts:
/// - A LoadBalancingPolicy, which deals with the control plane work of
/// managing subchannels.
/// - A SubchannelPicker, which handles the data plane work of
-/// determining which subchannel a given RPC should be sent on.
+/// determining which subchannel a given call should be sent on.
/// LoadBalacingPolicy API.
///
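The split described in this comment block (a LoadBalancingPolicy for control-plane work, a SubchannelPicker for per-call work) can be pictured with a compressed toy skeleton; every name below is an illustrative stand-in, not the gRPC API:

```cpp
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Data-plane object: consulted once per call; holds only what picking needs.
class ToyPicker {
 public:
  explicit ToyPicker(std::vector<int> ready_subchannels)
      : ready_(std::move(ready_subchannels)) {}
  // Returns the id of the subchannel to use for this call (-1 = none ready).
  int Pick(size_t call_index) const {
    if (ready_.empty()) return -1;
    return ready_[call_index % ready_.size()];  // trivial round-robin
  }

 private:
  std::vector<int> ready_;
};

// Control-plane object: reacts to resolver updates and connectivity changes
// and publishes a fresh picker; it is not on the per-call path itself.
class ToyLbPolicy {
 public:
  void OnSubchannelsBecameReady(std::vector<int> ids) {
    picker_ = std::make_shared<ToyPicker>(std::move(ids));
  }
  std::shared_ptr<ToyPicker> picker() const { return picker_; }

 private:
  std::shared_ptr<ToyPicker> picker_;
};

int main() {
  ToyLbPolicy policy;
  policy.OnSubchannelsBecameReady({1, 2, 3});  // control plane
  auto picker = policy.picker();               // channel caches the picker
  for (size_t call = 0; call < 4; ++call) {    // data plane, once per call
    std::cout << "call " << call << " -> subchannel " << picker->Pick(call) << "\n";
  }
}
```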
@@ -78,6 +78,7 @@ extern DebugOnlyTraceFlag grpc_trace_lb_policy_refcount;
class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
public:
/// Interface for accessing per-call state.
+/// Implemented by the client channel and used by the SubchannelPicker.
class CallState {
public:
CallState() = default;
@@ -93,6 +94,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
};
/// Interface for accessing metadata.
+/// Implemented by the client channel and used by the SubchannelPicker.
class MetadataInterface {
public:
// Implementations whose iterators fit in intptr_t may internally
@@ -123,7 +125,7 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
GRPC_ABSTRACT_BASE_CLASS
};
-/// Arguments used when picking a subchannel for an RPC.
+/// Arguments used when picking a subchannel for a call.
struct PickArgs {
/// Initial metadata associated with the picking call.
/// The LB policy may use the existing metadata to influence its routing
@@ -135,24 +137,23 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
CallState* call_state;
};
-/// The result of picking a subchannel for an RPC.
+/// The result of picking a subchannel for a call.
struct PickResult {
enum ResultType {
-/// Pick complete. If connected_subchannel is non-null, client channel
-/// can immediately proceed with the call on connected_subchannel;
-/// otherwise, call should be dropped.
+/// Pick complete. If \a subchannel is non-null, the client channel
+/// will immediately proceed with the call on that subchannel;
+/// otherwise, it will drop the call.
PICK_COMPLETE,
/// Pick cannot be completed until something changes on the control
-/// plane. Client channel will queue the pick and try again the
+/// plane. The client channel will queue the pick and try again the
/// next time the picker is updated.
PICK_QUEUE,
-/// LB policy is in transient failure. If the pick is wait_for_ready,
-/// client channel will wait for the next picker and try again;
-/// otherwise, the call will be failed immediately (although it may
-/// be retried if the client channel is configured to do so).
-/// The Pick() method will set its error parameter if this value is
-/// returned.
-PICK_TRANSIENT_FAILURE,
+/// Pick failed. If the call is wait_for_ready, the client channel
+/// will wait for the next picker and try again; otherwise, it
+/// will immediately fail the call with the status indicated via
+/// \a error (although the call may be retried if the client channel
+/// is configured to do so).
+PICK_FAILED,
};
ResultType type;
@@ -160,14 +161,14 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
/// subchannel, or nullptr if the LB policy decides to drop the call.
RefCountedPtr<SubchannelInterface> subchannel;
-/// Used only if type is PICK_TRANSIENT_FAILURE.
-/// Error to be set when returning a transient failure.
+/// Used only if type is PICK_FAILED.
+/// Error to be set when returning a failure.
// TODO(roth): Replace this with something similar to grpc::Status,
// so that we don't expose grpc_error to this API.
grpc_error* error = GRPC_ERROR_NONE;
/// Used only if type is PICK_COMPLETE.
-/// Callback set by lb policy to be notified of trailing metadata.
+/// Callback set by LB policy to be notified of trailing metadata.
/// The user_data argument will be set to the
/// recv_trailing_metadata_ready_user_data field.
/// recv_trailing_metadata will be set to the metadata, which may be
@@ -184,11 +185,12 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
};
/// A subchannel picker is the object used to pick the subchannel to
-/// use for a given RPC.
+/// use for a given call. This is implemented by the LB policy and
+/// used by the client channel to perform picks.
///
/// Pickers are intended to encapsulate all of the state and logic
/// needed on the data plane (i.e., to actually process picks for
-/// individual RPCs sent on the channel) while excluding all of the
+/// individual calls sent on the channel) while excluding all of the
/// state and logic needed on the control plane (i.e., resolver
/// updates, connectivity state notifications, etc); the latter should
/// live in the LB policy object itself.
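Putting the PickResult comments and this picker description together: Pick() has three outcomes, and only the complete path carries a subchannel and an optional trailing-metadata callback. A toy end-to-end sketch of that contract (ResultType and field names follow the diff; the simplified Subchannel type, std::string error, and std::function callback are assumptions, not the gRPC types):

```cpp
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Subchannel { std::string address; };  // stand-in for SubchannelInterface

struct PickResult {
  enum ResultType { PICK_COMPLETE, PICK_QUEUE, PICK_FAILED };
  ResultType type = PICK_QUEUE;
  std::shared_ptr<Subchannel> subchannel;  // used only for PICK_COMPLETE
  std::string error;                       // used only for PICK_FAILED
  // Used only for PICK_COMPLETE: lets the LB policy see trailing metadata,
  // e.g. for load reporting.
  std::function<void(const std::string& trailing_metadata)> recv_trailing_metadata_ready;
};

class ToyPicker {
 public:
  ToyPicker(std::shared_ptr<Subchannel> ready, bool in_failure)
      : ready_(std::move(ready)), in_failure_(in_failure) {}

  PickResult Pick() const {
    PickResult result;
    if (ready_ != nullptr) {
      result.type = PickResult::PICK_COMPLETE;
      result.subchannel = ready_;
      result.recv_trailing_metadata_ready = [](const std::string& md) {
        std::cout << "trailing metadata seen by LB policy: " << md << "\n";
      };
    } else if (in_failure_) {
      // Per the comments above: the channel fails non-wait_for_ready calls
      // with this error and re-queues wait_for_ready calls for a new picker.
      result.type = PickResult::PICK_FAILED;
      result.error = "no backends reachable";
    }  // otherwise: leave PICK_QUEUE and wait for the next picker update.
    return result;
  }

 private:
  std::shared_ptr<Subchannel> ready_;
  bool in_failure_;
};

int main() {
  ToyPicker picker(std::make_shared<Subchannel>(Subchannel{"10.0.0.1:443"}),
                   /*in_failure=*/false);
  PickResult r = picker.Pick();
  if (r.type == PickResult::PICK_COMPLETE) {
    std::cout << "send call on " << r.subchannel->address << "\n";
    r.recv_trailing_metadata_ready("grpc-status: 0");
  }
}
```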
@@ -206,8 +208,8 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
GRPC_ABSTRACT_BASE_CLASS
};
-/// A proxy object used by the LB policy to communicate with the client
-/// channel.
+/// A proxy object implemented by the client channel and used by the
+/// LB policy to communicate with the channel.
// TODO(juanlishen): Consider adding a mid-layer subclass that helps handle
// things like swapping in pending policy when it's ready. Currently, we are
// duplicating the logic in many subclasses.
@@ -229,10 +231,9 @@ class LoadBalancingPolicy : public InternallyRefCounted<LoadBalancingPolicy> {
virtual void RequestReresolution() GRPC_ABSTRACT;
/// Adds a trace message associated with the channel.
-/// Does NOT take ownership of \a message.
enum TraceSeverity { TRACE_INFO, TRACE_WARNING, TRACE_ERROR };
virtual void AddTraceEvent(TraceSeverity severity,
-const char* message) GRPC_ABSTRACT;
+StringView message) GRPC_ABSTRACT;
GRPC_ABSTRACT_BASE_CLASS
};
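The helper runs in the opposite direction from the picker: the client channel implements it and the LB policy calls into it, for example to record trace events. A toy sketch of the new pass-a-view signature, using std::string_view and invented names (ToyChannelControlHelper, RecordingHelper) purely for illustration:

```cpp
#include <iostream>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

enum TraceSeverity { TRACE_INFO, TRACE_WARNING, TRACE_ERROR };

// Surface the LB policy sees; the channel provides the implementation.
class ToyChannelControlHelper {
 public:
  virtual ~ToyChannelControlHelper() = default;
  virtual void RequestReresolution() = 0;
  virtual void AddTraceEvent(TraceSeverity severity, std::string_view message) = 0;
};

// Toy channel-side implementation that just records what it is told.
class RecordingHelper : public ToyChannelControlHelper {
 public:
  void RequestReresolution() override { ++reresolution_requests_; }
  void AddTraceEvent(TraceSeverity severity, std::string_view message) override {
    // Copy by pointer/length; the view need not be null-terminated.
    events_.emplace_back(severity, std::string(message));
  }
  size_t num_events() const { return events_.size(); }

 private:
  int reresolution_requests_ = 0;
  std::vector<std::pair<TraceSeverity, std::string>> events_;
};

int main() {
  RecordingHelper helper;
  std::string buf = "picker updated: 2 READY subchannels (plus scratch bytes)";
  // Pass only the interesting prefix of the buffer, without copying it first.
  helper.AddTraceEvent(TRACE_INFO, std::string_view(buf).substr(0, buf.find(" (plus")));
  std::cout << "recorded " << helper.num_events() << " trace event(s)\n";
}
```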

@@ -296,7 +296,7 @@ class GrpcLb : public LoadBalancingPolicy {
void UpdateState(grpc_connectivity_state state,
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
-void AddTraceEvent(TraceSeverity severity, const char* message) override;
+void AddTraceEvent(TraceSeverity severity, StringView message) override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
@@ -745,8 +745,7 @@ void GrpcLb::Helper::RequestReresolution() {
}
}
-void GrpcLb::Helper::AddTraceEvent(TraceSeverity severity,
-const char* message) {
+void GrpcLb::Helper::AddTraceEvent(TraceSeverity severity, StringView message) {
if (parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return;

@@ -434,7 +434,7 @@ class XdsLb : public LoadBalancingPolicy {
void UpdateState(grpc_connectivity_state state,
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
-void AddTraceEvent(TraceSeverity severity, const char* message) override;
+void AddTraceEvent(TraceSeverity severity, StringView message) override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
@@ -483,8 +483,7 @@ class XdsLb : public LoadBalancingPolicy {
void UpdateState(grpc_connectivity_state state,
UniquePtr<SubchannelPicker> picker) override;
void RequestReresolution() override;
-void AddTraceEvent(TraceSeverity severity,
-const char* message) override;
+void AddTraceEvent(TraceSeverity severity, StringView message) override;
void set_child(LoadBalancingPolicy* child) { child_ = child; }
private:
@@ -761,7 +760,7 @@ void XdsLb::FallbackHelper::RequestReresolution() {
}
void XdsLb::FallbackHelper::AddTraceEvent(TraceSeverity severity,
-const char* message) {
+StringView message) {
if (parent_->shutting_down_ ||
(!CalledByPendingFallback() && !CalledByCurrentFallback())) {
return;
@@ -2487,7 +2486,7 @@ void XdsLb::LocalityMap::LocalityEntry::Helper::RequestReresolution() {
}
void XdsLb::LocalityMap::LocalityEntry::Helper::AddTraceEvent(
-TraceSeverity severity, const char* message) {
+TraceSeverity severity, StringView message) {
if (entry_->parent_->shutting_down_ ||
(!CalledByPendingChild() && !CalledByCurrentChild())) {
return;

@@ -153,7 +153,7 @@ class ResolvingLoadBalancingPolicy::ResolvingControlHelper
}
}
-void AddTraceEvent(TraceSeverity severity, const char* message) override {}
+void AddTraceEvent(TraceSeverity severity, StringView message) override {}
void set_child(LoadBalancingPolicy* child) { child_ = child; }
@@ -428,7 +428,7 @@ void ResolvingLoadBalancingPolicy::ConcatenateAndAddChannelTraceLocked(
size_t len = 0;
UniquePtr<char> message(gpr_strvec_flatten(&v, &len));
channel_control_helper()->AddTraceEvent(ChannelControlHelper::TRACE_INFO,
-message.get());
+StringView(message.get()));
gpr_strvec_destroy(&v);
}
}

@@ -161,7 +161,7 @@ class InterceptRecvTrailingMetadataLoadBalancingPolicy
parent_->channel_control_helper()->RequestReresolution();
}
-void AddTraceEvent(TraceSeverity severity, const char* message) override {
+void AddTraceEvent(TraceSeverity severity, StringView message) override {
parent_->channel_control_helper()->AddTraceEvent(severity, message);
}
