syntax = "proto3";
package envoy.config.cluster.v3;
import "envoy/config/cluster/v3/circuit_breaker.proto";
import "envoy/config/cluster/v3/filter.proto";
import "envoy/config/cluster/v3/outlier_detection.proto";
import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/config_source.proto";
import "envoy/config/core/v3/extension.proto";
import "envoy/config/core/v3/health_check.proto";
import "envoy/config/core/v3/protocol.proto";
import "envoy/config/core/v3/resolver.proto";
import "envoy/config/endpoint/v3/endpoint.proto";
import "envoy/type/v3/percent.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";
import "xds/core/v3/collection_entry.proto";
import "envoy/annotations/deprecation.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/security.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";
option java_package = "io.envoyproxy.envoy.config.cluster.v3";
option java_outer_classname = "ClusterProto";
option java_multiple_files = true;
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3";
option (udpa.annotations.file_status).package_version_status = ACTIVE;
// [#protodoc-title: Cluster configuration]
// Cluster list collections. Entries are ``Cluster`` resources or references.
// [#not-implemented-hide:]
message ClusterCollection {
xds.core.v3.CollectionEntry entries = 1;
}
// Configuration for a single upstream cluster.
// [#next-free-field: 57]
message Cluster {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
// Refer to :ref:`service discovery type <arch_overview_service_discovery_types>`
// for an explanation on each type.
enum DiscoveryType {
// Refer to the :ref:`static discovery type<arch_overview_service_discovery_types_static>`
// for an explanation.
STATIC = 0;
// Refer to the :ref:`strict DNS discovery
// type<arch_overview_service_discovery_types_strict_dns>`
// for an explanation.
STRICT_DNS = 1;
// Refer to the :ref:`logical DNS discovery
// type<arch_overview_service_discovery_types_logical_dns>`
// for an explanation.
LOGICAL_DNS = 2;
// Refer to the :ref:`service discovery type<arch_overview_service_discovery_types_eds>`
// for an explanation.
EDS = 3;
// Refer to the :ref:`original destination discovery
// type<arch_overview_service_discovery_types_original_destination>`
// for an explanation.
ORIGINAL_DST = 4;
}
// Refer to :ref:`load balancer type <arch_overview_load_balancing_types>` architecture
// overview section for information on each type.
enum LbPolicy {
reserved 4;
reserved "ORIGINAL_DST_LB";
// Refer to the :ref:`round robin load balancing
// policy<arch_overview_load_balancing_types_round_robin>`
// for an explanation.
ROUND_ROBIN = 0;
// Refer to the :ref:`least request load balancing
// policy<arch_overview_load_balancing_types_least_request>`
// for an explanation.
LEAST_REQUEST = 1;
// Refer to the :ref:`ring hash load balancing
// policy<arch_overview_load_balancing_types_ring_hash>`
// for an explanation.
RING_HASH = 2;
// Refer to the :ref:`random load balancing
// policy<arch_overview_load_balancing_types_random>`
// for an explanation.
RANDOM = 3;
// Refer to the :ref:`Maglev load balancing policy<arch_overview_load_balancing_types_maglev>`
// for an explanation.
MAGLEV = 5;
// This load balancer type must be specified if the configured cluster provides a cluster
// specific load balancer. Consult the configured cluster's documentation for whether to set
// this option or not.
CLUSTER_PROVIDED = 6;
// Use the new :ref:`load_balancing_policy
// <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field to determine the LB policy.
// This has been deprecated in favor of using the :ref:`load_balancing_policy
// <envoy_v3_api_field_config.cluster.v3.Cluster.load_balancing_policy>` field without
// setting any value in :ref:`lb_policy<envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>`.
LOAD_BALANCING_POLICY_CONFIG = 7;
}
// When V4_ONLY is selected, the DNS resolver will only perform a lookup for
// addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
// only perform a lookup for addresses in the IPv6 family. If AUTO is
// specified, the DNS resolver will first perform a lookup for addresses in
// the IPv6 family and fall back to a lookup for addresses in the IPv4 family.
// This is semantically equivalent to a non-existent V6_PREFERRED option.
// AUTO is a legacy name that is more opaque than
// necessary and will be deprecated in favor of V6_PREFERRED in a future major version of the API.
// If V4_PREFERRED is specified, the DNS resolver will first perform a lookup for addresses in the
// IPv4 family and fall back to a lookup for addresses in the IPv6 family; i.e., the callback
// target will only get v6 addresses if there were NO v4 addresses to return.
// If ALL is specified, the DNS resolver will perform a lookup for both IPv4 and IPv6 families,
// and return all resolved addresses. When this is used, Happy Eyeballs will be enabled for
// upstream connections. Refer to :ref:`Happy Eyeballs Support <arch_overview_happy_eyeballs>`
// for more information.
// For cluster types other than
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and
// :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
// this setting is
// ignored.
// [#next-major-version: deprecate AUTO in favor of a V6_PREFERRED option.]
enum DnsLookupFamily {
AUTO = 0;
V4_ONLY = 1;
V6_ONLY = 2;
V4_PREFERRED = 3;
ALL = 4;
}
enum ClusterProtocolSelection {
// Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
// If :ref:`http2_protocol_options <envoy_v3_api_field_config.cluster.v3.Cluster.http2_protocol_options>` are
// present, HTTP2 will be used, otherwise HTTP1.1 will be used.
USE_CONFIGURED_PROTOCOL = 0;
// Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
USE_DOWNSTREAM_PROTOCOL = 1;
}
// TransportSocketMatch specifies what transport socket config will be used
// when the match conditions are satisfied.
message TransportSocketMatch {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.TransportSocketMatch";
// The name of the match, used in stats generation.
string name = 1 [(validate.rules).string = {min_len: 1}];
// Optional endpoint metadata match criteria.
// The connection to the endpoint with metadata matching what is set in this field
// will use the transport socket configuration specified here.
// The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match
// against the values specified in this field.
google.protobuf.Struct match = 2;
// The configuration of the transport socket.
// [#extension-category: envoy.transport_sockets.upstream]
core.v3.TransportSocket transport_socket = 3;
}
// Extended cluster type.
message CustomClusterType {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.CustomClusterType";
// The type of the cluster to instantiate. The name must match a supported cluster type.
string name = 1 [(validate.rules).string = {min_len: 1}];
// Cluster specific configuration which depends on the cluster being instantiated.
// See the supported cluster's documentation for further details.
// [#extension-category: envoy.clusters]
google.protobuf.Any typed_config = 2;
}
// Only valid when discovery type is EDS.
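// As a hedged sketch (the service name below is illustrative), an EDS cluster whose updates
// arrive over ADS might be configured like this:
//
// .. code-block:: yaml
//
//   eds_cluster_config:
//     eds_config:
//       ads: {}
//       resource_api_version: V3
//     service_name: my-eds-service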
message EdsClusterConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.EdsClusterConfig";
// Configuration for the source of EDS updates for this Cluster.
core.v3.ConfigSource eds_config = 1;
// Optional alternative to the cluster name to present to EDS. This does not
// have the same restrictions as the cluster name, i.e. it may be of arbitrary
// length. This may also be an xdstp:// URL.
string service_name = 2;
}
// Optionally divide the endpoints in this cluster into subsets defined by
// endpoint metadata and selected by route and weighted cluster metadata.
// [#next-free-field: 9]
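// As a hedged illustration (keys and values below are made up, not defaults), a cluster might
// enable subsetting on a ``version`` metadata key like this:
//
// .. code-block:: yaml
//
//   lb_subset_config:
//     fallback_policy: DEFAULT_SUBSET
//     default_subset:
//       version: "1.0"
//     subset_selectors:
//     - keys: [ "version" ]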
message LbSubsetConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.LbSubsetConfig";
// If NO_FALLBACK is selected, a result
// equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
// any cluster endpoint may be returned (subject to policy, health checks,
// etc). If DEFAULT_SUBSET is selected, load balancing is performed over the
// endpoints matching the values from the default_subset field.
enum LbSubsetFallbackPolicy {
NO_FALLBACK = 0;
ANY_ENDPOINT = 1;
DEFAULT_SUBSET = 2;
}
enum LbSubsetMetadataFallbackPolicy {
// No fallback. Route metadata will be used as-is.
METADATA_NO_FALLBACK = 0;
// A special metadata key ``fallback_list`` will be used to provide variants of metadata to try.
// The value of the ``fallback_list`` key has to be a list. Every list element has to be a struct; it will
// be merged with route metadata, overriding keys that appear in both places.
// ``fallback_list`` entries will be used in order until a host is found.
//
// The ``fallback_list`` key itself is removed from metadata before subset load balancing is performed.
//
// Example:
//
// for metadata:
//
// .. code-block:: yaml
//
//   version: 1.0
//   fallback_list:
//     - version: 2.0
//       hardware: c64
//     - hardware: c32
//     - version: 3.0
//
// at first, metadata:
//
// .. code-block:: json
//
// {"version": "2.0", "hardware": "c64"}
//
// will be used for load balancing. If no host is found, metadata:
//
// .. code-block:: json
//
// {"version": "1.0", "hardware": "c32"}
//
// is next to try. If it still results in no host, finally metadata:
//
// .. code-block:: json
//
// {"version": "3.0"}
//
// is used.
FALLBACK_LIST = 1;
}
// Specifications for subsets.
message LbSubsetSelector {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector";
// Allows overriding the top level fallback policy per selector.
enum LbSubsetSelectorFallbackPolicy {
// If NOT_DEFINED top level config fallback policy is used instead.
NOT_DEFINED = 0;
// If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
NO_FALLBACK = 1;
// If ANY_ENDPOINT is selected, any cluster endpoint may be returned
// (subject to policy, health checks, etc).
ANY_ENDPOINT = 2;
// If DEFAULT_SUBSET is selected, load balancing is performed over the
// endpoints matching the values from the default_subset field.
DEFAULT_SUBSET = 3;
// If KEYS_SUBSET is selected, subset selector matching is performed again with metadata
// keys reduced to
// :ref:`fallback_keys_subset<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.fallback_keys_subset>`.
// It allows for a fallback to a different, less specific selector if some of the keys of
// the selector are considered optional.
KEYS_SUBSET = 4;
}
// List of keys to match with the weighted cluster metadata.
repeated string keys = 1;
// Selects a mode of operation in which each subset has only one host. This mode uses the same rules for
// choosing a host, but updating hosts is faster, especially for large numbers of hosts.
//
// If a match is found to a host, that host will be used regardless of priority levels.
//
// When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys``
// will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge
// :ref:`lb_subsets_single_host_per_subset_duplicate<config_cluster_manager_cluster_stats_subset_lb>` indicates how many duplicates are
// present in the current configuration.
bool single_host_per_subset = 4;
// The behavior used when no endpoint subset matches the selected route's
// metadata.
LbSubsetSelectorFallbackPolicy fallback_policy = 2
[(validate.rules).enum = {defined_only: true}];
// Subset of
// :ref:`keys<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` used by
// :ref:`KEYS_SUBSET<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.LbSubsetSelectorFallbackPolicy.KEYS_SUBSET>`
// fallback policy.
// It has to be a non-empty list if the KEYS_SUBSET fallback policy is selected.
// For any other fallback policy this parameter is not used and should not be set.
// Only values also present in
// :ref:`keys<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector.keys>` are allowed, but
// ``fallback_keys_subset`` cannot be equal to ``keys``.
repeated string fallback_keys_subset = 3;
}
// The behavior used when no endpoint subset matches the selected route's
// metadata. The value defaults to
// :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];
// Specifies the default subset of endpoints used during fallback if
// fallback_policy is
// :ref:`DEFAULT_SUBSET<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.DEFAULT_SUBSET>`.
// Each field in default_subset is
// compared to the matching LbEndpoint.Metadata under the ``envoy.lb``
// namespace. It is valid for no hosts to match, in which case the behavior
// is the same as a fallback_policy of
// :ref:`NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetFallbackPolicy.NO_FALLBACK>`.
google.protobuf.Struct default_subset = 2;
// For each entry, LbEndpoint.Metadata's
// ``envoy.lb`` namespace is traversed and a subset is created for each unique
// combination of key and value. For example:
//
// .. code-block:: json
//
// { "subset_selectors": [
// { "keys": [ "version" ] },
// { "keys": [ "stage", "hardware_type" ] }
// ]}
//
// A subset is matched when the metadata from the selected route and
// weighted cluster contains the same keys and values as the subset's
// metadata. The same host may appear in multiple subsets.
repeated LbSubsetSelector subset_selectors = 3;
// If true, routing to subsets will take into account the localities and locality weights of the
// endpoints when making the routing decision.
//
// There are some potential pitfalls associated with enabling this feature, as the resulting
// traffic split after applying both a subset match and locality weights might be undesirable.
//
// Consider for example a situation in which you have 50/50 split across two localities X/Y
// which have 100 hosts each without subsetting. If the subset LB results in X having only 1
// host selected but Y having 100, then a lot more load is being dumped on the single host in X
// than originally anticipated in the load balancing assignment delivered via EDS.
bool locality_weight_aware = 4;
// When used with locality_weight_aware, scales the weight of each locality by the ratio
// of hosts in the subset vs hosts in the original subset. This aims to even out the load
// going to an individual locality if said locality is disproportionately affected by the
// subset predicate.
bool scale_locality_weight = 5;
// If true, when a fallback policy is configured and its corresponding subset fails to find
// a host, this will cause any host to be selected instead.
//
// This is useful when using the default subset as the fallback policy, given the default
// subset might become empty. With this option enabled, if that happens the LB will attempt
// to select a host from the entire cluster.
bool panic_mode_any = 6;
// If true, metadata specified for a metadata key will be matched against the corresponding
// endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
// and any of the elements in the list matches the criteria.
bool list_as_any = 7;
// Fallback mechanism that allows trying different route metadata until a host is found.
// If the load balancing process, including all of its mechanisms (such as
// :ref:`fallback_policy<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy>`),
// fails to select a host, this policy decides whether and how the process is repeated using other metadata.
//
// The value defaults to
// :ref:`METADATA_NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy.METADATA_NO_FALLBACK>`.
LbSubsetMetadataFallbackPolicy metadata_fallback_policy = 8
[(validate.rules).enum = {defined_only: true}];
}
// Configuration for :ref:`slow start mode <arch_overview_load_balancing_slow_start>`.
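// As a hedged sketch (window, aggression value, and runtime key below are illustrative, not
// defaults), slow start might be enabled for a round robin cluster like this:
//
// .. code-block:: yaml
//
//   round_robin_lb_config:
//     slow_start_config:
//       slow_start_window: 60s
//       aggression:
//         default_value: 1.0
//         runtime_key: upstream.my_cluster.slow_start.aggression
//       min_weight_percent:
//         value: 10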
message SlowStartConfig {
// Represents the size of slow start window.
// If set, the newly created host remains in slow start mode starting from its creation time
// for the duration of slow start window.
google.protobuf.Duration slow_start_window = 1;
// This parameter controls the speed of traffic increase over the slow start window. Defaults to 1.0,
// so that the endpoint receives a linearly increasing amount of traffic.
// Increasing the value of this parameter makes the traffic ramp-up non-linear.
// The value of the aggression parameter must be greater than 0.0.
// By tuning this parameter, it is possible to achieve a polynomial or exponential ramp-up curve.
//
// During the slow start window, the effective weight of an endpoint is scaled by a time factor and aggression:
// ``new_weight = weight * max(min_weight_percent, time_factor ^ (1 / aggression))``,
// where ``time_factor = (time_since_start_seconds / slow_start_time_seconds)``.
//
// As time progresses, more and more traffic is sent to an endpoint that is within its slow start window.
// Once the host exits slow start, ``time_factor`` and ``aggression`` no longer affect its weight.
core.v3.RuntimeDouble aggression = 2;
// Configures the minimum percentage of the original weight; this prevents the new weight from becoming
// so small that endpoints in slow start mode receive no traffic during the slow start window.
// If not specified, the default is 10%.
type.v3.Percent min_weight_percent = 3;
}
// Specific configuration for the RoundRobin load balancing policy.
message RoundRobinLbConfig {
// Configuration for slow start mode.
// If this configuration is not set, slow start will not be enabled.
SlowStartConfig slow_start_config = 1;
}
// Specific configuration for the LeastRequest load balancing policy.
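// As a hedged sketch (values below are illustrative, not defaults):
//
// .. code-block:: yaml
//
//   least_request_lb_config:
//     choice_count: 3
//     active_request_bias:
//       default_value: 1.0
//       runtime_key: upstream.my_cluster.active_request_bias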
message LeastRequestLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.LeastRequestLbConfig";
// The number of random healthy hosts from which the host with the fewest active requests will
// be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];
// The following formula is used to calculate the dynamic weights when hosts have different load
// balancing weights:
//
// ``weight = load_balancing_weight / (active_requests + 1)^active_request_bias``
//
// The larger the active request bias is, the more aggressively active requests will lower the
// effective weight when all host weights are not equal.
//
// ``active_request_bias`` must be greater than or equal to 0.0.
//
// When ``active_request_bias == 0.0`` the Least Request Load Balancer doesn't consider the number
// of active requests at the time it picks a host and behaves like the Round Robin Load
// Balancer.
//
// When ``active_request_bias > 0.0`` the Least Request Load Balancer scales the load balancing
// weight by the number of active requests at the time it does a pick.
//
// The value is cached for performance reasons and refreshed whenever one of the Load Balancer's
// host sets changes, e.g., whenever there is a host membership update or a host load balancing
// weight change.
//
// .. note::
// This setting only takes effect if all host weights are not equal.
core.v3.RuntimeDouble active_request_bias = 2;
// Configuration for slow start mode.
// If this configuration is not set, slow start will not be enabled.
SlowStartConfig slow_start_config = 3;
}
// Specific configuration for the :ref:`RingHash<arch_overview_load_balancing_types_ring_hash>`
// load balancing policy.
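// As a hedged sketch (the values below mirror the documented defaults):
//
// .. code-block:: yaml
//
//   ring_hash_lb_config:
//     minimum_ring_size: 1024
//     hash_function: XX_HASH
//     maximum_ring_size: 8388608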
message RingHashLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.RingHashLbConfig";
// The hash function used to hash hosts onto the ketama ring.
enum HashFunction {
// Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
XX_HASH = 0;
// Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with
// std::hash<string> in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
// on Linux and not macOS.
MURMUR_HASH_2 = 1;
}
reserved 2;
// Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
// provided host) the better the request distribution will reflect the desired weights. Defaults
// to 1024 entries, and limited to 8M entries. See also
// :ref:`maximum_ring_size<envoy_v3_api_field_config.cluster.v3.Cluster.RingHashLbConfig.maximum_ring_size>`.
google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];
// The hash function used to hash hosts onto the ketama ring. The value defaults to
// :ref:`XX_HASH<envoy_v3_api_enum_value_config.cluster.v3.Cluster.RingHashLbConfig.HashFunction.XX_HASH>`.
HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];
// Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
// to further constrain resource use. See also
// :ref:`minimum_ring_size<envoy_v3_api_field_config.cluster.v3.Cluster.RingHashLbConfig.minimum_ring_size>`.
google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];
}
// Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`
// load balancing policy.
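// As a hedged sketch (the table size below is the documented default, shown for illustration):
//
// .. code-block:: yaml
//
//   maglev_lb_config:
//     table_size: 65537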
message MaglevLbConfig {
// The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee.
// Minimal disruption means that when the set of upstream hosts changes, a connection will likely be sent to the same
// upstream as it was before. Increasing the table size reduces the amount of disruption.
// The table size must be a prime number limited to 5000011. If it is not specified, the default is 65537.
google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}];
}
// Specific configuration for the
// :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`
// load balancing policy.
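// As a hedged sketch (the header name and port below are illustrative):
//
// .. code-block:: yaml
//
//   original_dst_lb_config:
//     use_http_header: true
//     http_header_name: x-original-dst-host
//     upstream_port_override: 8080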
message OriginalDstLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.OriginalDstLbConfig";
// When true, an HTTP header can be used to override the original dst address. The default header is
// :ref:`x-envoy-original-dst-host <config_http_conn_man_headers_x-envoy-original-dst-host>`.
//
// .. attention::
//
// This header isn't sanitized by default, so enabling this feature allows HTTP clients to
// route traffic to arbitrary hosts and/or ports, which may have serious security
// consequences.
//
// .. note::
//
// If the header appears multiple times only the first value is used.
bool use_http_header = 1;
// The HTTP header used to override the destination address if :ref:`use_http_header <envoy_v3_api_field_config.cluster.v3.Cluster.OriginalDstLbConfig.use_http_header>`
// is set to true. If the value is empty, :ref:`x-envoy-original-dst-host <config_http_conn_man_headers_x-envoy-original-dst-host>` will be used.
string http_header_name = 2;
// The port to override for the original dst address. This port
// will take precedence over filter state and header override ports.
google.protobuf.UInt32Value upstream_port_override = 3 [(validate.rules).uint32 = {lte: 65535}];
}
// Common configuration for all load balancer implementations.
// [#next-free-field: 9]
message CommonLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.CommonLbConfig";
// Configuration for :ref:`zone aware routing
// <arch_overview_load_balancing_zone_aware_routing>`.
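// As a hedged sketch (the values below mirror the documented defaults):
//
// .. code-block:: yaml
//
//   common_lb_config:
//     zone_aware_lb_config:
//       routing_enabled:
//         value: 100
//       min_cluster_size: 6
//       fail_traffic_on_panic: false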
message ZoneAwareLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig";
// Configures percentage of requests that will be considered for zone aware routing
// if zone aware routing is configured. If not specified, the default is 100%.
// * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.
// * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.
type.v3.Percent routing_enabled = 1;
// Configures the minimum upstream cluster size required for zone aware routing.
// If the upstream cluster size is less than the specified value, zone aware routing is not performed
// even if zone aware routing is configured. If not specified, the default is 6.
// * :ref:`runtime values <config_cluster_manager_cluster_runtime_zone_routing>`.
// * :ref:`Zone aware routing support <arch_overview_load_balancing_zone_aware_routing>`.
google.protobuf.UInt64Value min_cluster_size = 2;
// If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic
// mode<arch_overview_load_balancing_panic_threshold>`. Instead, the cluster will fail all
// requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a
// failing service.
bool fail_traffic_on_panic = 3;
}
// Configuration for :ref:`locality weighted load balancing
// <arch_overview_load_balancing_locality_weighted_lb>`
message LocalityWeightedLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig";
}
// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
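// As a hedged sketch (the balance factor below is illustrative):
//
// .. code-block:: yaml
//
//   common_lb_config:
//     consistent_hashing_lb_config:
//       use_hostname_for_hashing: false
//       hash_balance_factor: 150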
message ConsistentHashingLbConfig {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig";
// If set to ``true``, the cluster will use hostname instead of the resolved
// address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.
bool use_hostname_for_hashing = 1;
// Configures the percentage of the average cluster load to bound per upstream host. For example, with a value of 150,
// no upstream host will receive more than 1.5 times the average load of all the hosts in the cluster.
// If not specified, the load is not bounded for any upstream host. Typical values for this parameter are between 120 and 200.
// The minimum is 100.
//
// Applies to both Ring Hash and Maglev load balancers.
//
// This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified
// ``hash_balance_factor``, requests to any upstream host are capped at ``hash_balance_factor/100`` times the average number of requests
// across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing
// is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify
// the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the
// cascading overflow effect when choosing the next host in the ring/table).
//
// If weights are specified on the hosts, they are respected.
//
// This is an O(N) algorithm, unlike other load balancers. Using a lower ``hash_balance_factor`` results in more hosts
// being probed, so use a higher value if you require better performance.
google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];
}
// Configures the :ref:`healthy panic threshold <arch_overview_load_balancing_panic_threshold>`.
// If not specified, the default is 50%.
// To disable panic mode, set to 0%.
//
// .. note::
// The specified percent will be truncated to the nearest 1%.
type.v3.Percent healthy_panic_threshold = 1;
oneof locality_config_specifier {
ZoneAwareLbConfig zone_aware_lb_config = 2;
LocalityWeightedLbConfig locality_weighted_lb_config = 3;
}
// If set, all health check/weight/metadata updates that happen within this duration will be
// merged and delivered in one shot when the duration expires. The start of the duration is when
// the first update happens. This is useful for big clusters, with potentially noisy deploys
// that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
// or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
// cluster). Please always keep in mind that the use of sandbox technologies may change this
// behavior.
//
// If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
// window to 0.
//
// Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
// because merging those updates isn't currently safe. See
// https://github.com/envoyproxy/envoy/pull/3941.
google.protobuf.Duration update_merge_window = 4;
// If set to true, Envoy will :ref:`exclude <arch_overview_load_balancing_excluded>` new hosts
// when computing load balancing weights until they have been health checked for the first time.
// This will have no effect unless active health checking is also configured.
bool ignore_new_hosts_until_first_hc = 5;
// If set to ``true``, the cluster manager will drain all existing
// connections to upstream hosts whenever hosts are added or removed from the cluster.
bool close_connections_on_host_set_change = 6;
// Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
ConsistentHashingLbConfig consistent_hashing_lb_config = 7;
// This controls what hosts are considered valid when using
// :ref:`host overrides <arch_overview_load_balancing_override_host>`, which is used by some
// filters to modify the load balancing decision.
//
// If this is unset then [UNKNOWN, HEALTHY, DEGRADED] will be applied by default. If this is
// set with an empty set of statuses then host overrides will be ignored by the load balancing.
core.v3.HealthStatusSet override_host_status = 8;
}
message RefreshRate {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate";
// Specifies the base interval between refreshes. This parameter is required and must be greater
// than zero and less than
// :ref:`max_interval <envoy_v3_api_field_config.cluster.v3.Cluster.RefreshRate.max_interval>`.
google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
required: true
gt {nanos: 1000000}
}];
// Specifies the maximum interval between refreshes. This parameter is optional, but must be
// greater than or equal to the
// :ref:`base_interval <envoy_v3_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>` if set. The default
// is 10 times the :ref:`base_interval <envoy_v3_api_field_config.cluster.v3.Cluster.RefreshRate.base_interval>`.
google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];
}
message PreconnectPolicy {
// Indicates how many streams (rounded up) can be anticipated per-upstream for each
// incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting
// will only be done if the upstream is healthy and the cluster has traffic.
//
// For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be
// established, one for the new incoming stream, and one for a presumed follow-up stream. For
// HTTP/2, only one connection would be established by default as one connection can
// serve both the original and presumed follow-up stream.
//
// In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100
// active streams, there would be 100 connections in use, and 50 connections preconnected.
// This might be a useful value for something like short lived single-use connections,
// for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection
// termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP
// or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more
// reasonable, where for every 100 connections, 5 preconnected connections would be in the queue
// in case of unexpected disconnects where the connection could not be reused.
//
// If this value is not set, or is set explicitly to one, Envoy will fetch as many connections
// as needed to serve streams in flight. This means in steady state if a connection is torn down,
// a subsequent stream will pay an upstream-rtt latency penalty waiting for a new connection.
//
// This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can
// harm latency more than the preconnecting helps.
google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1
[(validate.rules).double = {lte: 3.0 gte: 1.0}];
// Indicates how many streams (rounded up) can be anticipated across a cluster for each
// stream, useful for low QPS services. This is currently supported for a subset of
// deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
// Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a
// cluster, doing best effort predictions of what upstream would be picked next and
// pre-establishing a connection.
//
// Preconnecting will be limited to one preconnect per configured upstream in the cluster and will
// only be done if there are healthy upstreams and the cluster has traffic.
//
// For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first
// incoming stream, 2 connections will be preconnected - one to the first upstream for this
// cluster, one to the second on the assumption there will be a follow-up stream.
//
// If this value is not set, or set explicitly to one, Envoy will fetch as many connections
// as needed to serve streams in flight, so during warm up and in steady state if a connection
// is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for
// connection establishment.
//
// If both this and ``per_upstream_preconnect_ratio`` are set, Envoy will make sure both predicted needs are met,
// basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each
// upstream.
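//
// As a hedged sketch (the ratios below are illustrative):
//
// .. code-block:: yaml
//
//   preconnect_policy:
//     per_upstream_preconnect_ratio: 1.5
//     predictive_preconnect_ratio: 2.0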
google.protobuf.DoubleValue predictive_preconnect_ratio = 2
[(validate.rules).double = {lte: 3.0 gte: 1.0}];
}
reserved 12, 15, 7, 11, 35;
reserved "hosts", "tls_context", "extension_protocol_options";
// Configuration to use different transport sockets for different endpoints.
// The entry of ``envoy.transport_socket_match`` in the
// :ref:`LbEndpoint.Metadata <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>`
// is used to match against the transport sockets as they appear in the list. The first
// :ref:`match <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used.
// For example, with the following match
//
// .. code-block:: yaml
//
//   transport_socket_matches:
//   - name: "enableMTLS"
//     match:
//       acceptMTLS: true
//     transport_socket:
//       name: envoy.transport_sockets.tls
//       config: { ... } # tls socket configuration
//   - name: "defaultToPlaintext"
//     match: {}
//     transport_socket:
//       name: envoy.transport_sockets.raw_buffer
//
// Connections to endpoints whose metadata value under ``envoy.transport_socket_match``
// contains the "acceptMTLS": "true" key/value pair use the "enableMTLS" socket configuration.
//
// If a :ref:`socket match <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` with empty match
// criteria is provided, it always matches any endpoint. For example, the "defaultToPlaintext"
// socket match in the example above.
//
// If an endpoint's metadata value under ``envoy.transport_socket_match`` does not match any
// ``TransportSocketMatch``, the socket configuration falls back to the ``tls_context`` or
// ``transport_socket`` specified in this cluster.
//
// This field allows gradual and flexible transport socket configuration changes.
//
// The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
// an endpoint's metadata can have the two key/value pairs "acceptMTLS": "true" and
// "acceptPlaintext": "true", while other endpoints that only accept plaintext traffic
// have just the "acceptPlaintext": "true" metadata entry.
//
// The xDS server can then configure CDS for a client, Envoy A, to send mutual TLS
// traffic to endpoints with "acceptMTLS": "true", by adding a corresponding
// ``TransportSocketMatch`` in this field. Other client Envoys receive CDS without
// ``transport_socket_match`` set, and still send plaintext traffic to the same cluster.
//
// This field can be used to specify custom transport socket configurations for health
// checks by adding matching key/value pairs in a health check's
// :ref:`transport socket match criteria <envoy_v3_api_field_config.core.v3.HealthCheck.transport_socket_match_criteria>` field.
//
// [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
repeated TransportSocketMatch transport_socket_matches = 43;
// Supplies the name of the cluster which must be unique across all clusters.
// The cluster name is used when emitting
// :ref:`statistics <config_cluster_manager_cluster_stats>` if :ref:`alt_stat_name
// <envoy_v3_api_field_config.cluster.v3.Cluster.alt_stat_name>` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
string name = 1 [(validate.rules).string = {min_len: 1}];
// An optional alternative to the cluster name to be used for observability. This name is used
// when emitting stats for the cluster and when access logging the cluster name. This will appear as
// additional information in configuration dumps of a cluster's current status as
// :ref:`observability_name <envoy_v3_api_field_admin.v3.ClusterStatus.observability_name>`
// and as an additional tag "upstream_cluster.name" while tracing. Note: Any ``:`` in the name
// will be converted to ``_`` when emitting statistics. This should not be confused with
// :ref:`Router Filter Header <config_http_filters_router_x-envoy-upstream-alt-stat-name>`.
string alt_stat_name = 28 [(udpa.annotations.field_migrate).rename = "observability_name"];
oneof cluster_discovery_type {
// The :ref:`service discovery type <arch_overview_service_discovery_types>`
// to use for resolving the cluster.
DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];
// The custom cluster type.
CustomClusterType cluster_type = 38;
}
// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;
// The timeout for new network connections to hosts in the cluster.
// If not set, a default value of 5s will be used.
google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];
// Soft limit on the size of the cluster's connection read and write buffers. If
// unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
[(udpa.annotations.security).configure_for_untrusted_upstream = true];
// The :ref:`load balancer type <arch_overview_load_balancing_types>` to use
// when picking a host in the cluster.
LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}];
// Setting this is required for specifying members of
// :ref:`STATIC<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STATIC>`,
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` clusters.
// This field supersedes the ``hosts`` field in the v2 API.
//
// .. attention::
//
// Setting this allows non-EDS cluster types to contain embedded EDS equivalent
// :ref:`endpoint assignments<envoy_v3_api_msg_config.endpoint.v3.ClusterLoadAssignment>`.
//
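// As a hedged sketch (cluster name, address, and port below are made up), a STATIC cluster
// might embed its endpoints like this:
//
// .. code-block:: yaml
//
//   load_assignment:
//     cluster_name: my_service
//     endpoints:
//     - lb_endpoints:
//       - endpoint:
//           address:
//             socket_address:
//               address: 10.0.0.1
//               port_value: 8080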
endpoint.v3.ClusterLoadAssignment load_assignment = 33;
// Optional :ref:`active health checking <arch_overview_health_checking>`
// configuration for the cluster. If no
// configuration is specified no health checking will be done and all cluster
// members will be considered healthy at all times.
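// As a hedged sketch of a single HTTP health check (path and timings below are illustrative):
//
// .. code-block:: yaml
//
//   health_checks:
//   - timeout: 1s
//     interval: 5s
//     unhealthy_threshold: 3
//     healthy_threshold: 2
//     http_health_check:
//       path: /healthz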
repeated core.v3.HealthCheck health_checks = 8;
// Optional maximum requests for a single upstream connection. This parameter
// is respected by both the HTTP/1.1 and HTTP/2 connection pool
// implementations. If not specified, there is no limit. Setting this
// parameter to 1 will effectively disable keep alive.
//
// .. attention::
// This field has been deprecated in favor of the :ref:`max_requests_per_connection <envoy_v3_api_field_config.core.v3.HttpProtocolOptions.max_requests_per_connection>` field.
google.protobuf.UInt32Value max_requests_per_connection = 9
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Optional :ref:`circuit breaking <arch_overview_circuit_break>` for the cluster.
CircuitBreakers circuit_breakers = 10;
// HTTP protocol options that are applied only to upstream HTTP connections.
// These options apply to all HTTP versions.
// This has been deprecated in favor of
// :ref:`upstream_http_protocol_options <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstream_http_protocol_options>`
// in the :ref:`http_protocol_options <envoy_v3_api_msg_extensions.upstreams.http.v3.HttpProtocolOptions>` message.
// upstream_http_protocol_options can be set via the cluster's
// :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`.
// See :ref:`upstream_http_protocol_options
// <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstream_http_protocol_options>`
// for example usage.
core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Additional options when handling HTTP requests upstream. These options will be applicable to
// both HTTP1 and HTTP2 requests.
// This has been deprecated in favor of
// :ref:`common_http_protocol_options <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.common_http_protocol_options>`
// in the :ref:`http_protocol_options <envoy_v3_api_msg_extensions.upstreams.http.v3.HttpProtocolOptions>` message.
// common_http_protocol_options can be set via the cluster's
// :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`.
// See :ref:`upstream_http_protocol_options
// <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstream_http_protocol_options>`
// for example usage.
core.v3.HttpProtocolOptions common_http_protocol_options = 29
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Additional options when handling HTTP1 requests.
// This has been deprecated in favor of http_protocol_options fields in the
// :ref:`http_protocol_options <envoy_v3_api_msg_extensions.upstreams.http.v3.HttpProtocolOptions>` message.
// http_protocol_options can be set via the cluster's
// :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`.
// See :ref:`upstream_http_protocol_options
// <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstream_http_protocol_options>`
// for example usage.
core.v3.Http1ProtocolOptions http_protocol_options = 13
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Even if default HTTP2 protocol options are desired, this field must be
// set so that Envoy will assume that the upstream supports HTTP/2 when
// making new HTTP connection pool connections. Currently, Envoy only
// supports prior knowledge for upstream connections. Even if TLS is used
// with ALPN, ``http2_protocol_options`` must be specified. As an aside this allows HTTP/2
// connections to happen over plain text.
// This has been deprecated in favor of http2_protocol_options fields in the
// :ref:`http_protocol_options <envoy_v3_api_msg_extensions.upstreams.http.v3.HttpProtocolOptions>`
// message. http2_protocol_options can be set via the cluster's
// :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`.
// See :ref:`upstream_http_protocol_options
// <envoy_v3_api_field_extensions.upstreams.http.v3.HttpProtocolOptions.upstream_http_protocol_options>`
// for example usage.
core.v3.Http2ProtocolOptions http2_protocol_options = 14 [
deprecated = true,
(udpa.annotations.security).configure_for_untrusted_upstream = true,
(envoy.annotations.deprecated_at_minor_version) = "3.0"
];
// The extension_protocol_options field is used to provide extension-specific protocol options
// for upstream connections. The key should match the extension filter name, such as
// "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
// specific options.
// [#next-major-version: make this a list of typed extensions.]
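//
// As a hedged sketch, one common use is enabling upstream HTTP/2 through the HTTP protocol
// options extension:
//
// .. code-block:: yaml
//
//   typed_extension_protocol_options:
//     envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
//       "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions
//       explicit_http_config:
//         http2_protocol_options: {}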
map<string, google.protobuf.Any> typed_extension_protocol_options = 36;
// If the DNS refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
// this value is used as the cluster’s DNS refresh
// rate. The value configured must be at least 1ms. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`
// and :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`
// this setting is ignored.
google.protobuf.Duration dns_refresh_rate = 16
[(validate.rules).duration = {gt {nanos: 1000000}}];
// If the DNS failure refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
// this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is
// not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types
// other than :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>` and
// :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>` this setting is
// ignored.
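//
// As a hedged sketch (the intervals below are illustrative):
//
// .. code-block:: yaml
//
//   dns_failure_refresh_rate:
//     base_interval: 1s
//     max_interval: 10s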
RefreshRate dns_failure_refresh_rate = 44;
// Optional configuration for setting the cluster's DNS refresh rate. If the value is set to true,
// the cluster's DNS refresh rate will be set to the resource record's TTL returned by DNS
// resolution.
bool respect_dns_ttl = 39;
// The DNS IP address resolution policy. If this setting is not specified, the
// value defaults to
// :ref:`AUTO<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DnsLookupFamily.AUTO>`.
DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];
// If DNS resolvers are specified and the cluster type is either
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
// this value is used to specify the cluster’s dns resolvers.
// If this setting is not specified, the value defaults to the default
// resolver, which uses /etc/resolv.conf for configuration. For cluster types
// other than
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`
// and :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`
// this setting is ignored.
// This field is deprecated in favor of ``dns_resolution_config``
// which aggregates all of the DNS resolver configuration in a single message.
repeated core.v3.Address dns_resolvers = 18
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Always use TCP queries instead of UDP queries for DNS lookups.
// This field is deprecated in favor of ``dns_resolution_config``
// which aggregates all of the DNS resolver configuration in a single message.
bool use_tcp_for_dns_lookups = 45
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// DNS resolution configuration which includes the underlying dns resolver addresses and options.
// This field is deprecated in favor of
// :ref:`typed_dns_resolver_config <envoy_v3_api_field_config.cluster.v3.Cluster.typed_dns_resolver_config>`.
core.v3.DnsResolutionConfig dns_resolution_config = 53
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// DNS resolver type configuration extension. This extension can be used to configure c-ares, apple,
// or any other DNS resolver types and the related parameters.
// For example, an object of
// :ref:`CaresDnsResolverConfig <envoy_v3_api_msg_extensions.network.dns_resolver.cares.v3.CaresDnsResolverConfig>`
// can be packed into this ``typed_dns_resolver_config``. This configuration replaces the
// :ref:`dns_resolution_config <envoy_v3_api_field_config.cluster.v3.Cluster.dns_resolution_config>`
// configuration.
// During the transition period when both ``dns_resolution_config`` and ``typed_dns_resolver_config`` exist,
// Envoy will use ``typed_dns_resolver_config`` if it is set and ignore ``dns_resolution_config``.
// When ``typed_dns_resolver_config`` is missing, the default behavior applies.
// [#extension-category: envoy.network.dns_resolver]
core.v3.TypedExtensionConfig typed_dns_resolver_config = 55;
// Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`.
// If true, cluster readiness blocks on warm-up. If false, the cluster will complete
// initialization whether or not warm-up has completed. Defaults to true.
google.protobuf.BoolValue wait_for_warm_on_init = 54;
// If specified, outlier detection will be enabled for this upstream cluster.
// Each of the configuration values can be overridden via
// :ref:`runtime values <config_cluster_manager_cluster_runtime_outlier_detection>`.
OutlierDetection outlier_detection = 19;
// The interval for removing stale hosts from a cluster of type
// :ref:`ORIGINAL_DST<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`.
// Hosts are considered stale if they have not been used
// as upstream destinations during this interval. New hosts are added
// to original destination clusters on demand as new connections are
// redirected to Envoy, causing the number of hosts in the cluster to
// grow over time. Hosts that are not stale (they are actively used as
// destinations) are kept in the cluster, which allows connections to
// them to remain open, saving the latency that would otherwise be spent
// on opening new connections. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`ORIGINAL_DST<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.ORIGINAL_DST>`
// this setting is ignored.
google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];
// Optional configuration used to bind newly established upstream connections.
// This overrides any bind_config specified in the bootstrap proto.
// If the address and port are empty, no bind will be performed.
core.v3.BindConfig upstream_bind_config = 21;
// Configuration for load balancing subsetting.
LbSubsetConfig lb_subset_config = 22;
// Optional configuration for the load balancing algorithm selected by
// LbPolicy. Currently only
// :ref:`RING_HASH<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbPolicy.RING_HASH>`,
// :ref:`MAGLEV<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbPolicy.MAGLEV>` and
// :ref:`LEAST_REQUEST<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbPolicy.LEAST_REQUEST>`
// have additional configuration options.
// Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding
// LbPolicy will generate an error at runtime.
oneof lb_config {
// Optional configuration for the Ring Hash load balancing policy.
RingHashLbConfig ring_hash_lb_config = 23;
// Optional configuration for the Maglev load balancing policy.
MaglevLbConfig maglev_lb_config = 52;
// Optional configuration for the Original Destination load balancing policy.
OriginalDstLbConfig original_dst_lb_config = 34;
// Optional configuration for the LeastRequest load balancing policy.
LeastRequestLbConfig least_request_lb_config = 37;
// Optional configuration for the RoundRobin load balancing policy.
RoundRobinLbConfig round_robin_lb_config = 56;
}
// Common configuration for all load balancer implementations.
CommonLbConfig common_lb_config = 27;
// Optional custom transport socket implementation to use for upstream connections.
// To setup TLS, set a transport socket with name ``envoy.transport_sockets.tls`` and
// :ref:`UpstreamTlsContexts <envoy_v3_api_msg_extensions.transport_sockets.tls.v3.UpstreamTlsContext>` in the ``typed_config``.
// If no transport socket configuration is specified, new connections
// will be set up with plaintext.
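// As a hedged sketch (the SNI value below is illustrative):
//
// .. code-block:: yaml
//
//   transport_socket:
//     name: envoy.transport_sockets.tls
//     typed_config:
//       "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
//       sni: example.com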
core.v3.TransportSocket transport_socket = 24;
// The Metadata field can be used to provide additional information about the
// cluster. It can be used for stats, logging, and varying filter behavior.
// Fields should use reverse DNS notation to denote which entity within Envoy
// will need the information. For instance, if the metadata is intended for
// the Router filter, the filter name should be specified as ``envoy.filters.http.router``.
core.v3.Metadata metadata = 25;
// Determines how Envoy selects the protocol used to speak to upstream hosts.
// This has been deprecated in favor of setting explicit protocol selection
// in the :ref:`http_protocol_options
// <envoy_v3_api_msg_extensions.upstreams.http.v3.HttpProtocolOptions>` message.
// http_protocol_options can be set via the cluster's
// :ref:`extension_protocol_options<envoy_v3_api_field_config.cluster.v3.Cluster.typed_extension_protocol_options>`.
ClusterProtocolSelection protocol_selection = 26
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Optional options for upstream connections.
UpstreamConnectionOptions upstream_connection_options = 30;
// If an upstream host becomes unhealthy (as determined by the configured health checks
// or outlier detection), immediately close all connections to the failed host.
//
// .. note::
//
// This is currently only supported for connections created by tcp_proxy.
//
// .. note::
//
// The current implementation of this feature closes all connections immediately when
// the unhealthy status is detected. If there are a large number of connections open
// to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of
// time exclusively closing these connections, and not processing any other traffic.
bool close_connections_on_host_health_failure = 31;
// If set to true, Envoy will ignore the health value of a host when processing its removal
// from service discovery. This means that if active health checking is used, Envoy will *not*
// wait for the endpoint to go unhealthy before removing it.
bool ignore_health_on_host_removal = 32;
// An (optional) network filter chain, listed in the order the filters should be applied.
// The chain will be applied to all outgoing connections that Envoy makes to the upstream
// servers of this cluster.
repeated Filter filters = 40;
// If this field is set and is supported by the client, it will supersede the value of
// :ref:`lb_policy<envoy_v3_api_field_config.cluster.v3.Cluster.lb_policy>`.
LoadBalancingPolicy load_balancing_policy = 41;
// [#not-implemented-hide:]
// If present, tells the client where to send load reports via LRS. If not present, the
// client will fall back to a client-side default, which may be either (a) don't send any
// load reports or (b) send load reports for all clusters to a single default server
// (which may be configured in the bootstrap file).
//
// Note that if multiple clusters point to the same LRS server, the client may choose to
// create a separate stream for each cluster or it may choose to coalesce the data for
// multiple clusters onto a single stream. Either way, the client must make sure to send
// the data for any given cluster on no more than one stream.
//
// [#next-major-version: In the v3 API, we should consider restructuring this somehow,
// maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation
// from the LRS stream here.]
core.v3.ConfigSource lrs_server = 42;
// If track_timeout_budgets is true, the :ref:`timeout budget histograms
// <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each
// request. These show what percentage of a request's per try and global timeout was used. A value
// of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value
// of 100 would indicate that the request took the entirety of the timeout given to it.
//
// .. attention::
//
// This field has been deprecated in favor of ``timeout_budgets``, part of
// :ref:`track_cluster_stats <envoy_v3_api_field_config.cluster.v3.Cluster.track_cluster_stats>`.
bool track_timeout_budgets = 47
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
// Optional customization and configuration of upstream connection pool, and upstream type.
//
// Currently this field only applies for HTTP traffic but is designed for eventual use for custom
// TCP upstreams.
//
// For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream
// HTTP, using the http connection pool and the codec from ``http2_protocol_options``
//
// For routes where CONNECT termination is configured, Envoy will take downstream CONNECT
// requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.
//
// The default pool used is the generic connection pool which creates the HTTP upstream for most
// HTTP requests, and the TCP upstream if CONNECT termination is configured.
//
// If users desire custom connection pool or upstream behavior, for example terminating
// CONNECT only if a custom filter indicates it is appropriate, the custom factories
// can be registered and configured here.
// [#extension-category: envoy.upstreams]
core.v3.TypedExtensionConfig upstream_config = 48;
// Configuration to track optional cluster stats.
TrackClusterStats track_cluster_stats = 49;
// Preconnect configuration for this cluster.
PreconnectPolicy preconnect_policy = 50;
// If ``connection_pool_per_downstream_connection`` is true, the cluster will use a separate
// connection pool for every downstream connection.
bool connection_pool_per_downstream_connection = 51;
}
// Extensible load balancing policy configuration.
//
// Every LB policy defined via this mechanism will be identified via a unique name using reverse
// DNS notation. If the policy needs configuration parameters, it must define a message for its
// own configuration, which will be stored in the config field. The name of the policy will tell
// clients which type of message they should expect to see in the config field.
//
// Note that there are cases where it is useful to be able to independently select LB policies
// for choosing a locality and for choosing an endpoint within that locality. For example, a
// given deployment may always use the same policy to choose the locality, but for choosing the
// endpoint within the locality, some clusters may use weighted-round-robin, while others may
// use some sort of session-based balancing.
//
// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a
// child LB policy for each locality. For each request, the parent chooses the locality and then
// delegates to the child policy for that locality to choose the endpoint within the locality.
//
// To facilitate this, the config message for the top-level LB policy may include a field of
// type LoadBalancingPolicy that specifies the child policy.
message LoadBalancingPolicy {
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy";
message Policy {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.LoadBalancingPolicy.Policy";
reserved 2, 1, 3;
reserved "config", "name", "typed_config";
core.v3.TypedExtensionConfig typed_extension_config = 4;
}
// Each client will iterate over the list in order and stop at the first policy that it
// supports. This provides a mechanism for starting to use new LB policies that are not yet
// supported by all clients.
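//
// As a hedged sketch, a policy list selecting the round robin load balancing policy extension
// might look like this:
//
// .. code-block:: yaml
//
//   load_balancing_policy:
//     policies:
//     - typed_extension_config:
//         name: envoy.load_balancing_policies.round_robin
//         typed_config:
//           "@type": type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin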
repeated Policy policies = 1;
}
message UpstreamConnectionOptions {
option (udpa.annotations.versioning).previous_message_type =
"envoy.api.v2.UpstreamConnectionOptions";
// If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
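//
// As a hedged sketch (the keepalive values below are illustrative):
//
// .. code-block:: yaml
//
//   upstream_connection_options:
//     tcp_keepalive:
//       keepalive_probes: 3
//       keepalive_time: 60
//       keepalive_interval: 15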
core.v3.TcpKeepalive tcp_keepalive = 1;
// If enabled, associates the interface name of the local address with the upstream connection.
// This can be used by extensions during processing of requests. The association mechanism is
// implementation specific. Defaults to false due to performance concerns.
bool set_local_interface_name_on_upstream_connections = 2;
}
message TrackClusterStats {
// If timeout_budgets is true, the :ref:`timeout budget histograms
// <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each
// request. These show what percentage of a request's per try and global timeout was used. A value
// of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value
// of 100 would indicate that the request took the entirety of the timeout given to it.
bool timeout_budgets = 1;
// If request_response_sizes is true, then the :ref:`histograms
// <config_cluster_manager_cluster_stats_request_response_sizes>` tracking header and body sizes
// of requests and responses will be published.
bool request_response_sizes = 2;
}