syntax = "proto3";

package envoy.api.v2;

option java_outer_classname = "CdsProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.api.v2";
option java_generic_services = true;

import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/auth/cert.proto";
import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/config_source.proto";
import "envoy/api/v2/discovery.proto";
import "envoy/api/v2/core/health_check.proto";
import "envoy/api/v2/core/protocol.proto";
import "envoy/api/v2/cluster/circuit_breaker.proto";
import "envoy/api/v2/cluster/filter.proto";
import "envoy/api/v2/cluster/outlier_detection.proto";
import "envoy/api/v2/eds.proto";
import "envoy/type/percent.proto";

import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "validate/validate.proto";

// Returns a list of all clusters that this proxy will load balance to.
service ClusterDiscoveryService {
  rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
  }

  rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {
  }

  rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
    option (google.api.http) = {
      post: "/v2/discovery:clusters"
      body: "*"
    };
  }
}

// [#protodoc-title: Clusters]

// Configuration for a single upstream cluster.
// [#comment:next free field: 43]
message Cluster {
  // Supplies the name of the cluster which must be unique across all clusters.
  // The cluster name is used when emitting
  // :ref:`statistics ` if :ref:`alt_stat_name
  // ` is not provided.
  // Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
  string name = 1 [(validate.rules).string.min_bytes = 1];

  // An optional alternative to the cluster name to be used while emitting stats.
  // Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
  // confused with the :ref:`Router Filter Header
  // `.
  string alt_stat_name = 28;

  // Refer to the :ref:`service discovery type `
  // for an explanation of each type.
  enum DiscoveryType {
    // Refer to the :ref:`static discovery type`
    // for an explanation.
    STATIC = 0;

    // Refer to the :ref:`strict DNS discovery type`
    // for an explanation.
    STRICT_DNS = 1;

    // Refer to the :ref:`logical DNS discovery type`
    // for an explanation.
    LOGICAL_DNS = 2;

    // Refer to the :ref:`service discovery type`
    // for an explanation.
    EDS = 3;

    // Refer to the :ref:`original destination discovery type`
    // for an explanation.
    ORIGINAL_DST = 4;
  }

  // Extended cluster type.
  message CustomClusterType {
    // The type of the cluster to instantiate. The name must match a supported cluster type.
    string name = 1 [(validate.rules).string.min_bytes = 1];

    // Cluster specific configuration which depends on the cluster being instantiated.
    // See the supported cluster for further documentation.
    google.protobuf.Any typed_config = 2;
  }

  oneof cluster_discovery_type {
    // The :ref:`service discovery type `
    // to use for resolving the cluster.
    DiscoveryType type = 2 [(validate.rules).enum.defined_only = true];

    // The custom cluster type.
    CustomClusterType cluster_type = 38;
  }

  // Only valid when discovery type is EDS.
  message EdsClusterConfig {
    // Configuration for the source of EDS updates for this Cluster.
    core.ConfigSource eds_config = 1;

    // Optional alternative to cluster name to present to EDS. This does not
    // have the same restrictions as cluster name, i.e. it may be arbitrary
    // length.
    string service_name = 2;
  }

  // Configuration to use for EDS updates for the Cluster.
  EdsClusterConfig eds_cluster_config = 3;
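  // As a non-normative illustration of the discovery fields above, a minimal
  // EDS cluster might look like the following in JSON config. The management
  // server cluster name ("xds_cluster") and the service name are placeholders
  // chosen for this sketch, not values defined by this API:
  //
  // .. code-block:: json
  //
  //   {
  //     "name": "service_foo",
  //     "type": "EDS",
  //     "connect_timeout": "1s",
  //     "eds_cluster_config": {
  //       "service_name": "foo",
  //       "eds_config": {
  //         "api_config_source": {
  //           "api_type": "GRPC",
  //           "grpc_services": [
  //             { "envoy_grpc": { "cluster_name": "xds_cluster" } }
  //           ]
  //         }
  //       }
  //     }
  //   }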
  // The timeout for new network connections to hosts in the cluster.
  google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration.gt = {}];

  // Soft limit on size of the cluster’s connection read and write buffers. If
  // unspecified, an implementation defined default is applied (1MiB).
  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5;

  // Refer to the :ref:`load balancer type ` architecture
  // overview section for information on each type.
  enum LbPolicy {
    // Refer to the :ref:`round robin load balancing policy`
    // for an explanation.
    ROUND_ROBIN = 0;

    // Refer to the :ref:`least request load balancing policy`
    // for an explanation.
    LEAST_REQUEST = 1;

    // Refer to the :ref:`ring hash load balancing policy`
    // for an explanation.
    RING_HASH = 2;

    // Refer to the :ref:`random load balancing policy`
    // for an explanation.
    RANDOM = 3;

    // Refer to the :ref:`original destination load balancing policy`
    // for an explanation.
    //
    // .. attention::
    //
    //   **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead.
    ORIGINAL_DST_LB = 4 [deprecated = true];

    // Refer to the :ref:`Maglev load balancing policy`
    // for an explanation.
    MAGLEV = 5;

    // This load balancer type must be specified if the configured cluster provides a cluster
    // specific load balancer. Consult the configured cluster's documentation for whether to set
    // this option or not.
    CLUSTER_PROVIDED = 6;

    // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy
    // ` field to determine the LB policy.
    // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field
    // and instead using the new load_balancing_policy field as the one and only mechanism for
    // configuring this.]
    LOAD_BALANCING_POLICY_CONFIG = 7;
  }

  // The :ref:`load balancer type ` to use
  // when picking a host in the cluster.
  LbPolicy lb_policy = 6 [(validate.rules).enum.defined_only = true];

  // If the service discovery type is
  // :ref:`STATIC`,
  // :ref:`STRICT_DNS`
  // or :ref:`LOGICAL_DNS`,
  // then hosts is required.
  //
  // .. attention::
  //
  //   **This field is deprecated**. Set the
  //   :ref:`load_assignment` field instead.
  //
  repeated core.Address hosts = 7;

  // Setting this is required for specifying members of
  // :ref:`STATIC`,
  // :ref:`STRICT_DNS`
  // or :ref:`LOGICAL_DNS` clusters.
  // This field supersedes the :ref:`hosts` field.
  // [#comment:TODO(dio): Deprecate the hosts field and add it to :ref:`deprecated log`
  // once load_assignment is implemented.]
  //
  // .. attention::
  //
  //   Setting this allows non-EDS cluster types to contain embedded EDS equivalent
  //   :ref:`endpoint assignments`.
  //   Setting this overrides :ref:`hosts` values.
  //
  ClusterLoadAssignment load_assignment = 33;

  // Optional :ref:`active health checking `
  // configuration for the cluster. If no
  // configuration is specified no health checking will be done and all cluster
  // members will be considered healthy at all times.
  repeated core.HealthCheck health_checks = 8;

  // Optional maximum requests for a single upstream connection. This parameter
  // is respected by both the HTTP/1.1 and HTTP/2 connection pool
  // implementations. If not specified, there is no limit. Setting this
  // parameter to 1 will effectively disable keep alive.
  google.protobuf.UInt32Value max_requests_per_connection = 9;
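  // As a hedged illustration of the fields above, a STATIC cluster whose
  // members are supplied inline via load_assignment (the name, address and
  // port are placeholders for this sketch):
  //
  // .. code-block:: json
  //
  //   {
  //     "name": "service_bar",
  //     "type": "STATIC",
  //     "connect_timeout": "0.25s",
  //     "lb_policy": "ROUND_ROBIN",
  //     "load_assignment": {
  //       "cluster_name": "service_bar",
  //       "endpoints": [{
  //         "lb_endpoints": [{
  //           "endpoint": {
  //             "address": {
  //               "socket_address": { "address": "10.0.0.1", "port_value": 8080 }
  //             }
  //           }
  //         }]
  //       }]
  //     }
  //   }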
  // Optional :ref:`circuit breaking ` for the cluster.
  cluster.CircuitBreakers circuit_breakers = 10;

  // The TLS configuration for connections to the upstream cluster. If no TLS
  // configuration is specified, TLS will not be used for new connections.
  //
  // .. attention::
  //
  //   Server certificate verification is not enabled by default. Configure
  //   :ref:`trusted_ca` to enable verification.
  auth.UpstreamTlsContext tls_context = 11;

  reserved 12;

  // Additional options when handling HTTP requests. These options will be applicable to both
  // HTTP1 and HTTP2 requests.
  core.HttpProtocolOptions common_http_protocol_options = 29;

  // Additional options when handling HTTP1 requests.
  core.Http1ProtocolOptions http_protocol_options = 13;

  // Even if default HTTP2 protocol options are desired, this field must be
  // set so that Envoy will assume that the upstream supports HTTP/2 when
  // making new HTTP connection pool connections. Currently, Envoy only
  // supports prior knowledge for upstream connections. Even if TLS is used
  // with ALPN, `http2_protocol_options` must be specified. As an aside, this allows HTTP/2
  // connections to happen over plain text.
  core.Http2ProtocolOptions http2_protocol_options = 14;

  // The extension_protocol_options field is used to provide extension-specific protocol options
  // for upstream connections. The key should match the extension filter name, such as
  // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
  // specific options.
  map<string, google.protobuf.Struct> extension_protocol_options = 35;

  // The typed_extension_protocol_options field is used to provide extension-specific protocol
  // options for upstream connections. The key should match the extension filter name, such as
  // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
  // specific options.
  map<string, google.protobuf.Any> typed_extension_protocol_options = 36;

  reserved 15;

  // If the DNS refresh rate is specified and the cluster type is either
  // :ref:`STRICT_DNS`
  // or :ref:`LOGICAL_DNS`,
  // this value is used as the cluster’s DNS refresh
  // rate. If this setting is not specified, the value defaults to 5000ms. For
  // cluster types other than
  // :ref:`STRICT_DNS`
  // and :ref:`LOGICAL_DNS`
  // this setting is ignored.
  google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration.gt = {}];

  // Optional configuration for setting the cluster's DNS refresh rate. If the value is set to
  // true, the cluster's DNS refresh rate will be set to the resource record's TTL which comes
  // from DNS resolution.
  bool respect_dns_ttl = 39;

  // When V4_ONLY is selected, the DNS resolver will only perform a lookup for
  // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will
  // only perform a lookup for addresses in the IPv6 family. If AUTO is
  // specified, the DNS resolver will first perform a lookup for addresses in
  // the IPv6 family and fall back to a lookup for addresses in the IPv4 family.
  // For cluster types other than
  // :ref:`STRICT_DNS` and
  // :ref:`LOGICAL_DNS`,
  // this setting is ignored.
  enum DnsLookupFamily {
    AUTO = 0;
    V4_ONLY = 1;
    V6_ONLY = 2;
  }

  // The DNS IP address resolution policy. If this setting is not specified, the
  // value defaults to
  // :ref:`AUTO`.
  DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum.defined_only = true];
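  // A hedged sketch combining the TLS and HTTP/2 options above: an upstream
  // reached over TLS with server certificate verification enabled, speaking
  // HTTP/2 by prior knowledge. The cluster name and CA bundle path are
  // placeholders for this sketch:
  //
  // .. code-block:: json
  //
  //   {
  //     "name": "service_grpc",
  //     "type": "LOGICAL_DNS",
  //     "connect_timeout": "1s",
  //     "http2_protocol_options": {},
  //     "tls_context": {
  //       "common_tls_context": {
  //         "validation_context": {
  //           "trusted_ca": { "filename": "/etc/ssl/certs/ca-certificates.crt" }
  //         }
  //       }
  //     }
  //   }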
  // If DNS resolvers are specified and the cluster type is either
  // :ref:`STRICT_DNS`
  // or :ref:`LOGICAL_DNS`,
  // this value is used to specify the cluster’s DNS resolvers.
  // If this setting is not specified, the value defaults to the default
  // resolver, which uses /etc/resolv.conf for configuration. For cluster types
  // other than
  // :ref:`STRICT_DNS`
  // and :ref:`LOGICAL_DNS`
  // this setting is ignored.
  repeated core.Address dns_resolvers = 18;

  // If specified, outlier detection will be enabled for this upstream cluster.
  // Each of the configuration values can be overridden via
  // :ref:`runtime values `.
  cluster.OutlierDetection outlier_detection = 19;

  // The interval for removing stale hosts from a cluster of type
  // :ref:`ORIGINAL_DST`.
  // Hosts are considered stale if they have not been used
  // as upstream destinations during this interval. New hosts are added
  // to original destination clusters on demand as new connections are
  // redirected to Envoy, causing the number of hosts in the cluster to
  // grow over time. Hosts that are not stale (they are actively used as
  // destinations) are kept in the cluster, which allows connections to
  // them to remain open, saving the latency that would otherwise be spent
  // on opening new connections. If this setting is not specified, the
  // value defaults to 5000ms. For cluster types other than
  // :ref:`ORIGINAL_DST`
  // this setting is ignored.
  google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration.gt = {}];

  // Optional configuration used to bind newly established upstream connections.
  // This overrides any bind_config specified in the bootstrap proto.
  // If the address and port are empty, no bind will be performed.
  core.BindConfig upstream_bind_config = 21;

  // Optionally divide the endpoints in this cluster into subsets defined by
  // endpoint metadata and selected by route and weighted cluster metadata.
  message LbSubsetConfig {
    // If NO_FALLBACK is selected, a result
    // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
    // any cluster endpoint may be returned (subject to policy, health checks,
    // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the
    // endpoints matching the values from the default_subset field.
    enum LbSubsetFallbackPolicy {
      NO_FALLBACK = 0;
      ANY_ENDPOINT = 1;
      DEFAULT_SUBSET = 2;
    }

    // The behavior used when no endpoint subset matches the selected route's
    // metadata. The value defaults to
    // :ref:`NO_FALLBACK`.
    LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum.defined_only = true];

    // Specifies the default subset of endpoints used during fallback if
    // fallback_policy is
    // :ref:`DEFAULT_SUBSET`.
    // Each field in default_subset is
    // compared to the matching LbEndpoint.Metadata under the *envoy.lb*
    // namespace. It is valid for no hosts to match, in which case the behavior
    // is the same as a fallback_policy of
    // :ref:`NO_FALLBACK`.
    google.protobuf.Struct default_subset = 2;

    // Specifications for subsets.
    message LbSubsetSelector {
      // List of keys to match with the weighted cluster metadata.
      repeated string keys = 1;

      // The behavior used when no endpoint subset matches the selected route's
      // metadata.
      LbSubsetSelectorFallbackPolicy fallback_policy = 2
          [(validate.rules).enum.defined_only = true];

      // Allows overriding the top-level fallback policy per selector.
      enum LbSubsetSelectorFallbackPolicy {
        // If NOT_DEFINED, the top-level config fallback policy is used instead.
        NOT_DEFINED = 0;

        // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
        NO_FALLBACK = 1;

        // If ANY_ENDPOINT is selected, any cluster endpoint may be returned
        // (subject to policy, health checks, etc).
        ANY_ENDPOINT = 2;

        // If DEFAULT_SUBSET is selected, load balancing is performed over the
        // endpoints matching the values from the default_subset field.
        DEFAULT_SUBSET = 3;
      }
    }

    // For each entry, LbEndpoint.Metadata's
    // *envoy.lb* namespace is traversed and a subset is created for each unique
    // combination of key and value. For example:
    //
    // .. code-block:: json
    //
    //   { "subset_selectors": [
    //       { "keys": [ "version" ] },
    //       { "keys": [ "stage", "hardware_type" ] }
    //   ]}
    //
    // A subset is matched when the metadata from the selected route and
    // weighted cluster contains the same keys and values as the subset's
    // metadata. The same host may appear in multiple subsets.
    repeated LbSubsetSelector subset_selectors = 3;

    // If true, routing to subsets will take into account the localities and locality weights of
    // the endpoints when making the routing decision.
    //
    // There are some potential pitfalls associated with enabling this feature, as the resulting
    // traffic split after applying both a subset match and locality weights might be undesirable.
    //
    // Consider for example a situation in which you have a 50/50 split across two localities X/Y
    // which have 100 hosts each without subsetting. If the subset LB results in X having only 1
    // host selected but Y having 100, then a lot more load is being dumped on the single host in
    // X than originally anticipated in the load balancing assignment delivered via EDS.
    bool locality_weight_aware = 4;

    // When used with locality_weight_aware, scales the weight of each locality by the ratio
    // of hosts in the subset vs hosts in the original subset. This aims to even out the load
    // going to an individual locality if said locality is disproportionately affected by the
    // subset predicate.
    bool scale_locality_weight = 5;

    // If true, when a fallback policy is configured and its corresponding subset fails to find
    // a host, this will cause any host to be selected instead.
    //
    // This is useful when using the default subset as the fallback policy, given the default
    // subset might become empty. With this option enabled, if that happens the LB will attempt
    // to select a host from the entire cluster.
    bool panic_mode_any = 6;

    // If true, metadata specified for a metadata key will be matched against the corresponding
    // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
    // and any of the elements in the list matches the criteria.
    bool list_as_any = 7;
  }

  // Configuration for load balancing subsetting.
  LbSubsetConfig lb_subset_config = 22;
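  // A hedged example of the subset load balancer above: endpoints are
  // partitioned by their "version" entry in the *envoy.lb* metadata
  // namespace, with a default subset used when a route's metadata matches no
  // subset. The key and values are illustrative, not defaults:
  //
  // .. code-block:: json
  //
  //   {
  //     "lb_subset_config": {
  //       "fallback_policy": "DEFAULT_SUBSET",
  //       "default_subset": { "version": "v1" },
  //       "subset_selectors": [ { "keys": [ "version" ] } ]
  //     }
  //   }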
  // Specific configuration for the LeastRequest load balancing policy.
  message LeastRequestLbConfig {
    // The number of random healthy hosts from which the host with the fewest active requests will
    // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
    google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32.gte = 2];
  }

  // Specific configuration for the :ref:`RingHash`
  // load balancing policy.
  message RingHashLbConfig {
    // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for
    // each provided host) the better the request distribution will reflect the desired weights.
    // Defaults to 1024 entries, and limited to 8M entries. See also
    // :ref:`maximum_ring_size`.
    google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64.lte = 8388608];

    reserved 2;

    // The hash function used to hash hosts onto the ketama ring.
    enum HashFunction {
      // Use `xxHash <https://github.com/Cyan4973/xxHash>`_, this is the default hash function.
      XX_HASH = 0;

      // Use `MurmurHash2 <https://sites.google.com/site/murmurhash/>`_, this is compatible with
      // std::hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
      // on Linux and not macOS.
      MURMUR_HASH_2 = 1;
    }

    // The hash function used to hash hosts onto the ketama ring. The value defaults to
    // :ref:`XX_HASH`.
    HashFunction hash_function = 3 [(validate.rules).enum.defined_only = true];

    // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be
    // lowered to further constrain resource use. See also
    // :ref:`minimum_ring_size`.
    google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64.lte = 8388608];
  }
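  // A hedged sketch of a cluster using the ring hash policy above. The name
  // and ring size are illustrative; endpoints (supplied via load_assignment)
  // and the route-level hash policy that drives hashing are configured
  // elsewhere and elided here:
  //
  // .. code-block:: json
  //
  //   {
  //     "name": "service_cache",
  //     "type": "STRICT_DNS",
  //     "connect_timeout": "0.25s",
  //     "lb_policy": "RING_HASH",
  //     "ring_hash_lb_config": {
  //       "minimum_ring_size": "2048",
  //       "hash_function": "XX_HASH"
  //     }
  //   }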
  // Specific configuration for the
  // :ref:`Original Destination `
  // load balancing policy.
  message OriginalDstLbConfig {
    // When true, :ref:`x-envoy-original-dst-host
    // ` can be used to override the destination
    // address.
    //
    // .. attention::
    //
    //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to
    //   route traffic to arbitrary hosts and/or ports, which may have serious security
    //   consequences.
    bool use_http_header = 1;
  }

  // Optional configuration for the load balancing algorithm selected by
  // LbPolicy. Currently only
  // :ref:`RING_HASH` and
  // :ref:`LEAST_REQUEST`
  // have additional configuration options.
  // Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding
  // LbPolicy will generate an error at runtime.
  oneof lb_config {
    // Optional configuration for the Ring Hash load balancing policy.
    RingHashLbConfig ring_hash_lb_config = 23;

    // Optional configuration for the Original Destination load balancing policy.
    OriginalDstLbConfig original_dst_lb_config = 34;

    // Optional configuration for the LeastRequest load balancing policy.
    LeastRequestLbConfig least_request_lb_config = 37;
  }
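  // A hedged sketch of an original destination cluster that trusts
  // x-envoy-original-dst-host, per the warning above only appropriate where
  // that header is set by a trusted source. The pairing of CLUSTER_PROVIDED
  // with original_dst_lb_config is an assumption of this sketch (the
  // deprecated ORIGINAL_DST_LB policy was the historical pairing); consult
  // the original destination cluster documentation before relying on it:
  //
  // .. code-block:: json
  //
  //   {
  //     "name": "passthrough",
  //     "type": "ORIGINAL_DST",
  //     "connect_timeout": "1s",
  //     "lb_policy": "CLUSTER_PROVIDED",
  //     "original_dst_lb_config": { "use_http_header": true }
  //   }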
  // Common configuration for all load balancer implementations.
  message CommonLbConfig {
    // Configures the :ref:`healthy panic threshold `.
    // If not specified, the default is 50%.
    // To disable panic mode, set to 0%.
    //
    // .. note::
    //   The specified percent will be truncated to the nearest 1%.
    envoy.type.Percent healthy_panic_threshold = 1;

    // Configuration for :ref:`zone aware routing
    // `.
    message ZoneAwareLbConfig {
      // Configures the percentage of requests that will be considered for zone aware routing
      // if zone aware routing is configured. If not specified, the default is 100%.
      // * :ref:`runtime values `.
      // * :ref:`Zone aware routing support `.
      envoy.type.Percent routing_enabled = 1;

      // Configures the minimum upstream cluster size required for zone aware routing.
      // If the upstream cluster size is less than specified, zone aware routing is not performed
      // even if zone aware routing is configured. If not specified, the default is 6.
      // * :ref:`runtime values `.
      // * :ref:`Zone aware routing support `.
      google.protobuf.UInt64Value min_cluster_size = 2;

      // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic
      // mode`. Instead, the cluster will fail all
      // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a
      // failing service.
      bool fail_traffic_on_panic = 3;
    }

    // Configuration for :ref:`locality weighted load balancing
    // `.
    message LocalityWeightedLbConfig {
    }

    oneof locality_config_specifier {
      ZoneAwareLbConfig zone_aware_lb_config = 2;

      LocalityWeightedLbConfig locality_weighted_lb_config = 3;
    }

    // If set, all health check/weight/metadata updates that happen within this duration will be
    // merged and delivered in one shot when the duration expires. The start of the duration is
    // when the first update happens. This is useful for big clusters, with potentially noisy
    // deploys that might trigger excessive CPU usage due to a constant stream of healthcheck
    // state changes or metadata updates. The first set of updates to be seen apply immediately
    // (e.g.: a new cluster). Please always keep in mind that the use of sandbox technologies may
    // change this behavior.
    //
    // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
    // window to 0.
    //
    // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
    // because merging those updates isn't currently safe. See
    // https://github.com/envoyproxy/envoy/pull/3941.
    google.protobuf.Duration update_merge_window = 4;

    // If set to true, Envoy will not consider new hosts when computing load balancing weights
    // until they have been health checked for the first time. This will have no effect unless
    // active health checking is also configured.
    //
    // Ignoring a host means that for any load balancing calculations that adjust weights based
    // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and
    // panic mode) Envoy will exclude these hosts in the denominator.
    //
    // For example, with hosts in two priorities P0 and P1, where P0 looks like
    // {healthy, unhealthy (new), unhealthy (new)}
    // and where P1 looks like
    // {healthy, healthy}
    // all traffic will still hit P0, as 1 / (3 - 2) = 1.
    //
    // Enabling this will allow scaling up the number of hosts for a given cluster without
    // entering panic mode or triggering priority spillover, assuming the hosts pass the first
    // health check.
    //
    // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not
    // contribute to the calculation when deciding whether panic mode is enabled or not.
    bool ignore_new_hosts_until_first_hc = 5;

    // If set to `true`, the cluster manager will drain all existing
    // connections to upstream hosts whenever hosts are added or removed from the cluster.
    bool close_connections_on_host_set_change = 6;
  }

  // Common configuration for all load balancer implementations.
  CommonLbConfig common_lb_config = 27;

  // Optional custom transport socket implementation to use for upstream connections.
  core.TransportSocket transport_socket = 24;

  // The Metadata field can be used to provide additional information about the
  // cluster. It can be used for stats, logging, and varying filter behavior.
  // Fields should use reverse DNS notation to denote which entity within Envoy
  // will need the information. For instance, if the metadata is intended for
  // the Router filter, the filter name should be specified as *envoy.router*.
  core.Metadata metadata = 25;
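  // A hedged illustration of CommonLbConfig: lowering the panic threshold,
  // widening the update merge window, and ignoring new hosts until their
  // first health check passes. The values are illustrative, not defaults:
  //
  // .. code-block:: json
  //
  //   {
  //     "common_lb_config": {
  //       "healthy_panic_threshold": { "value": 25.0 },
  //       "update_merge_window": "5s",
  //       "ignore_new_hosts_until_first_hc": true
  //     }
  //   }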
  enum ClusterProtocolSelection {
    // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2).
    // If :ref:`http2_protocol_options ` are
    // present, HTTP2 will be used, otherwise HTTP1.1 will be used.
    USE_CONFIGURED_PROTOCOL = 0;

    // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection.
    USE_DOWNSTREAM_PROTOCOL = 1;
  }

  // Determines how Envoy selects the protocol used to speak to upstream hosts.
  ClusterProtocolSelection protocol_selection = 26;

  // Optional options for upstream connections.
  envoy.api.v2.UpstreamConnectionOptions upstream_connection_options = 30;

  // If an upstream host becomes unhealthy (as determined by the configured health checks
  // or outlier detection), immediately close all connections to the failed host.
  //
  // .. note::
  //
  //   This is currently only supported for connections created by tcp_proxy.
  //
  // .. note::
  //
  //   The current implementation of this feature closes all connections immediately when
  //   the unhealthy status is detected. If there are a large number of connections open
  //   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of
  //   time exclusively closing these connections, and not processing any other traffic.
  bool close_connections_on_host_health_failure = 31;

  // If this cluster uses EDS or STRICT_DNS to configure its hosts, immediately drain
  // connections from any hosts that are removed from service discovery.
  //
  // This only affects behavior for hosts that are being actively health checked.
  // If this flag is not set to true, Envoy will wait until the hosts fail active health
  // checking before removing them from the cluster.
  bool drain_connections_on_host_removal = 32;

  // An (optional) network filter chain, listed in the order the filters should be applied.
  // The chain will be applied to all outgoing connections that Envoy makes to the upstream
  // servers of this cluster.
  repeated cluster.Filter filters = 40;

  // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the
  // :ref:`lb_policy` field has the value
  // :ref:`LOAD_BALANCING_POLICY_CONFIG`.
  LoadBalancingPolicy load_balancing_policy = 41;

  // [#not-implemented-hide:]
  // If present, tells the client where to send load reports via LRS. If not present, the
  // client will fall back to a client-side default, which may be either (a) don't send any
  // load reports or (b) send load reports for all clusters to a single default server
  // (which may be configured in the bootstrap file).
  //
  // Note that if multiple clusters point to the same LRS server, the client may choose to
  // create a separate stream for each cluster or it may choose to coalesce the data for
  // multiple clusters onto a single stream. Either way, the client must make sure to send
  // the data for any given cluster on no more than one stream.
  //
  // [#next-major-version: In the v3 API, we should consider restructuring this somehow,
  // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation
  // from the LRS stream here.]
  core.ConfigSource lrs_server = 42;
}
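// A hedged sketch of the connection hygiene flags above, combined with
// matching the upstream protocol to the downstream connection (all values
// illustrative; these are cluster-level fields, shown in isolation):
//
// .. code-block:: json
//
//   {
//     "protocol_selection": "USE_DOWNSTREAM_PROTOCOL",
//     "close_connections_on_host_health_failure": true,
//     "drain_connections_on_host_removal": true
//   }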
// [#not-implemented-hide:] Extensible load balancing policy configuration.
//
// Every LB policy defined via this mechanism will be identified via a unique name using reverse
// DNS notation. If the policy needs configuration parameters, it must define a message for its
// own configuration, which will be stored in the config field. The name of the policy will tell
// clients which type of message they should expect to see in the config field.
//
// Note that there are cases where it is useful to be able to independently select LB policies
// for choosing a locality and for choosing an endpoint within that locality. For example, a
// given deployment may always use the same policy to choose the locality, but for choosing the
// endpoint within the locality, some clusters may use weighted-round-robin, while others may
// use some sort of session-based balancing.
//
// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a
// child LB policy for each locality. For each request, the parent chooses the locality and then
// delegates to the child policy for that locality to choose the endpoint within the locality.
//
// To facilitate this, the config message for the top-level LB policy may include a field of
// type LoadBalancingPolicy that specifies the child policy.
//
// [#proto-status: experimental]
message LoadBalancingPolicy {
  message Policy {
    // Required. The name of the LB policy.
    string name = 1;

    // Optional config for the LB policy.
    // No more than one of these two fields may be populated.
    google.protobuf.Struct config = 2;
    google.protobuf.Any typed_config = 3;
  }

  // Each client will iterate over the list in order and stop at the first policy that it
  // supports. This provides a mechanism for starting to use new LB policies that are not yet
  // supported by all clients.
  repeated Policy policies = 1;
}

// An extensible structure containing the address Envoy should bind to when
// establishing upstream connections.
message UpstreamBindConfig {
  // The address Envoy should bind to when establishing upstream connections.
  core.Address source_address = 1;
}

message UpstreamConnectionOptions {
  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
  core.TcpKeepalive tcp_keepalive = 1;
}
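// A hedged example of UpstreamConnectionOptions enabling TCP keepalives on
// upstream sockets, as a cluster-level field. The probe count and the times
// (in seconds) are illustrative, not recommendations:
//
// .. code-block:: json
//
//   {
//     "upstream_connection_options": {
//       "tcp_keepalive": {
//         "keepalive_probes": 3,
//         "keepalive_time": 60,
//         "keepalive_interval": 15
//       }
//     }
//   }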