api: minified v2 -> v3alpha upgrade via type dependency analysis. (#8529)
This patch moves away from the paradigm of sed-style upgrading of every v2 package to v3alpha. Instead, an additional type analysis phase is performed prior to protoxform by a protoc plugin known as the "type whisperer". The type whisperer produces structured type dependency information for each .proto file. The tools/type_whisperer/typedb_gen.py tool then knits these together into an API-wide type dependency graph. This graph is used to determine which types need upgrading (either they have breaking changes or they transitively depend on types with such changes). Only packages containing upgraded types now undergo the v2 -> v3alpha transition.

The API type database is checked into source/common/config/api_type_db.pb. This may seem a strange location, but in the future the type database will be included as a build artifact of the Envoy binary, since it will be used by the reflection-based version converter to find the type upgrade path for an input proto.

Risk level: Low (the v3alpha protos are not used yet).
Testing: fix_format, manual inspection of diffs, bazel test //test/..., docs build.

Part of #8082
Fixes #8490

Signed-off-by: Harvey Tuch <htuch@google.com>
Mirrored from https://github.com/envoyproxy/envoy @ ad57b58cfbb256af41a467260dce2a8013b7a7fa
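For context, here is a minimal sketch of the transitive computation the type database enables. This is not the actual typedb_gen.py implementation; the dependency map, the breaking-change set, and the helper name below are hypothetical. The idea: starting from the types with breaking changes, walk the reverse dependency edges so that anything that directly or transitively references a changed type is also marked for the v2 -> v3alpha upgrade.

# Hypothetical sketch of the upgrade-set computation described above.
# `deps` maps a fully qualified type name to the type names it references;
# `breaking` is the set of types with breaking (v2 -> v3alpha) changes.
from collections import defaultdict


def types_needing_upgrade(deps, breaking):
    # Invert the dependency edges: for each type, record who references it.
    rdeps = defaultdict(set)
    for typ, referenced in deps.items():
        for dep in referenced:
            rdeps[dep].add(typ)
    # Walk backwards from the breaking types; anything reachable must upgrade.
    upgrade = set(breaking)
    stack = list(breaking)
    while stack:
        current = stack.pop()
        for dependent in rdeps[current]:
            if dependent not in upgrade:
                upgrade.add(dependent)
                stack.append(dependent)
    return upgrade


if __name__ == "__main__":
    deps = {
        "envoy.api.v2.Cluster": ["envoy.api.v2.core.Address"],
        "envoy.config.filter.http.buffer.v2.Buffer": [],
    }
    breaking = {"envoy.api.v2.core.Address"}
    # Cluster transitively depends on a breaking type, Buffer does not.
    print(types_needing_upgrade(deps, breaking))

Packages none of whose types land in this set keep their v2 names, which is consistent with the many v3alpha files deleted in the diff below.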
parent 0cac03fc44
commit 4f2a5fb69a
119 changed files with 973 additions and 1378 deletions

@@ -1,13 +0,0 @@
syntax = "proto3";

package envoy.config.filter.dubbo.router.v3alpha;

option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v3alpha";

// [#protodoc-title: Router]
// Dubbo router :ref:`configuration overview <config_dubbo_filters_router>`.

message Router {
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,34 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.buffer.v3alpha;

option java_outer_classname = "BufferProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v3alpha";

import "google/protobuf/wrappers.proto";

import "validate/validate.proto";

// [#protodoc-title: Buffer]
// Buffer :ref:`configuration overview <config_http_filters_buffer>`.

message Buffer {
  reserved 2;

  // The maximum request size that the filter will buffer before the connection
  // manager will stop buffering and return a 413 response.
  google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}];
}

message BufferPerRoute {
  oneof override {
    option (validate.required) = true;

    // Disable the buffer filter for this particular vhost or route.
    bool disabled = 1 [(validate.rules).bool = {const: true}];

    // Override the global configuration of the filter with this new config.
    Buffer buffer = 2 [(validate.rules).message = {required: true}];
  }
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,26 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.grpc_http1_reverse_bridge.v3alpha;

option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]
// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview
// <config_http_filters_grpc_http1_reverse_bridge>`.

// gRPC reverse bridge filter configuration
message FilterConfig {
  // The content-type to pass to the upstream when the gRPC bridge filter is applied.
  // The filter will also validate that the upstream responds with the same content type.
  string content_type = 1 [(validate.rules).string.min_bytes = 1];

  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and
  // strip the gRPC frame from the request, and add it back in to the response. This will
  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a
  // simple binary encoded protobuf.
  bool withhold_grpc_frames = 2;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,73 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.gzip.v3alpha;

option java_outer_classname = "GzipProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v3alpha";

import "google/protobuf/wrappers.proto";

import "validate/validate.proto";

// [#protodoc-title: Gzip]
// Gzip :ref:`configuration overview <config_http_filters_gzip>`.

message Gzip {
  enum CompressionStrategy {
    DEFAULT = 0;
    FILTERED = 1;
    HUFFMAN = 2;
    RLE = 3;
  }

  message CompressionLevel {
    enum Enum {
      DEFAULT = 0;
      BEST = 1;
      SPEED = 2;
    }
  }

  // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values
  // use more memory, but are faster and produce better compression results. The default value is 5.
  google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}];

  // Minimum response length, in bytes, which will trigger compression. The default value is 30.
  google.protobuf.UInt32Value content_length = 2 [(validate.rules).uint32 = {gte: 30}];

  // A value used for selecting the zlib compression level. This setting will affect speed and
  // amount of compression applied to the content. "BEST" provides higher compression at the cost of
  // higher latency, "SPEED" provides lower compression with minimum impact on response time.
  // "DEFAULT" provides an optimal result between speed and compression. This field will be set to
  // "DEFAULT" if not specified.
  CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}];

  // A value used for selecting the zlib compression strategy which is directly related to the
  // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though
  // there are situations which changing this parameter might produce better results. For example,
  // run-length encoding (RLE) is typically used when the content is known for having sequences
  // which same data occurs many consecutive times. For more information about each strategy, please
  // refer to zlib manual.
  CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}];

  // Set of strings that allows specifying which mime-types yield compression; e.g.,
  // application/json, text/html, etc. When this field is not defined, compression will be applied
  // to the following mime-types: "application/javascript", "application/json",
  // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml".
  repeated string content_type = 6 [(validate.rules).repeated = {max_items: 50}];

  // If true, disables compression when the response contains an etag header. When it is false, the
  // filter will preserve weak etags and remove the ones that require strong validation.
  bool disable_on_etag_header = 7;

  // If true, removes accept-encoding from the request headers before dispatching it to the upstream
  // so that responses do not get compressed before reaching the filter.
  bool remove_accept_encoding_header = 8;

  // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size.
  // Larger window results in better compression at the expense of memory usage. The default is 12
  // which will produce a 4096 bytes window. For more details about this parameter, please refer to
  // zlib manual > deflateInit2.
  google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}];
}

@@ -1,92 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.header_to_metadata.v3alpha;

option java_outer_classname = "HeaderToMetadataProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Header-To-Metadata Filter]
//
// The configuration for transforming headers into metadata. This is useful
// for matching load balancer subsets, logging, etc.
//
// Header to Metadata :ref:`configuration overview <config_http_filters_header_to_metadata>`.

message Config {
  enum ValueType {
    STRING = 0;

    NUMBER = 1;

    // The value is a serialized `protobuf.Value
    // <https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/struct.proto#L62>`_.
    PROTOBUF_VALUE = 2;
  }

  // ValueEncode defines the encoding algorithm.
  enum ValueEncode {
    // The value is not encoded.
    NONE = 0;

    // The value is encoded in `Base64 <https://tools.ietf.org/html/rfc4648#section-4>`_.
    // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the
    // non-ASCII characters in the header.
    BASE64 = 1;
  }

  message KeyValuePair {
    // The namespace — if this is empty, the filter's namespace will be used.
    string metadata_namespace = 1;

    // The key to use within the namespace.
    string key = 2 [(validate.rules).string = {min_bytes: 1}];

    // The value to pair with the given key.
    //
    // When used for a `on_header_present` case, if value is non-empty it'll be used
    // instead of the header value. If both are empty, no metadata is added.
    //
    // When used for a `on_header_missing` case, a non-empty value must be provided
    // otherwise no metadata is added.
    string value = 3;

    // The value's type — defaults to string.
    ValueType type = 4;

    // How is the value encoded, default is NONE (not encoded).
    // The value will be decoded accordingly before storing to metadata.
    ValueEncode encode = 5;
  }

  // A Rule defines what metadata to apply when a header is present or missing.
  message Rule {
    // The header that triggers this rule — required.
    string header = 1 [(validate.rules).string = {min_bytes: 1}];

    // If the header is present, apply this metadata KeyValuePair.
    //
    // If the value in the KeyValuePair is non-empty, it'll be used instead
    // of the header value.
    KeyValuePair on_header_present = 2;

    // If the header is not present, apply this metadata KeyValuePair.
    //
    // The value in the KeyValuePair must be set, since it'll be used in lieu
    // of the missing header value.
    KeyValuePair on_header_missing = 3;

    // Whether or not to remove the header after a rule is applied.
    //
    // This prevents headers from leaking.
    bool remove = 4;
  }

  // The list of rules to apply to requests.
  repeated Rule request_rules = 1;

  // The list of rules to apply to responses.
  repeated Rule response_rules = 2;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,20 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.lua.v3alpha;

option java_outer_classname = "LuaProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Lua]
// Lua :ref:`configuration overview <config_http_filters_lua>`.

message Lua {
  // The Lua code that Envoy will execute. This can be a very small script that
  // further loads code from disk if desired. Note that if JSON configuration is used, the code must
  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line
  // strings so complex scripts can be easily expressed inline in the configuration.
  string inline_code = 1 [(validate.rules).string = {min_bytes: 1}];
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,22 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.original_src.v3alpha;

option java_outer_classname = "OriginalSrcProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Original Src Filter]
// Use the Original source address on upstream connections.

// The Original Src filter binds upstream connections to the original source address determined
// for the request. This address could come from something like the Proxy Protocol filter, or it
// could come from trusted http headers.
message OriginalSrc {
  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to
  // ensure that non-local addresses may be routed back through envoy when binding to the original
  // source address. The option will not be applied if the mark is 0.
  uint32 mark = 1;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,52 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.squash.v3alpha;

option java_outer_classname = "SquashProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v3alpha";

import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "validate/validate.proto";

// [#protodoc-title: Squash]
// Squash :ref:`configuration overview <config_http_filters_squash>`.

message Squash {
  // The name of the cluster that hosts the Squash server.
  string cluster = 1 [(validate.rules).string = {min_bytes: 1}];

  // When the filter requests the Squash server to create a DebugAttachment, it will use this
  // structure as template for the body of the request. It can contain reference to environment
  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server
  // with more information to find the process to attach the debugger to. For example, in a
  // Istio/k8s environment, this will contain information on the pod:
  //
  // .. code-block:: json
  //
  //    {
  //      "spec": {
  //        "attachment": {
  //          "pod": "{{ POD_NAME }}",
  //          "namespace": "{{ POD_NAMESPACE }}"
  //        },
  //        "match_request": true
  //      }
  //    }
  //
  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)
  google.protobuf.Struct attachment_template = 2;

  // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second.
  google.protobuf.Duration request_timeout = 3;

  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60
  // seconds.
  google.protobuf.Duration attachment_timeout = 4;

  // Amount of time to poll for the status of the attachment object in the Squash server
  // (to check if has been attached). Defaults to 1 second.
  google.protobuf.Duration attachment_poll_period = 5;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,152 +0,0 @@
syntax = "proto3";

package envoy.config.filter.http.transcoder.v3alpha;

option java_outer_classname = "TranscoderProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: gRPC-JSON transcoder]
// gRPC-JSON transcoder :ref:`configuration overview <config_http_filters_grpc_json_transcoder>`.

message GrpcJsonTranscoder {
  message PrintOptions {
    // Whether to add spaces, line breaks and indentation to make the JSON
    // output easy to read. Defaults to false.
    bool add_whitespace = 1;

    // Whether to always print primitive fields. By default primitive
    // fields with default values will be omitted in JSON output. For
    // example, an int32 field set to 0 will be omitted. Setting this flag to
    // true will override the default behavior and print primitive fields
    // regardless of their values. Defaults to false.
    bool always_print_primitive_fields = 2;

    // Whether to always print enums as ints. By default they are rendered
    // as strings. Defaults to false.
    bool always_print_enums_as_ints = 3;

    // Whether to preserve proto field names. By default protobuf will
    // generate JSON field names using the ``json_name`` option, or lower camel case,
    // in that order. Setting this flag will preserve the original field names. Defaults to false.
    bool preserve_proto_field_names = 4;
  }

  oneof descriptor_set {
    option (validate.required) = true;

    // Supplies the filename of
    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC
    // services.
    string proto_descriptor = 1;

    // Supplies the binary content of
    // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC
    // services.
    bytes proto_descriptor_bin = 4;
  }

  // A list of strings that
  // supplies the fully qualified service names (i.e. "package_name.service_name") that
  // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``,
  // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than
  // the service names specified here, but they won't be translated.
  repeated string services = 2 [(validate.rules).repeated = {min_items: 1}];

  // Control options for response JSON. These options are passed directly to
  // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/
  // google.protobuf.util.json_util#JsonPrintOptions>`_.
  PrintOptions print_options = 3;

  // Whether to keep the incoming request route after the outgoing headers have been transformed to
  // the match the upstream gRPC service. Note: This means that routes for gRPC services that are
  // not transcoded cannot be used in combination with *match_incoming_request_route*.
  bool match_incoming_request_route = 5;

  // A list of query parameters to be ignored for transcoding method mapping.
  // By default, the transcoder filter will not transcode a request if there are any
  // unknown/invalid query parameters.
  //
  // Example :
  //
  // .. code-block:: proto
  //
  //    service Bookstore {
  //      rpc GetShelf(GetShelfRequest) returns (Shelf) {
  //        option (google.api.http) = {
  //          get: "/shelves/{shelf}"
  //        };
  //      }
  //    }
  //
  //    message GetShelfRequest {
  //      int64 shelf = 1;
  //    }
  //
  //    message Shelf {}
  //
  // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable
  // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow
  // the same request to be mapped to ``GetShelf``.
  repeated string ignored_query_parameters = 6;

  // Whether to route methods without the ``google.api.http`` option.
  //
  // Example :
  //
  // .. code-block:: proto
  //
  //    package bookstore;
  //
  //    service Bookstore {
  //      rpc GetShelf(GetShelfRequest) returns (Shelf) {}
  //    }
  //
  //    message GetShelfRequest {
  //      int64 shelf = 1;
  //    }
  //
  //    message Shelf {}
  //
  // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of
  // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``.
  bool auto_mapping = 7;

  // Whether to ignore query parameters that cannot be mapped to a corresponding
  // protobuf field. Use this if you cannot control the query parameters and do
  // not know them beforehand. Otherwise use ``ignored_query_parameters``.
  // Defaults to false.
  bool ignore_unknown_query_parameters = 8;

  // Whether to convert gRPC status headers to JSON.
  // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status``
  // from the ``grpc-status-details-bin`` header and use it as JSON body.
  // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and
  // ``grpc-message`` headers.
  // The error details types must be present in the ``proto_descriptor``.
  //
  // For example, if an upstream server replies with headers:
  //
  // .. code-block:: none
  //
  //    grpc-status: 5
  //    grpc-status-details-bin:
  //        CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ
  //
  // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message
  // ``google.rpc.Status``. It will be transcoded into:
  //
  // .. code-block:: none
  //
  //    HTTP/1.1 404 Not Found
  //    content-type: application/json
  //
  //    {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]}
  //
  // In order to transcode the message, the ``google.rpc.RequestInfo`` type from
  // the ``google/rpc/error_details.proto`` should be included in the configured
  // :ref:`proto descriptor set <config_grpc_json_generate_proto_descriptor_set>`.
  bool convert_grpc_status = 9;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,26 +0,0 @@
syntax = "proto3";

package envoy.config.filter.listener.original_src.v3alpha;

option java_outer_classname = "OriginalSrcProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Original Src Filter]
// Use the Original source address on upstream connections.

// The Original Src filter binds upstream connections to the original source address determined
// for the connection. This address could come from something like the Proxy Protocol filter, or it
// could come from trusted http headers.
message OriginalSrc {
  // Whether to bind the port to the one used in the original downstream connection.
  // [#not-implemented-hide:]
  bool bind_port = 1;

  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to
  // ensure that non-local addresses may be routed back through envoy when binding to the original
  // source address. The option will not be applied if the mark is 0.
  uint32 mark = 2;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,13 +0,0 @@
syntax = "proto3";

package envoy.config.filter.thrift.router.v3alpha;

option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v3alpha";

// [#protodoc-title: Router]
// Thrift router :ref:`configuration overview <config_thrift_filters_router>`.

message Router {
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,18 +0,0 @@
syntax = "proto3";

package envoy.config.health_checker.redis.v3alpha;

option java_outer_classname = "RedisProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v3alpha";

// [#protodoc-title: Redis]
// Redis health checker :ref:`configuration overview <config_health_checkers_redis>`.

message Redis {
  // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value
  // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other
  // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance
  // by setting the specified key to any value and waiting for traffic to drain.
  string key = 1;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,24 +0,0 @@
syntax = "proto3";

package envoy.config.listener.v3alpha;

option java_outer_classname = "ApiListenerProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.listener.v3alpha";

import "google/protobuf/any.proto";

// [#not-implemented-hide:]
// Describes a type of API listener, which is used in non-proxy clients. The type of API
// exposed to the non-proxy application depends on the type of API listener.
message ApiListener {
  // The type in this field determines the type of API listener. At present, the following
  // types are supported:
  // envoy.config.filter.network.http_connection_manager.v3alpha.HttpConnectionManager (HTTP)
  // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the
  // specific config message for each type of API listener. We could not do this in v2 because
  // it would have caused circular dependencies for go protos: lds.proto depends on this file,
  // and http_connection_manager.proto depends on rds.proto, which is in the same directory as
  // lds.proto, so lds.proto cannot depend on this file.]
  google.protobuf.Any api_listener = 1;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,78 +0,0 @@
syntax = "proto3";

package envoy.config.overload.v3alpha;

option java_outer_classname = "OverloadProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.overload.v3alpha";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "validate/validate.proto";

// [#protodoc-title: Overload Manager]

// The Overload Manager provides an extensible framework to protect Envoy instances
// from overload of various resources (memory, cpu, file descriptors, etc).
// It monitors a configurable set of resources and notifies registered listeners
// when triggers related to those resources fire.

message ResourceMonitor {
  // The name of the resource monitor to instantiate. Must match a registered
  // resource monitor type. The built-in resource monitors are:
  //
  // * :ref:`envoy.resource_monitors.fixed_heap
  //   <envoy_api_msg_config.resource_monitor.fixed_heap.v3alpha.FixedHeapConfig>`
  // * :ref:`envoy.resource_monitors.injected_resource
  //   <envoy_api_msg_config.resource_monitor.injected_resource.v3alpha.InjectedResourceConfig>`
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // Configuration for the resource monitor being instantiated.
  oneof config_type {
    google.protobuf.Struct config = 2;

    google.protobuf.Any typed_config = 3;
  }
}

message ThresholdTrigger {
  // If the resource pressure is greater than or equal to this value, the trigger
  // will fire.
  double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}];
}

message Trigger {
  // The name of the resource this is a trigger for.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  oneof trigger_oneof {
    option (validate.required) = true;

    ThresholdTrigger threshold = 2;
  }
}

message OverloadAction {
  // The name of the overload action. This is just a well-known string that listeners can
  // use for registering callbacks. Custom overload actions should be named using reverse
  // DNS to ensure uniqueness.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // A set of triggers for this action. If any of these triggers fire the overload action
  // is activated. Listeners are notified when the overload action transitions from
  // inactivated to activated, or vice versa.
  repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];
}

message OverloadManager {
  // The interval for refreshing resource usage.
  google.protobuf.Duration refresh_interval = 1;

  // The set of resources to monitor.
  repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}];

  // The set of overload actions.
  repeated OverloadAction actions = 3;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,18 +0,0 @@
syntax = "proto3";

package envoy.config.resource_monitor.fixed_heap.v3alpha;

option java_outer_classname = "FixedHeapProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Fixed heap]

// The fixed heap resource monitor reports the Envoy process memory pressure, computed as a
// fraction of currently reserved heap memory divided by a statically configured maximum
// specified in the FixedHeapConfig.
message FixedHeapConfig {
  uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}];
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,19 +0,0 @@
syntax = "proto3";

package envoy.config.resource_monitor.injected_resource.v3alpha;

option java_outer_classname = "InjectedResourceProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v3alpha";

import "validate/validate.proto";

// [#protodoc-title: Injected resource]

// The injected resource monitor allows injecting a synthetic resource pressure into Envoy
// via a text file, which must contain a floating-point number in the range [0..1] representing
// the resource pressure and be updated atomically by a symbolic link swap.
// This is intended primarily for integration tests to force Envoy into an overloaded state.
message InjectedResourceConfig {
  string filename = 1 [(validate.rules).string = {min_bytes: 1}];
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,23 +0,0 @@
syntax = "proto3";

package envoy.config.transport_socket.alts.v3alpha;

option java_outer_classname = "AltsProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v3alpha";

// [#protodoc-title: ALTS]

import "validate/validate.proto";

// Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy.
// https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/
message Alts {
  // The location of a handshaker service, this is usually 169.254.169.254:8080
  // on GCE.
  string handshaker_service = 1 [(validate.rules).string.min_bytes = 1];

  // The acceptable service accounts from peer, peers not in the list will be rejected in the
  // handshake validation step. If empty, no validation will be performed.
  repeated string peer_service_accounts = 2;
}

@@ -1,7 +0,0 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

@@ -1,130 +0,0 @@
syntax = "proto3";

package envoy.data.cluster.v3alpha;

option java_outer_classname = "OutlierDetectionEventProto";
option java_multiple_files = true;
option java_package = "io.envoyproxy.envoy.data.cluster.v3alpha";

import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";

import "validate/validate.proto";

// [#protodoc-title: Outlier detection logging events]
// :ref:`Outlier detection logging <arch_overview_outlier_detection_logging>`.

// Type of ejection that took place
enum OutlierEjectionType {
  // In case upstream host returns certain number of consecutive 5xx.
  // If
  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`
  // is *false*, all type of errors are treated as HTTP 5xx errors.
  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for
  // details.
  CONSECUTIVE_5XX = 0;

  // In case upstream host returns certain number of consecutive gateway errors
  CONSECUTIVE_GATEWAY_FAILURE = 1;

  // Runs over aggregated success rate statistics from every host in cluster
  // and selects hosts for which ratio of successful replies deviates from other hosts
  // in the cluster.
  // If
  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`
  // is *false*, all errors (externally and locally generated) are used to calculate success rate
  // statistics. See :ref:`Cluster outlier detection <arch_overview_outlier_detection>`
  // documentation for details.
  SUCCESS_RATE = 2;

  // Consecutive local origin failures: Connection failures, resets, timeouts, etc
  // This type of ejection happens only when
  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`
  // is set to *true*.
  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for
  CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3;

  // Runs over aggregated success rate statistics for local origin failures
  // for all hosts in the cluster and selects hosts for which success rate deviates from other
  // hosts in the cluster. This type of ejection happens only when
  // :ref:`outlier_detection.split_external_local_origin_errors<envoy_api_field_cluster.OutlierDetection.split_external_local_origin_errors>`
  // is set to *true*.
  // See :ref:`Cluster outlier detection <arch_overview_outlier_detection>` documentation for
  SUCCESS_RATE_LOCAL_ORIGIN = 4;

  // Runs over aggregated success rate statistics from every host in cluster and selects hosts for
  // which ratio of failed replies is above configured value.
  FAILURE_PERCENTAGE = 5;

  // Runs over aggregated success rate statistics for local origin failures from every host in
  // cluster and selects hosts for which ratio of failed replies is above configured value.
  FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6;
}

// Represents possible action applied to upstream host
enum Action {
  // In case host was excluded from service
  EJECT = 0;

  // In case host was brought back into service
  UNEJECT = 1;
}

message OutlierDetectionEvent {
  // In case of eject represents type of ejection that took place.
  OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}];

  // Timestamp for event.
  google.protobuf.Timestamp timestamp = 2;

  // The time in seconds since the last action (either an ejection or unejection) took place.
  google.protobuf.UInt64Value secs_since_last_action = 3;

  // The :ref:`cluster <envoy_api_msg_Cluster>` that owns the ejected host.
  string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}];

  // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``.
  string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}];

  // The action that took place.
  Action action = 6 [(validate.rules).enum = {defined_only: true}];

  // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to
  // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and
  // then re-added).
  uint32 num_ejections = 7;

  // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was
  // ejected. ``false`` means the event was logged but the host was not actually ejected.
  bool enforced = 8;

  oneof event {
    option (validate.required) = true;

    OutlierEjectSuccessRate eject_success_rate_event = 9;

    OutlierEjectConsecutive eject_consecutive_event = 10;

    OutlierEjectFailurePercentage eject_failure_percentage_event = 11;
  }
}

message OutlierEjectSuccessRate {
  // Host’s success rate at the time of the ejection event on a 0-100 range.
  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];

  // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100
  // range.
  uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}];

  // Success rate ejection threshold at the time of the ejection event.
  uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}];
}

message OutlierEjectConsecutive {
}

message OutlierEjectFailurePercentage {
  // Host's success rate at the time of the ejection event on a 0-100 range.
  uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}];
}