adding check_format and fix_format (#300)

Signed-off-by: Alyssa Wilk <alyssar@chromium.org>
Authored by alyssawilk, committed by Matt Klein
parent b227517de0
commit 99dab97a47
.circleci/config.yml | 9
.clang-format | 2
api/BUILD | 4
api/base.proto | 2
api/bootstrap.proto | 1
api/cds.proto | 9
api/discovery.proto | 3
api/eds.proto | 22
api/filter/accesslog/BUILD | 2
api/filter/accesslog/accesslog.proto | 16
api/filter/fault.proto | 3
api/filter/http/BUILD | 2
api/filter/http/buffer.proto | 6
api/filter/http/fault.proto | 5
api/filter/http/transcoder.proto | 2
api/filter/network/BUILD | 4
api/filter/network/http_connection_manager.proto | 6
api/filter/network/tcp_proxy.proto | 3
api/hds.proto | 3
api/health_check.proto | 3
api/lds.proto | 27
api/metrics_service.proto | 3
api/rds.proto | 27
api/rls.proto | 7
api/sds.proto | 32
bazel/api_build_system.bzl | 25
test/build/BUILD | 2
test/build/build_test.cc | 2
test/validate/pgv_test.cc | 8
tools/check_format.py | 0
tools/protodoc/BUILD | 4

@ -20,6 +20,14 @@ jobs:
- run: docs/publish.sh
- store_artifacts:
path: generated/docs
format:
docker:
- image: lyft/envoy-build:114e24c6fd05fc026492e9d2ca5608694e5ea59d
resource_class: xlarge
working_directory: /source
steps:
- checkout
- run: ci/do_ci.sh check_format
workflows:
version: 2
@ -27,3 +35,4 @@ workflows:
jobs:
- test
- docs
- format
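
The new format CI job runs ci/do_ci.sh check_format, which per the diffstat is backed by the new tools/check_format.py (its contents are not shown in this diff). As a rough illustration only, not the actual script, a clang-format-driven checker/fixer for the .proto files could look like the Python sketch below; the api/ root, the plain clang-format binary name, and the check/fix mode switch are assumptions.

#!/usr/bin/env python
# Hypothetical sketch only -- not the tools/check_format.py added by this commit.
# Assumes clang-format is on PATH and picks up the repository .clang-format.
import os
import subprocess
import sys

def proto_files(root):
    # Yield every .proto file under the given directory.
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if name.endswith(".proto"):
                yield os.path.join(dirpath, name)

def needs_format(path):
    # clang-format prints the formatted file to stdout; compare with what is on disk.
    formatted = subprocess.check_output(["clang-format", path])
    with open(path, "rb") as f:
        return formatted != f.read()

if __name__ == "__main__":
    fix = len(sys.argv) > 1 and sys.argv[1] == "fix"
    dirty = [p for p in proto_files("api") if needs_format(p)]
    for path in dirty:
        if fix:
            subprocess.check_call(["clang-format", "-i", path])  # rewrite in place
        else:
            print("needs reformatting: " + path)
    sys.exit(1 if dirty and not fix else 0)

In check mode a nonzero exit is what would fail the new CircleCI job; a fix mode along these lines is the local counterpart a developer would run before committing.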

@ -10,5 +10,7 @@ SortIncludes: false
---
Language: Proto
ColumnLimit: 100
SpacesInContainerLiterals: false
AllowShortFunctionsOnASingleLine: false
...
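
The two new Proto-language options line up with the reformatting in the hunks that follow: SpacesInContainerLiterals: false keeps validation option literals tight, as in {gte: 1, lte: 128}, and AllowShortFunctionsOnASingleLine: false appears to be what expands empty message and rpc bodies onto two lines (message RedisHealthCheck { / }). One way to see the effect without touching the tree is to pipe a snippet through clang-format with -assume-filename so the Language: Proto section is selected; the snippet and file name below are purely illustrative.

# Illustration only: run a proto snippet through the repository .clang-format.
# Assumes it is executed from the repository root with clang-format on PATH.
import subprocess

SNIPPET = b"message RedisHealthCheck {}\n"

result = subprocess.run(
    ["clang-format", "-style=file", "-assume-filename=api/health_check.proto"],
    input=SNIPPET,
    stdout=subprocess.PIPE,
    check=True,
)
print(result.stdout.decode())

If the options behave as described above, the output shows the same expanded two-line form that this commit applies to the proto sources.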

@ -96,11 +96,11 @@ api_proto_library(
name = "metrics",
srcs = ["metrics_service.proto"],
has_services = 1,
require_py = 0,
deps = [
":base",
"@promotheus_metrics_model//:client_model",
],
require_py = 0,
)
api_proto_library(
@ -146,7 +146,7 @@ proto_library(
":lds",
":protocol",
":rds",
"//api/filter/accesslog:accesslog",
"//api/filter/accesslog",
"//api/filter/http:buffer",
"//api/filter/http:fault",
"//api/filter/http:health_check",

@ -152,7 +152,7 @@ message ApiConfigSource {
ApiType api_type = 1;
// Multiple cluster names may be provided. If > 1 cluster is defined, clusters
// will be cycled through if any kind of failure occurs.
repeated string cluster_name = 2 [(validate.rules).repeated.min_items = 1];
repeated string cluster_name = 2 [(validate.rules).repeated .min_items = 1];
// For REST APIs, the delay between successive polls.
google.protobuf.Duration refresh_delay = 3;
}

@ -250,7 +250,6 @@ message Watchdog {
// kill behavior. If not specified the default is 0 (disabled).
google.protobuf.Duration kill_timeout = 3;
// If at least two watched threads have been nonresponsive for at least this
// duration assume a true deadlock and kill the entire Envoy process. Set to 0
// to disable this behavior. If not specified the default is 0 (disabled).

@ -20,12 +20,10 @@ import "validate/validate.proto";
// Return list of all clusters this proxy will load balance to.
service ClusterDiscoveryService {
rpc StreamClusters(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchClusters(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:clusters"
body: "*"
@ -319,7 +317,8 @@ message Cluster {
// The % chance that a host will be actually ejected when an outlier status
// is detected through consecutive gateway failures. This setting can be
// used to disable ejection or to ramp it up slowly. Defaults to 0.
google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 [(validate.rules).uint32.lte = 100];
google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
[(validate.rules).uint32.lte = 100];
}
// If specified, outlier detection will be enabled for this upstream cluster.

@ -16,8 +16,7 @@ import "google/protobuf/any.proto";
// the multiplexed singleton APIs at the Envoy instance and management server.
service AggregatedDiscoveryService {
// This is a gRPC-only API.
rpc StreamAggregatedResources(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
}

@ -17,12 +17,10 @@ import "validate/validate.proto";
service EndpointDiscoveryService {
// The resource_names field in DiscoveryRequest specifies a list of clusters
// to subscribe to updates for.
rpc StreamEndpoints(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchEndpoints(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:endpoints"
body: "*"
@ -57,8 +55,7 @@ service EndpointDiscoveryService {
// 6. The management server uses the load reports from all reported Envoys
// from around the world, computes global assignment and prepares traffic
// assignment destined for each zone Envoys are located in. Goto 2.
rpc StreamLoadStats(stream LoadStatsRequest)
returns (stream LoadStatsResponse) {
rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
}
}
@ -93,7 +90,8 @@ message LbEndpoint {
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte:1, lte:128}];
google.protobuf.UInt32Value load_balancing_weight = 4
[(validate.rules).uint32 = {gte: 1, lte: 128}];
}
// A group of endpoints belonging to a Locality.
@ -123,7 +121,8 @@ message LocalityLbEndpoints {
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte:1, lte:128}];
google.protobuf.UInt32Value load_balancing_weight = 3
[(validate.rules).uint32 = {gte: 1, lte: 128}];
// Optional: the priority for this LocalityLbEndpoints. If unspecified this will
// default to the highest priority (0).
@ -207,7 +206,8 @@ message ClusterStats {
string cluster_name = 1 [(validate.rules).string.min_bytes = 1];
// Need at least one.
repeated UpstreamLocalityStats upstream_locality_stats = 2 [(validate.rules).repeated.min_items = 1];
repeated UpstreamLocalityStats upstream_locality_stats = 2
[(validate.rules).repeated .min_items = 1];
// Cluster-level stats such as total_successful_requests may be computed by
// summing upstream_locality_stats. In addition, below there are additional
@ -259,7 +259,7 @@ message ClusterLoadAssignment {
// recover from an outage or should they be unable to autoscale and hence
// overall incoming traffic volume need to be trimmed to protect them.
// [#v2-api-diff: This is known as maintenance mode in v1.]
double drop_overload = 1 [(validate.rules).double = {gte:0, lte: 100}];
double drop_overload = 1 [(validate.rules).double = {gte: 0, lte: 100}];
}
// Load balancing policy settings.
@ -271,7 +271,7 @@ message ClusterLoadAssignment {
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message LoadStatsResponse {
// Clusters to report stats for.
repeated string clusters = 1 [(validate.rules).repeated.min_items = 1];
repeated string clusters = 1 [(validate.rules).repeated .min_items = 1];
// The interval of time to collect stats. The default is 10 seconds.
google.protobuf.Duration load_reporting_interval = 2;

@ -6,6 +6,6 @@ api_proto_library(
has_services = 1,
deps = [
"//api:address",
"//api:base"
"//api:base",
],
)

@ -275,11 +275,13 @@ message DurationFilter {
// Filters for requests that are not health check requests. A health check
// request is marked by the health check filter.
message NotHealthCheckFilter {}
message NotHealthCheckFilter {
}
// Filters for requests that are traceable. See the tracing overview for more
// information on how a request becomes traceable.
message TraceableFilter {}
message TraceableFilter {
}
// Filters for random sampling of requests. Sampling pivots on the header
// :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being present. If
@ -297,14 +299,14 @@ message RuntimeFilter {
// Filters are evaluated sequentially and if one of them returns false, the
// filter returns false immediately.
message AndFilter {
repeated AccessLogFilter filters = 1 [(validate.rules).repeated.min_items = 2];
repeated AccessLogFilter filters = 1 [(validate.rules).repeated .min_items = 2];
}
// Performs a logical or operation on the result of each individual filter.
// Filters are evaluated sequentially and if one of them returns true, the
// filter returns true immediately.
message OrFilter {
repeated AccessLogFilter filters = 2 [(validate.rules).repeated.min_items = 2];
repeated AccessLogFilter filters = 2 [(validate.rules).repeated .min_items = 2];
}
message AccessLogFilter {
@ -396,7 +398,8 @@ message StreamAccessLogsMessage {
// Empty response for the StreamAccessLogs API. Will never be sent. See below.
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message StreamAccessLogsResponse {}
message StreamAccessLogsResponse {
}
// Service for streaming access logs from Envoy to an access log server.
service AccessLogService {
@ -406,8 +409,7 @@ service AccessLogService {
// API for "critical" access logs in which Envoy will buffer access logs for some period of time
// until it gets an ACK so it could then retry. This API is designed for high throughput with the
// expectation that it might be lossy.
rpc StreamAccessLogs(stream StreamAccessLogsMessage)
returns (StreamAccessLogsResponse) {
rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {
}
}

@ -17,7 +17,8 @@ message FaultDelay {
FIXED = 0;
}
// Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is supported.
// Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is
// supported.
FaultDelayType type = 1 [(validate.rules).enum.defined_only = true];
// An integer between 0-100 indicating the percentage of operations/connection requests

@ -5,7 +5,7 @@ licenses(["notice"]) # Apache 2
api_proto_library(
name = "router",
srcs = ["router.proto"],
deps = ["//api/filter/accesslog:accesslog"],
deps = ["//api/filter/accesslog"],
)
api_proto_library(

@ -17,8 +17,6 @@ message Buffer {
// The maximum number of seconds that the filter will wait for a complete
// request before returning a 408 response.
google.protobuf.Duration max_request_time = 2 [(validate.rules).duration = {
required: true,
gt: {}
}];
google.protobuf.Duration max_request_time = 2
[(validate.rules).duration = {required: true, gt: {}}];
}

@ -19,10 +19,7 @@ message FaultAbort {
option (validate.required) = true;
// HTTP status code to use to abort the HTTP request.
uint32 http_status = 2 [(validate.rules).uint32 = {
gte: 200,
lt: 600
}];
uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}];
}
}

@ -35,7 +35,7 @@ message GrpcJsonTranscoder {
// transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, Envoy
// will fail at startup. The ``proto_descriptor`` may contain more services than the service names
// specified here, but they won't be translated.
repeated string services = 2 [(validate.rules).repeated.min_items = 1];
repeated string services = 2 [(validate.rules).repeated .min_items = 1];
message PrintOptions {
// Whether to add spaces, line breaks and indentation to make the JSON

@ -9,7 +9,7 @@ api_proto_library(
"//api:base",
"//api:protocol",
"//api:rds",
"//api/filter/accesslog:accesslog",
"//api/filter/accesslog",
],
)
@ -23,8 +23,8 @@ api_proto_library(
name = "tcp_proxy",
srcs = ["tcp_proxy.proto"],
deps = [
"//api/filter/accesslog:accesslog",
"//api:address",
"//api/filter/accesslog",
],
)

@ -173,9 +173,9 @@ message HttpConnectionManager {
google.protobuf.BoolValue use_remote_address = 14;
// Whether the connection manager will generate the :ref:`x-request-id
// <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to true.
// Generating a random UUID4 is expensive so in high throughput scenarios where this feature is
// not desired it can be disabled.
// <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to
// true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature
// is not desired it can be disabled.
google.protobuf.BoolValue generate_request_id = 15;
// How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP

@ -13,7 +13,8 @@ import "validate/validate.proto";
// [#protodoc-title: TCP Proxy]
// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.
// [#v2-api-diff: The route match now takes place in the :ref:`FilterChainMatch <envoy_api_msg_FilterChainMatch>` table].
// [#v2-api-diff: The route match now takes place in the :ref:`FilterChainMatch
// <envoy_api_msg_FilterChainMatch>` table].
message TcpProxy {
// The prefix to use when emitting :ref:`statistics

@ -56,8 +56,7 @@ service HealthDiscoveryService {
// TODO(htuch): Unlike the gRPC version, there is no stream-based binding of
// request/response. Should we add an identifier to the HealthCheckSpecifier
// to bind with the response?
rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse)
returns (HealthCheckSpecifier) {
rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {
option (google.api.http) = {
post: "/v2/discovery:health_check"
body: "*"

@ -85,7 +85,8 @@ message HealthCheck {
repeated Payload receive = 2;
}
message RedisHealthCheck {}
message RedisHealthCheck {
}
oneof health_checker {
option (validate.required) = true;

@ -21,12 +21,10 @@ import "validate/validate.proto";
// consist of a complete update of all listeners. Existing connections will be
// allowed to drain from listeners that are no longer present.
service ListenerDiscoveryService {
rpc StreamListeners(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchListeners(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:listeners"
body: "*"
@ -137,8 +135,9 @@ message Listener {
// The unique name by which this listener is known. If no name is provided,
// Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically
// updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.
// By default, the maximum length of a listener's name is limited to 60 characters. This limit can be
// increased by setting the :option:`--max-obj-name-len` command line argument to the desired value.
// By default, the maximum length of a listener's name is limited to 60 characters. This limit can
// be increased by setting the :option:`--max-obj-name-len` command line argument to the desired
// value.
string name = 1;
// The address that the listener should listen on. In general, the address must be unique, though
@ -156,16 +155,16 @@ message Listener {
// configured. See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more
// information. When multiple filter chains are configured, each filter chain must have an
// **identical** set of :ref:`filters <envoy_api_field_FilterChain.filters>`. If the filters
// differ, the configuration will fail to load. In the future, this limitation will be relaxed such that
// different filters can be used depending on which filter chain matches (based on SNI or
// some other parameter).
repeated FilterChain filter_chains = 3 [(validate.rules).repeated.min_items = 1];
// differ, the configuration will fail to load. In the future, this limitation will be relaxed
// such that different filters can be used depending on which filter chain matches (based on SNI
// or some other parameter).
repeated FilterChain filter_chains = 3 [(validate.rules).repeated .min_items = 1];
// If a connection is redirected using *iptables*, the port on which the proxy
// receives it might be different from the original destination address. When this flag is set to true,
// the listener hands off redirected connections to the listener associated with the original
// destination address. If there is no listener associated with the original destination address, the
// connection is handled by the listener that receives it. Defaults to false.
// receives it might be different from the original destination address. When this flag is set to
// true, the listener hands off redirected connections to the listener associated with the
// original destination address. If there is no listener associated with the original destination
// address, the connection is handled by the listener that receives it. Defaults to false.
google.protobuf.BoolValue use_original_dst = 4;
// Soft limit on size of the listeners new connection read and write buffers.

@ -18,7 +18,8 @@ service MetricsService {
}
}
message StreamMetricsResponse {}
message StreamMetricsResponse {
}
message StreamMetricsMessage {
message Identifier {

@ -20,12 +20,10 @@ import "validate/validate.proto";
// configurations. Each listener will bind its HTTP connection manager filter to
// a route table via this identifier.
service RouteDiscoveryService {
rpc StreamRoutes(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchRoutes(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:routes"
body: "*"
@ -57,7 +55,7 @@ message WeightedCluster {
}
// Specifies one or more upstream clusters associated with the route.
repeated ClusterWeight clusters = 1 [(validate.rules).repeated.min_items = 1];
repeated ClusterWeight clusters = 1 [(validate.rules).repeated .min_items = 1];
// Specifies the runtime key prefix that should be used to construct the
// runtime keys associated with each cluster. When the *runtime_key_prefix* is
@ -510,7 +508,8 @@ message RateLimit {
// ("source_cluster", "<local service cluster>")
//
// <local service cluster> is derived from the :option:`--service-cluster` option.
message SourceCluster {}
message SourceCluster {
}
// The following descriptor entry is appended to the descriptor:
//
@ -528,7 +527,8 @@ message RateLimit {
// chooses a cluster randomly from a set of clusters with attributed weight.
// * :ref:`cluster_header <envoy_api_field_RouteAction.cluster_header>` indicates which
// header in the request contains the target cluster.
message DestinationCluster {}
message DestinationCluster {
}
// The following descriptor entry is appended when a header contains a key that matches the
// *header_name*:
@ -552,7 +552,8 @@ message RateLimit {
// .. code-block:: cpp
//
// ("remote_address", "<trusted address from x-forwarded-for>")
message RemoteAddress {}
message RemoteAddress {
}
// The following descriptor entry is appended to the descriptor:
//
@ -584,7 +585,7 @@ message RateLimit {
// specified headers in the config. A match will happen if all the
// headers in the config are present in the request with the same values
// (or based on presence if the value field is not in the config).
repeated HeaderMatcher headers = 3 [(validate.rules).repeated.min_items = 1];
repeated HeaderMatcher headers = 3 [(validate.rules).repeated .min_items = 1];
}
oneof action_specifier {
@ -616,7 +617,7 @@ message RateLimit {
// cannot append a descriptor entry, no descriptor is generated for the
// configuration. See :ref:`composing actions
// <config_http_filters_rate_limit_composing_actions>` for additional documentation.
repeated Action actions = 3 [(validate.rules).repeated.min_items = 1];
repeated Action actions = 3 [(validate.rules).repeated .min_items = 1];
}
// .. attention::
@ -659,8 +660,8 @@ message HeaderMatcher {
// The top level element in the routing configuration is a virtual host. Each virtual host has
// a logical name as well as a set of domains that get routed to it based on the incoming request's
// host header. This allows a single listener to service multiple top level domain path trees. Once a
// virtual host is selected based on the domain, the routes are processed in order to see which
// host header. This allows a single listener to service multiple top level domain path trees. Once
// a virtual host is selected based on the domain, the routes are processed in order to see which
// upstream cluster to route to or whether to perform a redirect.
message VirtualHost {
// The logical name of the virtual host. This is used when emitting certain
@ -679,7 +680,7 @@ message VirtualHost {
// host/authority header. Only a single virtual host in the entire route
// configuration can match on *. A domain must be unique across all virtual
// hosts or the config will fail to load.
repeated string domains = 2 [(validate.rules).repeated.min_items = 1];
repeated string domains = 2 [(validate.rules).repeated .min_items = 1];
// The list of routes that will be matched, in order, for incoming requests.
// The first route that matches will be used.

@ -4,7 +4,8 @@ package envoy.api.v2;
service RateLimitService {
// Determine whether rate limiting should take place.
rpc ShouldRateLimit (RateLimitRequest) returns (RateLimitResponse) {}
rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {
}
}
// Main message for a rate limit request. The rate limit service is designed to be fully generic
@ -22,8 +23,8 @@ message RateLimitRequest {
// processed by the service (see below). If any of the descriptors are over limit, the entire
// request is considered to be over limit.
repeated RateLimitDescriptor descriptors = 2;
// Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the
// value is not set in the message, a request increases the matched limit by 1.
// Rate limit requests can optionally specify the number of hits a request adds to the matched
// limit. If the value is not set in the message, a request increases the matched limit by 1.
uint32 hits_addend = 3;
}

@ -12,13 +12,11 @@ import "validate/validate.proto";
// [#protodoc-title: Common TLS configuration]
service SecretDiscoveryService{
rpc StreamSecrets(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
service SecretDiscoveryService {
rpc StreamSecrets(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchSecrets(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchSecrets(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:secrets"
body: "*"
@ -128,25 +126,27 @@ message TlsSessionTicketKeys {
//
// .. attention::
//
// Using this feature has serious security considerations and risks. Improper handling of keys may
// result in loss of secrecy in connections, even if ciphers supporting perfect forward secrecy
// are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some discussion.
// To minimize the risk, you must:
// Using this feature has serious security considerations and risks. Improper handling of keys
// may result in loss of secrecy in connections, even if ciphers supporting perfect forward
// secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some
// discussion. To minimize the risk, you must:
//
// * Keep the session ticket keys at least as secure as your TLS certificate private keys
// * Rotate session ticket keys at least daily, and preferably hourly
// * Always generate keys using a cryptographically-secure random data source
repeated DataSource keys = 1 [(validate.rules).repeated.min_items = 1];
repeated DataSource keys = 1 [(validate.rules).repeated .min_items = 1];
}
message CertificateValidationContext {
// TLS certificate data containing certificate authority certificates to use in verifying
// a presented client side certificate. If not specified and a client certificate is presented it
// will not be verified. By default, a client certificate is optional, unless one of the additional
// options (:ref:`require_client_certificate <envoy_api_field_DownstreamTlsContext.require_client_certificate>`,
// :ref:`verify_certificate_hash <envoy_api_field_CertificateValidationContext.verify_certificate_hash>`, or
// :ref:`verify_subject_alt_name <envoy_api_field_CertificateValidationContext.verify_subject_alt_name>`) is also
// specified.
// will not be verified. By default, a client certificate is optional, unless one of the
// additional options (:ref:`require_client_certificate
// <envoy_api_field_DownstreamTlsContext.require_client_certificate>`,
// :ref:`verify_certificate_hash
// <envoy_api_field_CertificateValidationContext.verify_certificate_hash>`, or
// :ref:`verify_subject_alt_name
// <envoy_api_field_CertificateValidationContext.verify_subject_alt_name>`) is also specified.
DataSource trusted_ca = 1;
// If specified, Envoy will verify (pin) the hex-encoded SHA-256 hash of
@ -182,7 +182,7 @@ message CommonTlsContext {
//
// Although this is a list, currently only a single certificate is supported. This will be
// relaxed in the future.
repeated TlsCertificate tls_certificates = 2 [(validate.rules).repeated.max_items = 1];
repeated TlsCertificate tls_certificates = 2 [(validate.rules).repeated .max_items = 1];
// [#not-implemented-hide:]
repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6;

@ -1,11 +1,18 @@
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
load("@com_lyft_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library")
def _CcSuffix(d):
return d + "_cc"
_PY_SUFFIX="_py"
_CC_SUFFIX="_cc"
def _Suffix(d, suffix):
return d + suffix
def _LibrarySuffix(library_name, suffix):
# Transform //a/b/c to //a/b/c:c in preparation for suffix operation below.
if library_name.startswith("//") and ":" not in library_name:
library_name += ":" + Label(library_name).name
return _Suffix(library_name, suffix)
def _PySuffix(d):
return d + "_py"
# TODO(htuch): has_services is currently ignored but will in future support
# gRPC stub generation.
@ -14,11 +21,11 @@ def _PySuffix(d):
# https://github.com/bazelbuild/bazel/issues/2626 are resolved.
def api_py_proto_library(name, srcs = [], deps = [], has_services = 0):
py_proto_library(
name = _PySuffix(name),
name = _Suffix(name, _PY_SUFFIX),
srcs = srcs,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
deps = [_PySuffix(d) for d in deps] + [
deps = [_LibrarySuffix(d, _PY_SUFFIX) for d in deps] + [
"@com_lyft_protoc_gen_validate//validate:validate_py",
"@googleapis//:http_api_protos_py",
],
@ -54,9 +61,9 @@ def api_proto_library(name, srcs = [], deps = [], has_services = 0, require_py =
# provider. Hopefully one day we can move to a model where this target and
# the proto_library above are aligned.
pgv_cc_proto_library(
name = _CcSuffix(name),
name = _Suffix(name, _CC_SUFFIX),
srcs = srcs,
deps = [_CcSuffix(d) for d in deps],
deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in deps],
external_deps = [
"@com_google_protobuf//:cc_wkt_protos",
"@googleapis//:http_api_protos",
@ -70,5 +77,5 @@ def api_cc_test(name, srcs, proto_deps):
native.cc_test(
name = name,
srcs = srcs,
deps = [_CcSuffix(d) for d in proto_deps],
deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in proto_deps],
)
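
The new _LibrarySuffix helper is what lets the BUILD files in this commit drop redundant target names such as //api/filter/accesslog:accesslog: a package-only label //a/b/c is first expanded to //a/b/c:c and the per-language suffix is then appended. A rough re-statement of the transform in plain Python (outside Bazel, so using string manipulation instead of the real Label type), for illustration only:

# Illustration only; in Starlark the target name comes from Label(library_name).name.
def library_suffix(library_name, suffix):
    if library_name.startswith("//") and ":" not in library_name:
        # //a/b/c -> //a/b/c:c
        library_name += ":" + library_name.rsplit("/", 1)[-1]
    return library_name + suffix

assert library_suffix("//api/filter/accesslog", "_cc") == "//api/filter/accesslog:accesslog_cc"
assert library_suffix("//api:base", "_py") == "//api:base_py"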

@ -6,7 +6,6 @@ api_cc_test(
name = "build_test",
srcs = ["build_test.cc"],
proto_deps = [
"//api/filter/accesslog:accesslog",
"//api:cds",
"//api:discovery",
"//api:eds",
@ -15,5 +14,6 @@ api_cc_test(
"//api:metrics",
"//api:rds",
"//api:rls",
"//api/filter/accesslog",
],
)

@ -4,7 +4,7 @@
#include "google/protobuf/descriptor.h"
// Basic C++ build/link validation for the v2 xDS APIs.
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
const auto methods = {
"envoy.api.v2.filter.accesslog.AccessLogService.StreamAccessLogs",
"envoy.api.v2.AggregatedDiscoveryService.StreamAggregatedResources",

@ -7,14 +7,13 @@
// from data-plane-api.
// TODO(htuch): Switch to using real data-plane-api protos once we can support
// the required field types.
int main(int argc, char *argv[]) {
int main(int argc, char* argv[]) {
{
test::validate::Foo empty;
std::string err;
if (Validate(empty, &err)) {
std::cout << "Unexpected successful validation of empty proto."
<< std::endl;
std::cout << "Unexpected successful validation of empty proto." << std::endl;
exit(EXIT_FAILURE);
}
}
@ -25,8 +24,7 @@ int main(int argc, char *argv[]) {
std::string err;
if (!Validate(non_empty, &err)) {
std::cout << "Unexpected failed validation of empty proto: " << err
<< std::endl;
std::cout << "Unexpected failed validation of empty proto: " << err << std::endl;
exit(EXIT_FAILURE);
}
}

@ -3,9 +3,9 @@ licenses(["notice"]) # Apache 2
py_binary(
name = "protodoc",
srcs = ["protodoc.py"],
visibility = ["//visibility:public"],
deps = [
"@com_lyft_protoc_gen_validate//validate:validate_py",
"@com_google_protobuf//:protobuf_python",
"@com_lyft_protoc_gen_validate//validate:validate_py",
],
visibility = ["//visibility:public"],
)
