adding check_format and fix_format (#300)

Signed-off-by: Alyssa Wilk <alyssar@chromium.org>
Authored by alyssawilk 7 years ago, committed by Matt Klein
parent b227517de0
commit 99dab97a47
28 changed files (changed lines in parentheses):
1. .circleci/config.yml (9)
2. .clang-format (2)
3. api/BUILD (4)
4. api/bootstrap.proto (1)
5. api/cds.proto (9)
6. api/discovery.proto (3)
7. api/eds.proto (18)
8. api/filter/accesslog/BUILD (2)
9. api/filter/accesslog/accesslog.proto (12)
10. api/filter/fault.proto (3)
11. api/filter/http/BUILD (2)
12. api/filter/http/buffer.proto (6)
13. api/filter/http/fault.proto (5)
14. api/filter/network/BUILD (4)
15. api/filter/network/http_connection_manager.proto (6)
16. api/filter/network/tcp_proxy.proto (3)
17. api/hds.proto (3)
18. api/health_check.proto (3)
19. api/lds.proto (25)
20. api/metrics_service.proto (3)
21. api/rds.proto (19)
22. api/rls.proto (7)
23. api/sds.proto (26)
24. bazel/api_build_system.bzl (25)
25. test/build/BUILD (2)
26. test/validate/pgv_test.cc (6)
27. tools/check_format.py (0)
28. tools/protodoc/BUILD (4)

@@ -20,6 +20,14 @@ jobs:
- run: docs/publish.sh
- store_artifacts:
path: generated/docs
format:
docker:
- image: lyft/envoy-build:114e24c6fd05fc026492e9d2ca5608694e5ea59d
resource_class: xlarge
working_directory: /source
steps:
- checkout
- run: ci/do_ci.sh check_format
workflows:
version: 2
@@ -27,3 +35,4 @@ workflows:
jobs:
- test
- docs
- format
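The new format CI job above runs ci/do_ci.sh check_format in the same Envoy build image as the other jobs, and the workflow now gates on it alongside test and docs. The diff does not show the body of tools/check_format.py (the file list reports 0 changed lines for it), so the following is only a minimal, hypothetical sketch of what a clang-format driven check_format/fix_format script for the proto tree could look like; the file glob, the check/fix argument convention, and the script layout are illustrative assumptions, not the actual tool:

#!/usr/bin/env python
# Hypothetical sketch of a clang-format based check_format / fix_format driver.
# Assumes clang-format is on PATH and that the repository root holds the
# .clang-format whose new "Language: Proto" section is added in this commit.
import glob
import subprocess
import sys

def format_proto(path):
    # -style=file makes clang-format pick up the nearest .clang-format.
    return subprocess.check_output(["clang-format", "-style=file", path])

def main(mode):
    dirty = []
    for path in glob.glob("api/**/*.proto", recursive=True):
        formatted = format_proto(path)
        with open(path, "rb") as f:
            original = f.read()
        if formatted != original:
            if mode == "fix":
                with open(path, "wb") as f:
                    f.write(formatted)
            dirty.append(path)
    if mode == "check" and dirty:
        print("ERROR: files need reformatting:\n  " + "\n  ".join(dirty))
        return 1
    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1] if len(sys.argv) > 1 else "check"))

Under this sketch, CI would run the check mode and fail on any pending reformat, while a developer would run the fix mode locally before pushing.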

@@ -10,5 +10,7 @@ SortIncludes: false
---
Language: Proto
ColumnLimit: 100
SpacesInContainerLiterals: false
AllowShortFunctionsOnASingleLine: false
...
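The two options added to the Proto section line up with most of the mechanical churn in this commit: AllowShortFunctionsOnASingleLine: false matches the expansion of empty bodies such as message RedisHealthCheck {} onto two lines, and SpacesInContainerLiterals: false matches option literals like {gte: 1, lte: 128} appearing without padding inside the braces. A small sketch for previewing the effect on a snippet, assuming clang-format is installed and is run from the repository root so -style=file resolves to this .clang-format:

# Sketch: pipe a proto snippet through clang-format using the repo style.
# --assume-filename makes clang-format treat stdin as a .proto file and,
# together with -style=file, search for .clang-format from that path upward.
import subprocess

snippet = b"""message RedisHealthCheck {}
message Foo {
  uint32 weight = 1 [(validate.rules).uint32 = { gte:1, lte:128 }];
}
"""

result = subprocess.run(
    ["clang-format", "-style=file", "--assume-filename=api/sample.proto"],
    input=snippet,
    stdout=subprocess.PIPE,
    check=True,
)
print(result.stdout.decode())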

@@ -96,11 +96,11 @@ api_proto_library(
name = "metrics",
srcs = ["metrics_service.proto"],
has_services = 1,
require_py = 0,
deps = [
":base",
"@promotheus_metrics_model//:client_model",
],
require_py = 0,
)
api_proto_library(
@@ -146,7 +146,7 @@ proto_library(
":lds",
":protocol",
":rds",
"//api/filter/accesslog:accesslog",
"//api/filter/accesslog",
"//api/filter/http:buffer",
"//api/filter/http:fault",
"//api/filter/http:health_check",

@@ -250,7 +250,6 @@ message Watchdog {
// kill behavior. If not specified the default is 0 (disabled).
google.protobuf.Duration kill_timeout = 3;
// If at least two watched threads have been nonresponsive for at least this
// duration assume a true deadlock and kill the entire Envoy process. Set to 0
// to disable this behavior. If not specified the default is 0 (disabled).

@@ -20,12 +20,10 @@ import "validate/validate.proto";
// Return list of all clusters this proxy will load balance to.
service ClusterDiscoveryService {
rpc StreamClusters(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchClusters(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:clusters"
body: "*"
@@ -319,7 +317,8 @@ message Cluster {
// The % chance that a host will be actually ejected when an outlier status
// is detected through consecutive gateway failures. This setting can be
// used to disable ejection or to ramp it up slowly. Defaults to 0.
google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11 [(validate.rules).uint32.lte = 100];
google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
[(validate.rules).uint32.lte = 100];
}
// If specified, outlier detection will be enabled for this upstream cluster.

@@ -16,8 +16,7 @@ import "google/protobuf/any.proto";
// the multiplexed singleton APIs at the Envoy instance and management server.
service AggregatedDiscoveryService {
// This is a gRPC-only API.
rpc StreamAggregatedResources(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
}

@@ -17,12 +17,10 @@ import "validate/validate.proto";
service EndpointDiscoveryService {
// The resource_names field in DiscoveryRequest specifies a list of clusters
// to subscribe to updates for.
rpc StreamEndpoints(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchEndpoints(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:endpoints"
body: "*"
@@ -57,8 +55,7 @@ service EndpointDiscoveryService {
// 6. The management server uses the load reports from all reported Envoys
// from around the world, computes global assignment and prepares traffic
// assignment destined for each zone Envoys are located in. Goto 2.
rpc StreamLoadStats(stream LoadStatsRequest)
returns (stream LoadStatsResponse) {
rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
}
}
@@ -93,7 +90,8 @@ message LbEndpoint {
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte:1, lte:128}];
google.protobuf.UInt32Value load_balancing_weight = 4
[(validate.rules).uint32 = {gte: 1, lte: 128}];
}
// A group of endpoints belonging to a Locality.
@@ -123,7 +121,8 @@ message LocalityLbEndpoints {
// The limit of 128 is somewhat arbitrary, but is applied due to performance
// concerns with the current implementation and can be removed when
// `this issue <https://github.com/envoyproxy/envoy/issues/1285>`_ is fixed.
google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte:1, lte:128}];
google.protobuf.UInt32Value load_balancing_weight = 3
[(validate.rules).uint32 = {gte: 1, lte: 128}];
// Optional: the priority for this LocalityLbEndpoints. If unspecified this will
// default to the highest priority (0).
@@ -207,7 +206,8 @@ message ClusterStats {
string cluster_name = 1 [(validate.rules).string.min_bytes = 1];
// Need at least one.
repeated UpstreamLocalityStats upstream_locality_stats = 2 [(validate.rules).repeated.min_items = 1];
repeated UpstreamLocalityStats upstream_locality_stats = 2
[(validate.rules).repeated .min_items = 1];
// Cluster-level stats such as total_successful_requests may be computed by
// summing upstream_locality_stats. In addition, below there are additional

@@ -6,6 +6,6 @@ api_proto_library(
has_services = 1,
deps = [
"//api:address",
"//api:base"
"//api:base",
],
)

@@ -275,11 +275,13 @@ message DurationFilter {
// Filters for requests that are not health check requests. A health check
// request is marked by the health check filter.
message NotHealthCheckFilter {}
message NotHealthCheckFilter {
}
// Filters for requests that are traceable. See the tracing overview for more
// information on how a request becomes traceable.
message TraceableFilter {}
message TraceableFilter {
}
// Filters for random sampling of requests. Sampling pivots on the header
// :ref:`x-request-id<config_http_conn_man_headers_x-request-id>` being present. If
@@ -396,7 +398,8 @@ message StreamAccessLogsMessage {
// Empty response for the StreamAccessLogs API. Will never be sent. See below.
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message StreamAccessLogsResponse {}
message StreamAccessLogsResponse {
}
// Service for streaming access logs from Envoy to an access log server.
service AccessLogService {
@@ -406,8 +409,7 @@ service AccessLogService {
// API for "critical" access logs in which Envoy will buffer access logs for some period of time
// until it gets an ACK so it could then retry. This API is designed for high throughput with the
// expectation that it might be lossy.
rpc StreamAccessLogs(stream StreamAccessLogsMessage)
returns (StreamAccessLogsResponse) {
rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {
}
}

@@ -17,7 +17,8 @@ message FaultDelay {
FIXED = 0;
}
// Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is supported.
// Delay type to use (fixed|exponential|..). Currently, only fixed delay (step function) is
// supported.
FaultDelayType type = 1 [(validate.rules).enum.defined_only = true];
// An integer between 0-100 indicating the percentage of operations/connection requests

@@ -5,7 +5,7 @@ licenses(["notice"]) # Apache 2
api_proto_library(
name = "router",
srcs = ["router.proto"],
deps = ["//api/filter/accesslog:accesslog"],
deps = ["//api/filter/accesslog"],
)
api_proto_library(

@@ -17,8 +17,6 @@ message Buffer {
// The maximum number of seconds that the filter will wait for a complete
// request before returning a 408 response.
google.protobuf.Duration max_request_time = 2 [(validate.rules).duration = {
required: true,
gt: {}
}];
google.protobuf.Duration max_request_time = 2
[(validate.rules).duration = {required: true, gt: {}}];
}

@@ -19,10 +19,7 @@ message FaultAbort {
option (validate.required) = true;
// HTTP status code to use to abort the HTTP request.
uint32 http_status = 2 [(validate.rules).uint32 = {
gte: 200,
lt: 600
}];
uint32 http_status = 2 [(validate.rules).uint32 = {gte: 200, lt: 600}];
}
}

@@ -9,7 +9,7 @@ api_proto_library(
"//api:base",
"//api:protocol",
"//api:rds",
"//api/filter/accesslog:accesslog",
"//api/filter/accesslog",
],
)
@@ -23,8 +23,8 @@ api_proto_library(
name = "tcp_proxy",
srcs = ["tcp_proxy.proto"],
deps = [
"//api/filter/accesslog:accesslog",
"//api:address",
"//api/filter/accesslog",
],
)

@@ -173,9 +173,9 @@ message HttpConnectionManager {
google.protobuf.BoolValue use_remote_address = 14;
// Whether the connection manager will generate the :ref:`x-request-id
// <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to true.
// Generating a random UUID4 is expensive so in high throughput scenarios where this feature is
// not desired it can be disabled.
// <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to
// true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature
// is not desired it can be disabled.
google.protobuf.BoolValue generate_request_id = 15;
// How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP

@@ -13,7 +13,8 @@ import "validate/validate.proto";
// [#protodoc-title: TCP Proxy]
// TCP Proxy :ref:`configuration overview <config_network_filters_tcp_proxy>`.
// [#v2-api-diff: The route match now takes place in the :ref:`FilterChainMatch <envoy_api_msg_FilterChainMatch>` table].
// [#v2-api-diff: The route match now takes place in the :ref:`FilterChainMatch
// <envoy_api_msg_FilterChainMatch>` table].
message TcpProxy {
// The prefix to use when emitting :ref:`statistics

@@ -56,8 +56,7 @@ service HealthDiscoveryService {
// TODO(htuch): Unlike the gRPC version, there is no stream-based binding of
// request/response. Should we add an identifier to the HealthCheckSpecifier
// to bind with the response?
rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse)
returns (HealthCheckSpecifier) {
rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) {
option (google.api.http) = {
post: "/v2/discovery:health_check"
body: "*"

@@ -85,7 +85,8 @@ message HealthCheck {
repeated Payload receive = 2;
}
message RedisHealthCheck {}
message RedisHealthCheck {
}
oneof health_checker {
option (validate.required) = true;

@@ -21,12 +21,10 @@ import "validate/validate.proto";
// consist of a complete update of all listeners. Existing connections will be
// allowed to drain from listeners that are no longer present.
service ListenerDiscoveryService {
rpc StreamListeners(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchListeners(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:listeners"
body: "*"
@@ -137,8 +135,9 @@ message Listener {
// The unique name by which this listener is known. If no name is provided,
// Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically
// updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.
// By default, the maximum length of a listener's name is limited to 60 characters. This limit can be
// increased by setting the :option:`--max-obj-name-len` command line argument to the desired value.
// By default, the maximum length of a listener's name is limited to 60 characters. This limit can
// be increased by setting the :option:`--max-obj-name-len` command line argument to the desired
// value.
string name = 1;
// The address that the listener should listen on. In general, the address must be unique, though
@@ -156,16 +155,16 @@ message Listener {
// configured. See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more
// information. When multiple filter chains are configured, each filter chain must have an
// **identical** set of :ref:`filters <envoy_api_field_FilterChain.filters>`. If the filters
// differ, the configuration will fail to load. In the future, this limitation will be relaxed such that
// different filters can be used depending on which filter chain matches (based on SNI or
// some other parameter).
// differ, the configuration will fail to load. In the future, this limitation will be relaxed
// such that different filters can be used depending on which filter chain matches (based on SNI
// or some other parameter).
repeated FilterChain filter_chains = 3 [(validate.rules).repeated .min_items = 1];
// If a connection is redirected using *iptables*, the port on which the proxy
// receives it might be different from the original destination address. When this flag is set to true,
// the listener hands off redirected connections to the listener associated with the original
// destination address. If there is no listener associated with the original destination address, the
// connection is handled by the listener that receives it. Defaults to false.
// receives it might be different from the original destination address. When this flag is set to
// true, the listener hands off redirected connections to the listener associated with the
// original destination address. If there is no listener associated with the original destination
// address, the connection is handled by the listener that receives it. Defaults to false.
google.protobuf.BoolValue use_original_dst = 4;
// Soft limit on size of the listeners new connection read and write buffers.

@@ -18,7 +18,8 @@ service MetricsService {
}
}
message StreamMetricsResponse {}
message StreamMetricsResponse {
}
message StreamMetricsMessage {
message Identifier {

@@ -20,12 +20,10 @@ import "validate/validate.proto";
// configurations. Each listener will bind its HTTP connection manager filter to
// a route table via this identifier.
service RouteDiscoveryService {
rpc StreamRoutes(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchRoutes(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:routes"
body: "*"
@@ -510,7 +508,8 @@ message RateLimit {
// ("source_cluster", "<local service cluster>")
//
// <local service cluster> is derived from the :option:`--service-cluster` option.
message SourceCluster {}
message SourceCluster {
}
// The following descriptor entry is appended to the descriptor:
//
@@ -528,7 +527,8 @@ message RateLimit {
// chooses a cluster randomly from a set of clusters with attributed weight.
// * :ref:`cluster_header <envoy_api_field_RouteAction.cluster_header>` indicates which
// header in the request contains the target cluster.
message DestinationCluster {}
message DestinationCluster {
}
// The following descriptor entry is appended when a header contains a key that matches the
// *header_name*:
@@ -552,7 +552,8 @@ message RateLimit {
// .. code-block:: cpp
//
// ("remote_address", "<trusted address from x-forwarded-for>")
message RemoteAddress {}
message RemoteAddress {
}
// The following descriptor entry is appended to the descriptor:
//
@@ -659,8 +660,8 @@ message HeaderMatcher {
// The top level element in the routing configuration is a virtual host. Each virtual host has
// a logical name as well as a set of domains that get routed to it based on the incoming request's
// host header. This allows a single listener to service multiple top level domain path trees. Once a
// virtual host is selected based on the domain, the routes are processed in order to see which
// host header. This allows a single listener to service multiple top level domain path trees. Once
// a virtual host is selected based on the domain, the routes are processed in order to see which
// upstream cluster to route to or whether to perform a redirect.
message VirtualHost {
// The logical name of the virtual host. This is used when emitting certain

@@ -4,7 +4,8 @@ package envoy.api.v2;
service RateLimitService {
// Determine whether rate limiting should take place.
rpc ShouldRateLimit (RateLimitRequest) returns (RateLimitResponse) {}
rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) {
}
}
// Main message for a rate limit request. The rate limit service is designed to be fully generic
@@ -22,8 +23,8 @@ message RateLimitRequest {
// processed by the service (see below). If any of the descriptors are over limit, the entire
// request is considered to be over limit.
repeated RateLimitDescriptor descriptors = 2;
// Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the
// value is not set in the message, a request increases the matched limit by 1.
// Rate limit requests can optionally specify the number of hits a request adds to the matched
// limit. If the value is not set in the message, a request increases the matched limit by 1.
uint32 hits_addend = 3;
}

@@ -13,12 +13,10 @@ import "validate/validate.proto";
// [#protodoc-title: Common TLS configuration]
service SecretDiscoveryService {
rpc StreamSecrets(stream DiscoveryRequest)
returns (stream DiscoveryResponse) {
rpc StreamSecrets(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
rpc FetchSecrets(DiscoveryRequest)
returns (DiscoveryResponse) {
rpc FetchSecrets(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:secrets"
body: "*"
@@ -128,10 +126,10 @@ message TlsSessionTicketKeys {
//
// .. attention::
//
// Using this feature has serious security considerations and risks. Improper handling of keys may
// result in loss of secrecy in connections, even if ciphers supporting perfect forward secrecy
// are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some discussion.
// To minimize the risk, you must:
// Using this feature has serious security considerations and risks. Improper handling of keys
// may result in loss of secrecy in connections, even if ciphers supporting perfect forward
// secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some
// discussion. To minimize the risk, you must:
//
// * Keep the session ticket keys at least as secure as your TLS certificate private keys
// * Rotate session ticket keys at least daily, and preferably hourly
@@ -142,11 +140,13 @@ message TlsSessionTicketKeys {
message CertificateValidationContext {
// TLS certificate data containing certificate authority certificates to use in verifying
// a presented client side certificate. If not specified and a client certificate is presented it
// will not be verified. By default, a client certificate is optional, unless one of the additional
// options (:ref:`require_client_certificate <envoy_api_field_DownstreamTlsContext.require_client_certificate>`,
// :ref:`verify_certificate_hash <envoy_api_field_CertificateValidationContext.verify_certificate_hash>`, or
// :ref:`verify_subject_alt_name <envoy_api_field_CertificateValidationContext.verify_subject_alt_name>`) is also
// specified.
// will not be verified. By default, a client certificate is optional, unless one of the
// additional options (:ref:`require_client_certificate
// <envoy_api_field_DownstreamTlsContext.require_client_certificate>`,
// :ref:`verify_certificate_hash
// <envoy_api_field_CertificateValidationContext.verify_certificate_hash>`, or
// :ref:`verify_subject_alt_name
// <envoy_api_field_CertificateValidationContext.verify_subject_alt_name>`) is also specified.
DataSource trusted_ca = 1;
// If specified, Envoy will verify (pin) the hex-encoded SHA-256 hash of

@@ -1,11 +1,18 @@
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
load("@com_lyft_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library")
def _CcSuffix(d):
return d + "_cc"
_PY_SUFFIX="_py"
_CC_SUFFIX="_cc"
def _Suffix(d, suffix):
return d + suffix
def _LibrarySuffix(library_name, suffix):
# Transform //a/b/c to //a/b/c:c in preparation for suffix operation below.
if library_name.startswith("//") and ":" not in library_name:
library_name += ":" + Label(library_name).name
return _Suffix(library_name, suffix)
def _PySuffix(d):
return d + "_py"
# TODO(htuch): has_services is currently ignored but will in future support
# gRPC stub generation.
@@ -14,11 +21,11 @@ def _PySuffix(d):
# https://github.com/bazelbuild/bazel/issues/2626 are resolved.
def api_py_proto_library(name, srcs = [], deps = [], has_services = 0):
py_proto_library(
name = _PySuffix(name),
name = _Suffix(name, _PY_SUFFIX),
srcs = srcs,
default_runtime = "@com_google_protobuf//:protobuf_python",
protoc = "@com_google_protobuf//:protoc",
deps = [_PySuffix(d) for d in deps] + [
deps = [_LibrarySuffix(d, _PY_SUFFIX) for d in deps] + [
"@com_lyft_protoc_gen_validate//validate:validate_py",
"@googleapis//:http_api_protos_py",
],
@@ -54,9 +61,9 @@ def api_proto_library(name, srcs = [], deps = [], has_services = 0, require_py =
# provider. Hopefully one day we can move to a model where this target and
# the proto_library above are aligned.
pgv_cc_proto_library(
name = _CcSuffix(name),
name = _Suffix(name, _CC_SUFFIX),
srcs = srcs,
deps = [_CcSuffix(d) for d in deps],
deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in deps],
external_deps = [
"@com_google_protobuf//:cc_wkt_protos",
"@googleapis//:http_api_protos",
@@ -70,5 +77,5 @@ def api_cc_test(name, srcs, proto_deps):
native.cc_test(
name = name,
srcs = srcs,
deps = [_CcSuffix(d) for d in proto_deps],
deps = [_LibrarySuffix(d, _CC_SUFFIX) for d in proto_deps],
)
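The Bazel macro refactor above replaces the separate _CcSuffix and _PySuffix helpers with a shared _LibrarySuffix that first canonicalizes a shorthand label such as //api/filter/accesslog into //api/filter/accesslog:accesslog before appending _cc or _py, which is why the BUILD files elsewhere in this commit can now depend on //api/filter/accesslog instead of //api/filter/accesslog:accesslog. A plain-Python sketch of that normalization; in Starlark the target name comes from Label(library_name).name, which this sketch reproduces by hand as the last path component:

# Illustrative re-implementation of _LibrarySuffix outside of Bazel.
def library_suffix(label, suffix):
    # "//a/b/c" is shorthand for "//a/b/c:c"; make the target explicit so the
    # suffix is appended to the target name rather than the package path.
    if label.startswith("//") and ":" not in label:
        label += ":" + label.rsplit("/", 1)[-1]
    return label + suffix

assert library_suffix("//api/filter/accesslog", "_cc") == "//api/filter/accesslog:accesslog_cc"
assert library_suffix("//api:rds", "_cc") == "//api:rds_cc"
assert library_suffix(":base", "_py") == ":base_py"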

@@ -6,7 +6,6 @@ api_cc_test(
name = "build_test",
srcs = ["build_test.cc"],
proto_deps = [
"//api/filter/accesslog:accesslog",
"//api:cds",
"//api:discovery",
"//api:eds",
@@ -15,5 +14,6 @@ api_cc_test(
"//api:metrics",
"//api:rds",
"//api:rls",
"//api/filter/accesslog",
],
)

@@ -13,8 +13,7 @@ int main(int argc, char *argv[]) {
std::string err;
if (Validate(empty, &err)) {
std::cout << "Unexpected successful validation of empty proto."
<< std::endl;
std::cout << "Unexpected successful validation of empty proto." << std::endl;
exit(EXIT_FAILURE);
}
}
@@ -25,8 +24,7 @@ int main(int argc, char *argv[]) {
std::string err;
if (!Validate(non_empty, &err)) {
std::cout << "Unexpected failed validation of empty proto: " << err
<< std::endl;
std::cout << "Unexpected failed validation of empty proto: " << err << std::endl;
exit(EXIT_FAILURE);
}
}

@@ -3,9 +3,9 @@ licenses(["notice"]) # Apache 2
py_binary(
name = "protodoc",
srcs = ["protodoc.py"],
visibility = ["//visibility:public"],
deps = [
"@com_lyft_protoc_gen_validate//validate:validate_py",
"@com_google_protobuf//:protobuf_python",
"@com_lyft_protoc_gen_validate//validate:validate_py",
],
visibility = ["//visibility:public"],
)
