Merge remote-tracking branch 'upstream/master' into xds_no_nack_on_missing_resource

pull/22293/head
Mark D. Roth 5 years ago
commit 441c3aa792
11 changed files:
  1. BUILD (1 line changed)
  2. CMakeLists.txt (13 lines changed)
  3. include/grpcpp/opencensus.h (34 lines changed)
  4. include/grpcpp/opencensus_impl.h (47 lines changed)
  5. src/cpp/ext/filters/census/grpc_plugin.cc (65 lines changed)
  6. src/cpp/ext/filters/census/views.cc (32 lines changed)
  7. src/python/grpcio_tests/tests_aio/unit/call_test.py (8 lines changed)
  8. templates/CMakeLists.txt.template (13 lines changed)
  9. tools/internal_ci/linux/grpc_xds.cfg (2 lines changed)
  10. tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh (2 lines changed)
  11. tools/run_tests/run_xds_tests.py (87 lines changed)

@ -2322,7 +2322,6 @@ grpc_cc_library(
],
hdrs = [
"include/grpcpp/opencensus.h",
"include/grpcpp/opencensus_impl.h",
"src/cpp/ext/filters/census/channel_filter.h",
"src/cpp/ext/filters/census/client_filter.h",
"src/cpp/ext/filters/census/context.h",

@ -152,6 +152,14 @@ if(WIN32)
set(_gRPC_PLATFORM_WINDOWS ON)
endif()
# Use C99 standard
set(CMAKE_C_STANDARD 99)
# Add c++11 flags
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
@ -201,11 +209,6 @@ include(cmake/ssl.cmake)
include(cmake/upb.cmake)
include(cmake/zlib.cmake)
if(NOT MSVC)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
endif()
if(_gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_IOS)
set(_gRPC_ALLTARGETS_LIBRARIES ${CMAKE_DL_LIBS} m pthread)
elseif(_gRPC_PLATFORM_ANDROID)

@ -19,20 +19,32 @@
#ifndef GRPCPP_OPENCENSUS_H
#define GRPCPP_OPENCENSUS_H
#include "grpcpp/opencensus_impl.h"
#include "opencensus/trace/span.h"
namespace grpc_impl {
class ServerContext;
}
namespace grpc {
// These symbols in this file will not be included in the binary unless
// grpc_opencensus_plugin build target was added as a dependency. At the moment
// it is only setup to be built with Bazel.
static inline void RegisterOpenCensusPlugin() {
::grpc_impl::RegisterOpenCensusPlugin();
}
static inline void RegisterOpenCensusViewsForExport() {
::grpc_impl::RegisterOpenCensusViewsForExport();
}
static inline ::opencensus::trace::Span GetSpanFromServerContext(
::grpc_impl::ServerContext* context) {
return ::grpc_impl::GetSpanFromServerContext(context);
}
// Registers the OpenCensus plugin with gRPC, so that it will be used for future
// RPCs. This must be called before any views are created.
void RegisterOpenCensusPlugin();
// RPC stats definitions, defined by
// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/gRPC.md
// Registers the cumulative gRPC views so that they will be exported by any
// registered stats exporter. For on-task stats, construct a View using the
// ViewDescriptors below.
void RegisterOpenCensusViewsForExport();
// Returns the tracing Span for the current RPC.
::opencensus::trace::Span GetSpanFromServerContext(
::grpc_impl::ServerContext* context);
} // namespace grpc
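
Usage note (not part of this diff): with this change the public header declares RegisterOpenCensusPlugin(), RegisterOpenCensusViewsForExport(), and GetSpanFromServerContext() directly in namespace grpc instead of forwarding to grpc_impl. A minimal sketch of how an application might call them, assuming it links the grpc_opencensus_plugin target (per the comment above, currently only wired up for Bazel builds); the listening address and builder wiring are illustrative:

    #include <memory>

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/opencensus.h>

    int main() {
      // Must run before any views are created and before any RPCs are issued.
      grpc::RegisterOpenCensusPlugin();
      // Register the canonical cumulative gRPC views so that whatever stats
      // exporter the application has installed will export them.
      grpc::RegisterOpenCensusViewsForExport();

      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      // builder.RegisterService(&service);  // application-defined service, omitted here
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      server->Wait();
      return 0;
    }
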

@ -1,47 +0,0 @@
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPCPP_OPENCENSUS_IMPL_H
#define GRPCPP_OPENCENSUS_IMPL_H
#include "opencensus/trace/span.h"
namespace grpc_impl {
class ServerContext;
// These symbols in this file will not be included in the binary unless
// grpc_opencensus_plugin build target was added as a dependency. At the moment
// it is only setup to be built with Bazel.
// Registers the OpenCensus plugin with gRPC, so that it will be used for future
// RPCs. This must be called before any views are created.
void RegisterOpenCensusPlugin();
// RPC stats definitions, defined by
// https://github.com/census-instrumentation/opencensus-specs/blob/master/stats/gRPC.md
// Registers the cumulative gRPC views so that they will be exported by any
// registered stats exporter. For on-task stats, construct a View using the
// ViewDescriptors below.
void RegisterOpenCensusViewsForExport();
// Returns the tracing Span for the current RPC.
::opencensus::trace::Span GetSpanFromServerContext(ServerContext* context);
} // namespace grpc_impl
#endif // GRPCPP_OPENCENSUS_IMPL_H

@ -31,6 +31,35 @@
namespace grpc {
void RegisterOpenCensusPlugin() {
RegisterChannelFilter<CensusChannelData, CensusClientCallData>(
"opencensus_client", GRPC_CLIENT_CHANNEL, INT_MAX /* priority */,
nullptr /* condition function */);
RegisterChannelFilter<CensusChannelData, CensusServerCallData>(
"opencensus_server", GRPC_SERVER_CHANNEL, INT_MAX /* priority */,
nullptr /* condition function */);
// Access measures to ensure they are initialized. Otherwise, creating a view
// before the first RPC would cause an error.
RpcClientSentBytesPerRpc();
RpcClientReceivedBytesPerRpc();
RpcClientRoundtripLatency();
RpcClientServerLatency();
RpcClientSentMessagesPerRpc();
RpcClientReceivedMessagesPerRpc();
RpcServerSentBytesPerRpc();
RpcServerReceivedBytesPerRpc();
RpcServerServerLatency();
RpcServerSentMessagesPerRpc();
RpcServerReceivedMessagesPerRpc();
}
::opencensus::trace::Span GetSpanFromServerContext(ServerContext* context) {
return reinterpret_cast<const CensusContext*>(context->census_context())
->Span();
}
// These measure definitions should be kept in sync across opencensus
// implementations--see
// https://github.com/census-instrumentation/opencensus-java/blob/master/contrib/grpc_metrics/src/main/java/io/opencensus/contrib/grpc/metrics/RpcMeasureConstants.java.
@ -98,39 +127,5 @@ ABSL_CONST_INIT const absl::string_view
ABSL_CONST_INIT const absl::string_view kRpcServerServerLatencyMeasureName =
"grpc.io/server/server_latency";
} // namespace grpc
namespace grpc_impl {
void RegisterOpenCensusPlugin() {
grpc::RegisterChannelFilter<grpc::CensusChannelData,
grpc::CensusClientCallData>(
"opencensus_client", GRPC_CLIENT_CHANNEL, INT_MAX /* priority */,
nullptr /* condition function */);
grpc::RegisterChannelFilter<grpc::CensusChannelData,
grpc::CensusServerCallData>(
"opencensus_server", GRPC_SERVER_CHANNEL, INT_MAX /* priority */,
nullptr /* condition function */);
// Access measures to ensure they are initialized. Otherwise, creating a view
// before the first RPC would cause an error.
grpc::RpcClientSentBytesPerRpc();
grpc::RpcClientReceivedBytesPerRpc();
grpc::RpcClientRoundtripLatency();
grpc::RpcClientServerLatency();
grpc::RpcClientSentMessagesPerRpc();
grpc::RpcClientReceivedMessagesPerRpc();
grpc::RpcServerSentBytesPerRpc();
grpc::RpcServerReceivedBytesPerRpc();
grpc::RpcServerServerLatency();
grpc::RpcServerSentMessagesPerRpc();
grpc::RpcServerReceivedMessagesPerRpc();
}
::opencensus::trace::Span GetSpanFromServerContext(
grpc::ServerContext* context) {
return reinterpret_cast<const grpc::CensusContext*>(context->census_context())
->Span();
}
} // namespace grpc_impl
} // namespace grpc
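
For illustration (not part of this change), a handler-side sketch of the GetSpanFromServerContext() definition above; EchoRequest, EchoResponse, and the Echo function are placeholders, not gRPC APIs:

    #include <grpcpp/grpcpp.h>
    #include <grpcpp/opencensus.h>
    #include "opencensus/trace/span.h"

    // Stand-ins for generated protobuf message types.
    struct EchoRequest {};
    struct EchoResponse {};

    // Body of a hypothetical unary handler.
    grpc::Status Echo(grpc::ServerContext* context, const EchoRequest* /*request*/,
                      EchoResponse* /*response*/) {
      // The census filter attaches a CensusContext to the call; this returns the
      // tracing Span recorded there so the application can annotate it.
      opencensus::trace::Span span = grpc::GetSpanFromServerContext(context);
      span.AddAnnotation("Handling Echo request");
      return grpc::Status::OK;
    }

As the implementation above shows, the span is recovered by casting the call's census_context() back to the plugin's CensusContext, so this only returns a meaningful span when the OpenCensus plugin has been registered.
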

@ -25,23 +25,6 @@
#include "opencensus/stats/internal/set_aggregation_window.h"
#include "opencensus/stats/stats.h"
namespace grpc_impl {
void RegisterOpenCensusViewsForExport() {
grpc::ClientSentMessagesPerRpcCumulative().RegisterForExport();
grpc::ClientSentBytesPerRpcCumulative().RegisterForExport();
grpc::ClientReceivedMessagesPerRpcCumulative().RegisterForExport();
grpc::ClientReceivedBytesPerRpcCumulative().RegisterForExport();
grpc::ClientRoundtripLatencyCumulative().RegisterForExport();
grpc::ClientServerLatencyCumulative().RegisterForExport();
grpc::ServerSentMessagesPerRpcCumulative().RegisterForExport();
grpc::ServerSentBytesPerRpcCumulative().RegisterForExport();
grpc::ServerReceivedMessagesPerRpcCumulative().RegisterForExport();
grpc::ServerReceivedBytesPerRpcCumulative().RegisterForExport();
grpc::ServerServerLatencyCumulative().RegisterForExport();
}
} // namespace grpc_impl
namespace grpc {
using ::opencensus::stats::Aggregation;
@ -88,6 +71,21 @@ ViewDescriptor HourDescriptor() {
} // namespace
void RegisterOpenCensusViewsForExport() {
ClientSentMessagesPerRpcCumulative().RegisterForExport();
ClientSentBytesPerRpcCumulative().RegisterForExport();
ClientReceivedMessagesPerRpcCumulative().RegisterForExport();
ClientReceivedBytesPerRpcCumulative().RegisterForExport();
ClientRoundtripLatencyCumulative().RegisterForExport();
ClientServerLatencyCumulative().RegisterForExport();
ServerSentMessagesPerRpcCumulative().RegisterForExport();
ServerSentBytesPerRpcCumulative().RegisterForExport();
ServerReceivedMessagesPerRpcCumulative().RegisterForExport();
ServerReceivedBytesPerRpcCumulative().RegisterForExport();
ServerServerLatencyCumulative().RegisterForExport();
}
// client cumulative
const ViewDescriptor& ClientSentBytesPerRpcCumulative() {
const static ViewDescriptor descriptor =

@ -457,16 +457,16 @@ class TestUnaryStreamCall(_MulticallableTestMixin, AioTestBase):
# Should be around the same as the timeout
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT * 3 // 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 2)
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 5 / 2)
response = await call.read()
self.assertEqual(_RESPONSE_PAYLOAD_SIZE, len(response.payload.body))
# Should be around the timeout minus a unit of wait time
remained_time = call.time_remaining()
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT // 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 3 // 2)
self.assertGreater(remained_time, test_constants.SHORT_TIMEOUT / 2)
self.assertLess(remained_time, test_constants.SHORT_TIMEOUT * 3 / 2)
self.assertEqual(grpc.StatusCode.OK, await call.code())

@ -242,6 +242,14 @@
set(_gRPC_PLATFORM_WINDOWS ON)
endif()
# Use C99 standard
set(CMAKE_C_STANDARD 99)
# Add c++11 flags
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
## Some libraries are shared even with BUILD_SHARED_LIBRARIES=OFF
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
set(CMAKE_MODULE_PATH "<%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/cmake/modules")
@ -292,11 +300,6 @@
include(cmake/upb.cmake)
include(cmake/zlib.cmake)
if(NOT MSVC)
set(CMAKE_C_FLAGS "<%text>${CMAKE_C_FLAGS}</%text> -std=c99")
set(CMAKE_CXX_FLAGS "<%text>${CMAKE_CXX_FLAGS}</%text> -std=c++11")
endif()
if(_gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_IOS)
set(_gRPC_ALLTARGETS_LIBRARIES <%text>${CMAKE_DL_LIBS}</%text> m pthread)
elseif(_gRPC_PLATFORM_ANDROID)

@ -16,7 +16,7 @@
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_bazel.sh"
timeout_mins: 60
timeout_mins: 90
env_vars {
key: "BAZEL_SCRIPT"
value: "tools/internal_ci/linux/grpc_xds_bazel_test_in_docker.sh"

@ -52,4 +52,4 @@ bazel build test/cpp/interop:xds_interop_client
--project_id=grpc-testing \
--gcp_suffix=$(date '+%s') \
--verbose \
--client_cmd='bazel-bin/test/cpp/interop/xds_interop_client --server=xds-experimental:///{service_host}:{service_port} --stats_port={stats_port} --qps={qps}'
--client_cmd='GRPC_VERBOSITY=debug GRPC_TRACE=xds,xds_client bazel-bin/test/cpp/interop/xds_interop_client --server=xds-experimental:///{server_uri} --stats_port={stats_port} --qps={qps}'

@ -34,6 +34,8 @@ from src.proto.grpc.testing import test_pb2_grpc
logger = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(asctime)s: %(levelname)-8s %(message)s')
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
@ -71,8 +73,8 @@ argp.add_argument(
'--client_cmd',
default=None,
help='Command to launch xDS test client. This script will fill in '
'{service_host}, {service_port},{stats_port} and {qps} parameters using '
'str.format(), and generate the GRPC_XDS_BOOTSTRAP file.')
'{server_uri}, {stats_port} and {qps} parameters using str.format(), and '
'generate the GRPC_XDS_BOOTSTRAP file.')
argp.add_argument('--zone', default='us-central1-a')
argp.add_argument('--secondary_zone',
default='us-west1-b',
@ -101,12 +103,16 @@ argp.add_argument('--network',
default='global/networks/default',
help='GCP network to use')
argp.add_argument('--service_port_range',
default='8080:8100',
default='80',
type=parse_port_range,
help='Listening port for created gRPC backends. Specified as '
'either a single int or as a range in the format min:max, in '
'which case an available port p will be chosen s.t. min <= p '
'<= max')
argp.add_argument('--forwarding_rule_ip_prefix',
default='172.16.0.',
help='If set, an available IP with this prefix followed by '
'0-255 will be used for the generated forwarding rule.')
argp.add_argument(
'--stats_port',
default=8079,
@ -135,11 +141,13 @@ args = argp.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
_DEFAULT_SERVICE_PORT = 80
_WAIT_FOR_BACKEND_SEC = args.wait_for_backend_sec
_WAIT_FOR_OPERATION_SEC = 60
_WAIT_FOR_OPERATION_SEC = 120
_INSTANCE_GROUP_SIZE = 2
_NUM_TEST_RPCS = 10 * args.qps
_WAIT_FOR_STATS_SEC = 60
_WAIT_FOR_STATS_SEC = 120
_WAIT_FOR_URL_MAP_PATCH_SEC = 300
_BOOTSTRAP_TEMPLATE = """
{{
"node": {{
@ -226,6 +234,7 @@ def wait_until_all_rpcs_go_to_given_backends(backends,
def test_backends_restart(gcp, backend_service, instance_group):
logger.info('Running test_backends_restart')
instance_names = get_instance_names(gcp, instance_group)
num_instances = len(instance_names)
start_time = time.time()
@ -256,6 +265,7 @@ def test_backends_restart(gcp, backend_service, instance_group):
def test_change_backend_service(gcp, original_backend_service, instance_group,
alternate_backend_service,
same_zone_instance_group):
logger.info('Running test_change_backend_service')
original_backend_instances = get_instance_names(gcp, instance_group)
alternate_backend_instances = get_instance_names(gcp,
same_zone_instance_group)
@ -272,7 +282,7 @@ def test_change_backend_service(gcp, original_backend_service, instance_group,
if stats.num_failures > 0:
raise Exception('Unexpected failure: %s', stats)
wait_until_all_rpcs_go_to_given_backends(alternate_backend_instances,
_WAIT_FOR_STATS_SEC)
_WAIT_FOR_URL_MAP_PATCH_SEC)
finally:
patch_url_map_backend_service(gcp, original_backend_service)
patch_backend_instances(gcp, alternate_backend_service, [])
@ -281,9 +291,13 @@ def test_change_backend_service(gcp, original_backend_service, instance_group,
def test_new_instance_group_receives_traffic(gcp, backend_service,
instance_group,
same_zone_instance_group):
logger.info('Running test_new_instance_group_receives_traffic')
instance_names = get_instance_names(gcp, instance_group)
# TODO(ericgribkoff) Reduce this timeout. When running sequentially, this
# occurs after patching the url map in test_change_backend_service, so we
# need the extended timeout here as well.
wait_until_all_rpcs_go_to_given_backends(instance_names,
_WAIT_FOR_STATS_SEC)
_WAIT_FOR_URL_MAP_PATCH_SEC)
try:
patch_backend_instances(gcp,
backend_service,
@ -301,6 +315,7 @@ def test_new_instance_group_receives_traffic(gcp, backend_service,
def test_ping_pong(gcp, backend_service, instance_group):
logger.info('Running test_ping_pong')
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
wait_until_all_rpcs_go_to_given_backends(instance_names,
@ -309,6 +324,7 @@ def test_ping_pong(gcp, backend_service, instance_group):
def test_remove_instance_group(gcp, backend_service, instance_group,
same_zone_instance_group):
logger.info('Running test_remove_instance_group')
try:
patch_backend_instances(gcp,
backend_service,
@ -334,6 +350,7 @@ def test_remove_instance_group(gcp, backend_service, instance_group,
def test_round_robin(gcp, backend_service, instance_group):
logger.info('Running test_round_robin')
wait_for_healthy_backends(gcp, backend_service, instance_group)
instance_names = get_instance_names(gcp, instance_group)
threshold = 1
@ -355,6 +372,9 @@ def test_round_robin(gcp, backend_service, instance_group):
def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
gcp, backend_service, primary_instance_group,
secondary_zone_instance_group):
logger.info(
'Running test_secondary_locality_gets_no_requests_on_partial_primary_failure'
)
try:
patch_backend_instances(
gcp, backend_service,
@ -381,6 +401,8 @@ def test_secondary_locality_gets_no_requests_on_partial_primary_failure(
def test_secondary_locality_gets_requests_on_primary_failure(
gcp, backend_service, primary_instance_group,
secondary_zone_instance_group):
logger.info(
'Running test_secondary_locality_gets_requests_on_primary_failure')
try:
patch_backend_instances(
gcp, backend_service,
@ -453,6 +475,7 @@ nohup build/install/grpc-interop-testing/bin/xds-test-server --port=%d 1>/dev/nu
}
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceTemplates().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -470,6 +493,7 @@ def add_instance_group(gcp, zone, name, size):
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.instanceGroupManagers().insert(project=gcp.project,
zone=zone,
body=config).execute()
@ -491,6 +515,7 @@ def create_health_check(gcp, name):
'portName': 'grpc'
}
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.healthChecks().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -507,6 +532,7 @@ def create_health_check_firewall_rule(gcp, name):
'sourceRanges': ['35.191.0.0/16', '130.211.0.0/22'],
'targetTags': ['allow-health-checks'],
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.firewalls().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -522,6 +548,7 @@ def add_backend_service(gcp, name):
'portName': 'grpc',
'protocol': 'HTTP2'
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.backendServices().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -543,6 +570,7 @@ def create_url_map(gcp, name, backend_service, host_name):
'pathMatcher': _PATH_MATCHER_NAME
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -554,21 +582,23 @@ def create_target_http_proxy(gcp, name):
'name': name,
'url_map': gcp.url_map.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.targetHttpProxies().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
gcp.target_http_proxy = GcpResource(config['name'], result['targetLink'])
def create_global_forwarding_rule(gcp, name, port):
def create_global_forwarding_rule(gcp, name, ip, port):
config = {
'name': name,
'loadBalancingScheme': 'INTERNAL_SELF_MANAGED',
'portRange': str(port),
'IPAddress': '0.0.0.0',
'IPAddress': ip,
'network': args.network,
'target': gcp.target_http_proxy.url,
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.globalForwardingRules().insert(project=gcp.project,
body=config).execute()
wait_for_global_operation(gcp, result['name'])
@ -671,6 +701,7 @@ def patch_backend_instances(gcp,
'maxRate': 1 if balancing_mode == 'RATE' else None
} for instance_group in instance_groups],
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.backendServices().patch(
project=gcp.project, backendService=backend_service.name,
body=config).execute()
@ -706,6 +737,7 @@ def patch_url_map_backend_service(gcp, backend_service):
'defaultService': backend_service.url,
}]
}
logger.debug('Sending GCP request with body=%s', config)
result = gcp.compute.urlMaps().patch(project=gcp.project,
urlMap=gcp.url_map.name,
body=config).execute()
@ -886,18 +918,28 @@ try:
create_target_http_proxy(gcp, target_http_proxy_name)
potential_service_ports = list(args.service_port_range)
random.shuffle(potential_service_ports)
if args.forwarding_rule_ip_prefix == '':
potential_ips = ['0.0.0.0']
else:
potential_ips = [
args.forwarding_rule_ip_prefix + str(x) for x in range(256)
]
random.shuffle(potential_ips)
for port in potential_service_ports:
try:
create_global_forwarding_rule(gcp, forwarding_rule_name, port)
gcp.service_port = port
break
except googleapiclient.errors.HttpError as http_error:
logger.warning(
'Got error %s when attempting to create forwarding rule to '
'port %d. Retrying with another port.' % (http_error, port))
for ip in potential_ips:
try:
create_global_forwarding_rule(gcp, forwarding_rule_name, ip,
port)
gcp.service_port = port
break
except googleapiclient.errors.HttpError as http_error:
logger.warning(
'Got error %s when attempting to create forwarding rule to '
'%s:%d. Retrying with another ip:port.' %
(http_error, ip, port))
if not gcp.service_port:
raise Exception('Failed to pick a service port in the range %s' %
args.service_port_range)
raise Exception(
'Failed to find a valid ip:port for the forwarding rule')
create_instance_template(gcp, template_name, args.network,
args.source_image)
instance_group = add_instance_group(gcp, args.zone, instance_group_name,
@ -975,8 +1017,11 @@ try:
wait_for_healthy_backends(gcp, backend_service, instance_group)
cmd = args.client_cmd.format(service_host=service_host_name,
service_port=gcp.service_port,
if gcp.service_port == _DEFAULT_SERVICE_PORT:
server_uri = service_host_name
else:
server_uri = service_host_name + ':' + str(gcp.service_port)
cmd = args.client_cmd.format(server_uri=server_uri,
stats_port=args.stats_port,
qps=args.qps)
client_process = start_xds_client(cmd)
