Merge remote-tracking branch 'upstream/master' into service_config_json

pull/8617/head
Mark D. Roth 9 years ago
commit c56c3ad259
  1. 2
      Makefile
  2. 1
      gRPC-Core.podspec
  3. 51
      src/core/ext/transport/cronet/transport/cronet_transport.c
  4. 2
      src/core/lib/iomgr/wakeup_fd_pipe.c
  5. 2
      src/core/lib/security/transport/handshake.c
  6. 4
      src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.m
  7. 7
      src/proto/grpc/testing/control.proto
  8. 6
      src/proto/grpc/testing/stats.proto
  9. 1
      templates/gRPC-Core.podspec.template
  10. 8
      test/core/end2end/end2end_nosec_tests.c
  11. 8
      test/core/end2end/end2end_tests.c
  12. 1
      test/core/end2end/gen_build_yaml.py
  13. 193
      test/core/end2end/tests/authority_not_supported.c
  14. 15
      test/cpp/qps/driver.cc
  15. 142
      test/cpp/qps/qps_json_driver.cc
  16. 15
      test/cpp/qps/report.cc
  17. 6
      test/cpp/qps/report.h
  18. 2
      test/cpp/qps/server.h
  19. 33
      test/cpp/qps/usage_timer.cc
  20. 2
      test/cpp/qps/usage_timer.h
  21. 26
      tools/run_tests/filter_pull_request_tests.py
  22. 13
      tools/run_tests/jobset.py
  23. 2
      tools/run_tests/report_utils.py
  24. 205
      tools/run_tests/run_tests_matrix.py
  25. 149
      tools/run_tests/sanity/check_test_filtering.py
  26. 1
      tools/run_tests/sanity/sanity_tests.yaml
  27. 651
      tools/run_tests/tests.json

@ -7015,6 +7015,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/simple_request.c \ test/core/end2end/tests/simple_request.c \
test/core/end2end/tests/streaming_error_response.c \ test/core/end2end/tests/streaming_error_response.c \
test/core/end2end/tests/trailing_metadata.c \ test/core/end2end/tests/trailing_metadata.c \
test/core/end2end/tests/authority_not_supported.c \
PUBLIC_HEADERS_C += \ PUBLIC_HEADERS_C += \
@ -7099,6 +7100,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/simple_request.c \ test/core/end2end/tests/simple_request.c \
test/core/end2end/tests/streaming_error_response.c \ test/core/end2end/tests/streaming_error_response.c \
test/core/end2end/tests/trailing_metadata.c \ test/core/end2end/tests/trailing_metadata.c \
test/core/end2end/tests/authority_not_supported.c \
PUBLIC_HEADERS_C += \ PUBLIC_HEADERS_C += \

@ -843,6 +843,7 @@ Pod::Spec.new do |s|
ss.source_files = 'test/core/end2end/cq_verifier.{c,h}', ss.source_files = 'test/core/end2end/cq_verifier.{c,h}',
'test/core/end2end/end2end_tests.{c,h}', 'test/core/end2end/end2end_tests.{c,h}',
'test/core/end2end/end2end_test_utils.c',
'test/core/end2end/tests/*.{c,h}', 'test/core/end2end/tests/*.{c,h}',
'test/core/end2end/data/*.{c,h}', 'test/core/end2end/data/*.{c,h}',
'test/core/util/test_config.{c,h}', 'test/core/util/test_config.{c,h}',

@ -610,6 +610,16 @@ static int parse_grpc_header(const uint8_t *data) {
return length; return length;
} }
/* Returns true if the metadata list starting at |head| contains an
   :authority element, false otherwise. */
static bool header_has_authority(grpc_linked_mdelem *head) {
  grpc_linked_mdelem *curr;
  for (curr = head; curr != NULL; curr = curr->next) {
    if (curr->md->key == GRPC_MDSTR_AUTHORITY) {
      return true;
    }
  }
  return false;
}
/* /*
Op Execution: Decide if one of the actions contained in the stream op can be Op Execution: Decide if one of the actions contained in the stream op can be
executed. This is the heart of the state machine. executed. This is the heart of the state machine.
@ -981,11 +991,18 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
} else if (stream_op->on_complete && } else if (stream_op->on_complete &&
op_can_be_run(stream_op, stream_state, &oas->state, op_can_be_run(stream_op, stream_state, &oas->state,
OP_ON_COMPLETE)) { OP_ON_COMPLETE)) {
/* All actions in this stream_op are complete. Call the on_complete callback
*/
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas); CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE, if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
NULL); stream_state->state_callback_received[OP_FAILED]) {
grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete,
GRPC_ERROR_CANCELLED, NULL);
} else {
/* All actions in this stream_op are complete. Call the on_complete
* callback
*/
grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
NULL);
}
oas->state.state_op_done[OP_ON_COMPLETE] = true; oas->state.state_op_done[OP_ON_COMPLETE] = true;
oas->done = true; oas->done = true;
/* reset any send message state, only if this ON_COMPLETE is about a send. /* reset any send message state, only if this ON_COMPLETE is about a send.
@ -1042,7 +1059,31 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->curr_gs = gs; s->curr_gs = gs;
memcpy(&s->curr_ct, gt, sizeof(grpc_cronet_transport)); memcpy(&s->curr_ct, gt, sizeof(grpc_cronet_transport));
add_to_storage(s, op); add_to_storage(s, op);
execute_from_storage(s); if (op->send_initial_metadata &&
header_has_authority(op->send_initial_metadata->list.head)) {
/* Cronet does not support :authority header field. We cancel the call when
this field is present in metadata */
cronet_bidirectional_stream_header_array header_array;
cronet_bidirectional_stream_header *header;
cronet_bidirectional_stream cbs;
CRONET_LOG(GPR_DEBUG,
":authority header is provided but not supported;"
" cancel operations");
/* Notify application that operation is cancelled by forging trailers */
header_array.count = 1;
header_array.capacity = 1;
header_array.headers =
gpr_malloc(sizeof(cronet_bidirectional_stream_header));
header = (cronet_bidirectional_stream_header *)header_array.headers;
header->key = "grpc-status";
header->value = "1"; /* Return status GRPC_STATUS_CANCELLED */
cbs.annotation = (void *)s;
s->state.state_op_done[OP_CANCEL_ERROR] = true;
on_response_trailers_received(&cbs, &header_array);
gpr_free(header_array.headers);
} else {
execute_from_storage(s);
}
} }
static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,

@ -95,6 +95,8 @@ static void pipe_destroy(grpc_wakeup_fd* fd_info) {
static int pipe_check_availability(void) { static int pipe_check_availability(void) {
grpc_wakeup_fd fd; grpc_wakeup_fd fd;
fd.read_fd = fd.write_fd = -1;
if (pipe_init(&fd) == GRPC_ERROR_NONE) { if (pipe_init(&fd) == GRPC_ERROR_NONE) {
pipe_destroy(&fd); pipe_destroy(&fd);
return 1; return 1;

@ -125,7 +125,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx,
h->auth_context); h->auth_context);
} else { } else {
const char *msg = grpc_error_string(error); const char *msg = grpc_error_string(error);
gpr_log(GPR_ERROR, "Security handshake failed: %s", msg); gpr_log(GPR_INFO, "Security handshake failed: %s", msg);
grpc_error_free_string(msg); grpc_error_free_string(msg);
if (h->secure_endpoint != NULL) { if (h->secure_endpoint != NULL) {

@ -228,6 +228,10 @@ static char *roots_filename;
// TODO(mxyan): Use NSStringFromSelector(_cmd) to acquire test name from the // TODO(mxyan): Use NSStringFromSelector(_cmd) to acquire test name from the
// test case method name, so that bodies of test cases can stay identical // test case method name, so that bodies of test cases can stay identical
- (void)testAuthorityNotSupported {
[self testIndividualCase:"authority_not_supported"];
}
- (void)testBadHostname { - (void)testBadHostname {
[self testIndividualCase:"bad_hostname"]; [self testIndividualCase:"bad_hostname"];
} }

@ -219,9 +219,12 @@ message ScenarioResultSummary
double latency_99 = 10; double latency_99 = 10;
double latency_999 = 11; double latency_999 = 11;
// server cpu usage percentage
double server_cpu_usage = 12;
// Number of requests that succeeded/failed // Number of requests that succeeded/failed
double successful_requests_per_second = 12; double successful_requests_per_second = 13;
double failed_requests_per_second = 13; double failed_requests_per_second = 14;
} }
// Results of a single benchmark scenario. // Results of a single benchmark scenario.

@ -41,6 +41,12 @@ message ServerStats {
// change in server time (in seconds) used by the server process and all // change in server time (in seconds) used by the server process and all
// threads since last reset // threads since last reset
double time_system = 3; double time_system = 3;
// change in total cpu time of the server (data from proc/stat)
uint64 total_cpu_time = 4;
// change in idle time of the server (data from proc/stat)
uint64 idle_cpu_time = 5;
} }
// Histogram params based on grpc/support/histogram.c // Histogram params based on grpc/support/histogram.c

@ -173,6 +173,7 @@
ss.source_files = 'test/core/end2end/cq_verifier.{c,h}', ss.source_files = 'test/core/end2end/cq_verifier.{c,h}',
'test/core/end2end/end2end_tests.{c,h}', 'test/core/end2end/end2end_tests.{c,h}',
'test/core/end2end/end2end_test_utils.c',
'test/core/end2end/tests/*.{c,h}', 'test/core/end2end/tests/*.{c,h}',
'test/core/end2end/data/*.{c,h}', 'test/core/end2end/data/*.{c,h}',
'test/core/util/test_config.{c,h}', 'test/core/util/test_config.{c,h}',

@ -43,6 +43,8 @@
static bool g_pre_init_called = false; static bool g_pre_init_called = false;
extern void authority_not_supported(grpc_end2end_test_config config);
extern void authority_not_supported_pre_init(void);
extern void bad_hostname(grpc_end2end_test_config config); extern void bad_hostname(grpc_end2end_test_config config);
extern void bad_hostname_pre_init(void); extern void bad_hostname_pre_init(void);
extern void binary_metadata(grpc_end2end_test_config config); extern void binary_metadata(grpc_end2end_test_config config);
@ -135,6 +137,7 @@ extern void trailing_metadata_pre_init(void);
void grpc_end2end_tests_pre_init(void) { void grpc_end2end_tests_pre_init(void) {
GPR_ASSERT(!g_pre_init_called); GPR_ASSERT(!g_pre_init_called);
g_pre_init_called = true; g_pre_init_called = true;
authority_not_supported_pre_init();
bad_hostname_pre_init(); bad_hostname_pre_init();
binary_metadata_pre_init(); binary_metadata_pre_init();
cancel_after_accept_pre_init(); cancel_after_accept_pre_init();
@ -188,6 +191,7 @@ void grpc_end2end_tests(int argc, char **argv,
GPR_ASSERT(g_pre_init_called); GPR_ASSERT(g_pre_init_called);
if (argc <= 1) { if (argc <= 1) {
authority_not_supported(config);
bad_hostname(config); bad_hostname(config);
binary_metadata(config); binary_metadata(config);
cancel_after_accept(config); cancel_after_accept(config);
@ -236,6 +240,10 @@ void grpc_end2end_tests(int argc, char **argv,
} }
for (i = 1; i < argc; i++) { for (i = 1; i < argc; i++) {
if (0 == strcmp("authority_not_supported", argv[i])) {
authority_not_supported(config);
continue;
}
if (0 == strcmp("bad_hostname", argv[i])) { if (0 == strcmp("bad_hostname", argv[i])) {
bad_hostname(config); bad_hostname(config);
continue; continue;

@ -43,6 +43,8 @@
static bool g_pre_init_called = false; static bool g_pre_init_called = false;
extern void authority_not_supported(grpc_end2end_test_config config);
extern void authority_not_supported_pre_init(void);
extern void bad_hostname(grpc_end2end_test_config config); extern void bad_hostname(grpc_end2end_test_config config);
extern void bad_hostname_pre_init(void); extern void bad_hostname_pre_init(void);
extern void binary_metadata(grpc_end2end_test_config config); extern void binary_metadata(grpc_end2end_test_config config);
@ -137,6 +139,7 @@ extern void trailing_metadata_pre_init(void);
void grpc_end2end_tests_pre_init(void) { void grpc_end2end_tests_pre_init(void) {
GPR_ASSERT(!g_pre_init_called); GPR_ASSERT(!g_pre_init_called);
g_pre_init_called = true; g_pre_init_called = true;
authority_not_supported_pre_init();
bad_hostname_pre_init(); bad_hostname_pre_init();
binary_metadata_pre_init(); binary_metadata_pre_init();
call_creds_pre_init(); call_creds_pre_init();
@ -191,6 +194,7 @@ void grpc_end2end_tests(int argc, char **argv,
GPR_ASSERT(g_pre_init_called); GPR_ASSERT(g_pre_init_called);
if (argc <= 1) { if (argc <= 1) {
authority_not_supported(config);
bad_hostname(config); bad_hostname(config);
binary_metadata(config); binary_metadata(config);
call_creds(config); call_creds(config);
@ -240,6 +244,10 @@ void grpc_end2end_tests(int argc, char **argv,
} }
for (i = 1; i < argc; i++) { for (i = 1; i < argc; i++) {
if (0 == strcmp("authority_not_supported", argv[i])) {
authority_not_supported(config);
continue;
}
if (0 == strcmp("bad_hostname", argv[i])) { if (0 == strcmp("bad_hostname", argv[i])) {
bad_hostname(config); bad_hostname(config);
continue; continue;

@ -141,6 +141,7 @@ END2END_TESTS = {
'simple_request': default_test_options, 'simple_request': default_test_options,
'streaming_error_response': default_test_options, 'streaming_error_response': default_test_options,
'trailing_metadata': default_test_options, 'trailing_metadata': default_test_options,
'authority_not_supported': default_test_options,
} }

@ -0,0 +1,193 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
/* Converts an integer into the opaque tag pointer used by the
   completion-queue API. */
static void *tag(intptr_t t) {
  return (void *)t;
}
/* Creates the test fixture for |config|, then brings up the server first
   and the client second. |client_args|/|server_args| may be NULL. */
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
                                            const char *test_name,
                                            grpc_channel_args *client_args,
                                            grpc_channel_args *server_args) {
  gpr_log(GPR_INFO, "%s/%s", test_name, config.name);
  grpc_end2end_test_fixture fixture =
      config.create_fixture(client_args, server_args);
  config.init_server(&fixture, server_args);
  config.init_client(&fixture, client_args);
  return fixture;
}
/* Builds an absolute deadline |n| seconds in the future. */
static gpr_timespec n_seconds_time(int n) {
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(n);
  return deadline;
}
/* Standard deadline used throughout this test: five seconds from now. */
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
/* Pulls and discards events from |cq| until the queue reports shutdown. */
static void drain_cq(grpc_completion_queue *cq) {
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(cq, five_seconds_time(), NULL);
    if (ev.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
  }
}
/* Shuts down and destroys the fixture's server, if present. Blocks until
   the shutdown notification (tag 1000) is observed on the fixture's
   completion queue. */
static void shutdown_server(grpc_end2end_test_fixture *f) {
  if (f->server == NULL) return;
  grpc_server_shutdown_and_notify(f->server, f->cq, tag(1000));
  grpc_event ev = grpc_completion_queue_pluck(
      f->cq, tag(1000), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5), NULL);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
  grpc_server_destroy(f->server);
  f->server = NULL;
}
/* Destroys the fixture's client channel, if present. */
static void shutdown_client(grpc_end2end_test_fixture *f) {
  if (f->client == NULL) return;
  grpc_channel_destroy(f->client);
  f->client = NULL;
}
/* Tears down the fixture: stops server and client, then shuts down,
   drains, and destroys the completion queue. */
static void end_test(grpc_end2end_test_fixture *f) {
  shutdown_server(f);
  shutdown_client(f);
  grpc_completion_queue_shutdown(f->cq);
  drain_cq(f->cq);
  grpc_completion_queue_destroy(f->cq);
}
/* Starts a call whose host argument becomes the :authority header, on a
   transport that does NOT support :authority, and verifies the call is
   terminated with GRPC_STATUS_CANCELLED. */
static void test_with_authority_header(grpc_end2end_test_config config) {
  grpc_call *c;
  grpc_slice request_payload_slice = grpc_slice_from_copied_string("hello world");
  grpc_byte_buffer *request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  gpr_timespec deadline = five_seconds_time();
  /* Two arbitrary metadata entries; the 4 is the value length ("valN"). */
  grpc_metadata meta_c[2] = {
      {"key1", "val1", 4, 0, {{NULL, NULL, NULL, NULL}}},
      {"key2", "val2", 4, 0, {{NULL, NULL, NULL, NULL}}}};
  grpc_end2end_test_fixture f =
      begin_test(config, "test_with_authority_header", NULL, NULL);
  cq_verifier *cqv = cq_verifier_create(f.cq);
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_byte_buffer *response_payload_recv = NULL;
  grpc_status_code status;
  grpc_call_error error;
  char *details = NULL;
  size_t details_capacity = 0;
  /* "foo.test.google.fr" is passed as host and becomes :authority. */
  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
                               "/foo", "foo.test.google.fr", deadline, NULL);
  GPR_ASSERT(c);
  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);
  /* Batch all six client-side ops into one grpc_call_start_batch. The op
     order mirrors other end2end tests; each op is filled in sequence. */
  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 2;
  op->data.send_initial_metadata.metadata = meta_c;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message = request_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message = &response_payload_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);
  /* The transport is expected to cancel the call itself (no server
     interaction), so only tag(1) completes. */
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);
  GPR_ASSERT(status == GRPC_STATUS_CANCELLED);
  gpr_free(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_call_destroy(c);
  cq_verifier_destroy(cqv);
  grpc_byte_buffer_destroy(request_payload);
  grpc_byte_buffer_destroy(response_payload_recv);
  end_test(&f);
  config.tear_down_data(&f);
}
/* Test entry point: runs only against transports that do NOT advertise
   support for the :authority header (e.g. Cronet). */
void authority_not_supported(grpc_end2end_test_config config) {
  if ((config.feature_mask & FEATURE_MASK_SUPPORTS_AUTHORITY_HEADER) != 0) {
    return;
  }
  test_with_authority_header(config);
}
/* No per-test global state to initialize. */
void authority_not_supported_pre_init(void) {}

@ -125,6 +125,8 @@ static double UserTime(ClientStats s) { return s.time_user(); }
static double ServerWallTime(ServerStats s) { return s.time_elapsed(); } static double ServerWallTime(ServerStats s) { return s.time_elapsed(); }
static double ServerSystemTime(ServerStats s) { return s.time_system(); } static double ServerSystemTime(ServerStats s) { return s.time_system(); }
static double ServerUserTime(ServerStats s) { return s.time_user(); } static double ServerUserTime(ServerStats s) { return s.time_user(); }
static double ServerTotalCpuTime(ServerStats s) { return s.total_cpu_time(); }
static double ServerIdleCpuTime(ServerStats s) { return s.idle_cpu_time(); }
static int Cores(int n) { return n; } static int Cores(int n) { return n; }
// Postprocess ScenarioResult and populate result summary. // Postprocess ScenarioResult and populate result summary.
@ -149,6 +151,7 @@ static void postprocess_scenario_result(ScenarioResult* result) {
sum(result->server_stats(), ServerWallTime); sum(result->server_stats(), ServerWallTime);
auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) / auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) /
sum(result->server_stats(), ServerWallTime); sum(result->server_stats(), ServerWallTime);
auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) / auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) /
sum(result->client_stats(), WallTime); sum(result->client_stats(), WallTime);
auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) / auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) /
@ -159,6 +162,18 @@ static void postprocess_scenario_result(ScenarioResult* result) {
result->mutable_summary()->set_client_system_time(client_system_time); result->mutable_summary()->set_client_system_time(client_system_time);
result->mutable_summary()->set_client_user_time(client_user_time); result->mutable_summary()->set_client_user_time(client_user_time);
// For Non-linux platform, get_cpu_usage() is not implemented. Thus,
// ServerTotalCpuTime and ServerIdleCpuTime are both 0.
if (average(result->server_stats(), ServerTotalCpuTime) == 0) {
result->mutable_summary()->set_server_cpu_usage(0);
} else {
auto server_cpu_usage =
100 -
100 * average(result->server_stats(), ServerIdleCpuTime) /
average(result->server_stats(), ServerTotalCpuTime);
result->mutable_summary()->set_server_cpu_usage(server_cpu_usage);
}
if (result->request_results_size() > 0) { if (result->request_results_size() > 0) {
int64_t successes = 0; int64_t successes = 0;
int64_t failures = 0; int64_t failures = 0;

@ -49,10 +49,111 @@ DEFINE_string(scenarios_file, "",
DEFINE_string(scenarios_json, "", DEFINE_string(scenarios_json, "",
"JSON string containing an array of Scenario objects"); "JSON string containing an array of Scenario objects");
DEFINE_bool(quit, false, "Quit the workers"); DEFINE_bool(quit, false, "Quit the workers");
DEFINE_string(search_param, "",
"The parameter, whose value is to be searched for to achieve "
"targeted cpu load. For now, we have 'offered_load'. Later, "
"'num_channels', 'num_outstanding_requests', etc. shall be "
"added.");
DEFINE_double(
initial_search_value, 0.0,
"initial parameter value to start the search with (i.e. lower bound)");
DEFINE_double(targeted_cpu_load, 70.0,
"Targeted cpu load (unit: %, range [0,100])");
DEFINE_double(stride, 1,
"Defines each stride of the search. The larger the stride is, "
"the coarser the result will be, but will also be faster.");
DEFINE_double(error_tolerance, 0.01,
"Defines threshold for stopping the search. When current search "
"range is narrower than the error_tolerance computed range, we "
"stop the search.");
namespace grpc { namespace grpc {
namespace testing { namespace testing {
/* Runs a single benchmark scenario, pushes every metric through the
 * registered reporters, and folds per-client/per-server success flags into
 * |*success|. Returns the result, annotated with the scenario config. */
static std::unique_ptr<ScenarioResult> RunAndReport(const Scenario& scenario,
                                                    bool* success) {
  std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n";
  auto result =
      RunScenario(scenario.client_config(), scenario.num_clients(),
                  scenario.server_config(), scenario.num_servers(),
                  scenario.warmup_seconds(), scenario.benchmark_seconds(),
                  scenario.spawn_local_worker_count());
  // Amend the result with scenario config. Eventually we should adjust
  // RunScenario contract so we don't need to touch the result here.
  result->mutable_scenario()->CopyFrom(scenario);
  GetReporter()->ReportQPS(*result);
  GetReporter()->ReportQPSPerCore(*result);
  GetReporter()->ReportLatency(*result);
  GetReporter()->ReportTimes(*result);
  GetReporter()->ReportCpuUsage(*result);
  // Stop scanning at the first false flag: one failed client or server
  // marks the whole run as failed.
  for (int i = 0; *success && i < result->client_success_size(); i++) {
    *success = result->client_success(i);
  }
  for (int i = 0; *success && i < result->server_success_size(); i++) {
    *success = result->server_success(i);
  }
  return result;
}
/* Runs |scenario| with a poisson offered load of |offered_load| and returns
 * the measured server CPU usage percentage (0 when the server platform does
 * not report CPU counters). Mutates the scenario's client load params. */
static double GetCpuLoad(Scenario* scenario, double offered_load,
                         bool* success) {
  scenario->mutable_client_config()
      ->mutable_load_params()
      ->mutable_poisson()
      ->set_offered_load(offered_load);
  auto result = RunAndReport(*scenario, success);
  return result->summary().server_cpu_usage();
}
/* Binary-searches [low, high] for the smallest offered load whose measured
 * server CPU usage reaches |targeted_cpu_load|, narrowing by FLAGS_stride
 * per step. Stops once the range is within FLAGS_error_tolerance of |high|
 * (the answer is approximate by design), or early on a failed run.
 * Returns the current lower bound. */
static double BinarySearch(Scenario* scenario, double targeted_cpu_load,
                           double low, double high, bool* success) {
  while (low <= high * (1 - FLAGS_error_tolerance)) {
    double mid = low + (high - low) / 2;
    double current_cpu_load = GetCpuLoad(scenario, mid, success);
    gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f", mid);
    if (!*success) {
      gpr_log(GPR_ERROR, "Client/Server Failure");
      break;
    }
    if (targeted_cpu_load <= current_cpu_load) {
      /* Overloaded at mid: continue in the lower half. */
      high = mid - FLAGS_stride;
    } else {
      /* Underloaded at mid: continue in the upper half. */
      low = mid + FLAGS_stride;
    }
  }
  return low;
}
/* Finds an offered load that drives server CPU usage to
 * |targeted_cpu_load|: doubles the load starting from
 * |initial_offered_load| until the target is exceeded, then binary-searches
 * the last doubling interval. Returns -1 if the initial load already
 * exceeds the target (caller should pick a lower starting point). */
static double SearchOfferedLoad(double initial_offered_load,
                                double targeted_cpu_load, Scenario* scenario,
                                bool* success) {
  std::cerr << "RUNNING SCENARIO: " << scenario->name() << "\n";
  double current_offered_load = initial_offered_load;
  double current_cpu_load = GetCpuLoad(scenario, current_offered_load, success);
  if (current_cpu_load > targeted_cpu_load) {
    gpr_log(GPR_ERROR, "Initial offered load too high");
    return -1;
  }
  /* Exponential phase: double the load until the target is reached or a
     run fails. */
  while (*success && (current_cpu_load < targeted_cpu_load)) {
    current_offered_load *= 2;
    current_cpu_load = GetCpuLoad(scenario, current_offered_load, success);
    gpr_log(GPR_DEBUG, "Binary Search: current_offered_load %.0f",
            current_offered_load);
  }
  /* Refinement phase: search the interval [load/2, load]. */
  double targeted_offered_load =
      BinarySearch(scenario, targeted_cpu_load, current_offered_load / 2,
                   current_offered_load, success);
  return targeted_offered_load;
}
static bool QpsDriver() { static bool QpsDriver() {
grpc::string json; grpc::string json;
@ -68,11 +169,11 @@ static bool QpsDriver() {
if (scfile) { if (scfile) {
// Read the json data from disk // Read the json data from disk
FILE *json_file = fopen(FLAGS_scenarios_file.c_str(), "r"); FILE* json_file = fopen(FLAGS_scenarios_file.c_str(), "r");
GPR_ASSERT(json_file != NULL); GPR_ASSERT(json_file != NULL);
fseek(json_file, 0, SEEK_END); fseek(json_file, 0, SEEK_END);
long len = ftell(json_file); long len = ftell(json_file);
char *data = new char[len]; char* data = new char[len];
fseek(json_file, 0, SEEK_SET); fseek(json_file, 0, SEEK_SET);
GPR_ASSERT(len == (long)fread(data, 1, len, json_file)); GPR_ASSERT(len == (long)fread(data, 1, len, json_file));
fclose(json_file); fclose(json_file);
@ -93,28 +194,19 @@ static bool QpsDriver() {
GPR_ASSERT(scenarios.scenarios_size() > 0); GPR_ASSERT(scenarios.scenarios_size() > 0);
for (int i = 0; i < scenarios.scenarios_size(); i++) { for (int i = 0; i < scenarios.scenarios_size(); i++) {
const Scenario &scenario = scenarios.scenarios(i); if (FLAGS_search_param == "") {
std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n"; const Scenario& scenario = scenarios.scenarios(i);
auto result = RunAndReport(scenario, &success);
RunScenario(scenario.client_config(), scenario.num_clients(), } else {
scenario.server_config(), scenario.num_servers(), if (FLAGS_search_param == "offered_load") {
scenario.warmup_seconds(), scenario.benchmark_seconds(), Scenario* scenario = scenarios.mutable_scenarios(i);
scenario.spawn_local_worker_count()); double targeted_offered_load =
SearchOfferedLoad(FLAGS_initial_search_value,
// Amend the result with scenario config. Eventually we should adjust FLAGS_targeted_cpu_load, scenario, &success);
// RunScenario contract so we don't need to touch the result here. gpr_log(GPR_INFO, "targeted_offered_load %f", targeted_offered_load);
result->mutable_scenario()->CopyFrom(scenario); } else {
gpr_log(GPR_ERROR, "Unimplemented search param");
GetReporter()->ReportQPS(*result); }
GetReporter()->ReportQPSPerCore(*result);
GetReporter()->ReportLatency(*result);
GetReporter()->ReportTimes(*result);
for (int i = 0; success && i < result->client_success_size(); i++) {
success = result->client_success(i);
}
for (int i = 0; success && i < result->server_success_size(); i++) {
success = result->server_success(i);
} }
} }
return success; return success;
@ -123,7 +215,7 @@ static bool QpsDriver() {
} // namespace testing } // namespace testing
} // namespace grpc } // namespace grpc
int main(int argc, char **argv) { int main(int argc, char** argv) {
grpc::testing::InitBenchmark(&argc, &argv, true); grpc::testing::InitBenchmark(&argc, &argv, true);
bool ok = grpc::testing::QpsDriver(); bool ok = grpc::testing::QpsDriver();

@ -71,6 +71,12 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) {
} }
} }
void CompositeReporter::ReportCpuUsage(const ScenarioResult& result) {
for (size_t i = 0; i < reporters_.size(); ++i) {
reporters_[i]->ReportCpuUsage(result);
}
}
void GprLogReporter::ReportQPS(const ScenarioResult& result) { void GprLogReporter::ReportQPS(const ScenarioResult& result) {
gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps()); gpr_log(GPR_INFO, "QPS: %.1f", result.summary().qps());
if (result.summary().failed_requests_per_second() > 0) { if (result.summary().failed_requests_per_second() > 0) {
@ -107,6 +113,11 @@ void GprLogReporter::ReportTimes(const ScenarioResult& result) {
result.summary().client_user_time()); result.summary().client_user_time());
} }
void GprLogReporter::ReportCpuUsage(const ScenarioResult& result) {
gpr_log(GPR_INFO, "Server CPU usage: %.2f%%",
result.summary().server_cpu_usage());
}
void JsonReporter::ReportQPS(const ScenarioResult& result) { void JsonReporter::ReportQPS(const ScenarioResult& result) {
grpc::string json_string = grpc::string json_string =
SerializeJson(result, "type.googleapis.com/grpc.testing.ScenarioResult"); SerializeJson(result, "type.googleapis.com/grpc.testing.ScenarioResult");
@ -127,5 +138,9 @@ void JsonReporter::ReportTimes(const ScenarioResult& result) {
// NOP - all reporting is handled by ReportQPS. // NOP - all reporting is handled by ReportQPS.
} }
void JsonReporter::ReportCpuUsage(const ScenarioResult& result) {
// NOP - all reporting is handled by ReportQPS.
}
} // namespace testing } // namespace testing
} // namespace grpc } // namespace grpc

@ -70,6 +70,9 @@ class Reporter {
/** Reports system and user time for client and server systems. */ /** Reports system and user time for client and server systems. */
virtual void ReportTimes(const ScenarioResult& result) = 0; virtual void ReportTimes(const ScenarioResult& result) = 0;
/** Reports server cpu usage. */
virtual void ReportCpuUsage(const ScenarioResult& result) = 0;
private: private:
const string name_; const string name_;
}; };
@ -86,6 +89,7 @@ class CompositeReporter : public Reporter {
void ReportQPSPerCore(const ScenarioResult& result) override; void ReportQPSPerCore(const ScenarioResult& result) override;
void ReportLatency(const ScenarioResult& result) override; void ReportLatency(const ScenarioResult& result) override;
void ReportTimes(const ScenarioResult& result) override; void ReportTimes(const ScenarioResult& result) override;
void ReportCpuUsage(const ScenarioResult& result) override;
private: private:
std::vector<std::unique_ptr<Reporter> > reporters_; std::vector<std::unique_ptr<Reporter> > reporters_;
@ -101,6 +105,7 @@ class GprLogReporter : public Reporter {
void ReportQPSPerCore(const ScenarioResult& result) override; void ReportQPSPerCore(const ScenarioResult& result) override;
void ReportLatency(const ScenarioResult& result) override; void ReportLatency(const ScenarioResult& result) override;
void ReportTimes(const ScenarioResult& result) override; void ReportTimes(const ScenarioResult& result) override;
void ReportCpuUsage(const ScenarioResult& result) override;
}; };
/** Dumps the report to a JSON file. */ /** Dumps the report to a JSON file. */
@ -114,6 +119,7 @@ class JsonReporter : public Reporter {
void ReportQPSPerCore(const ScenarioResult& result) override; void ReportQPSPerCore(const ScenarioResult& result) override;
void ReportLatency(const ScenarioResult& result) override; void ReportLatency(const ScenarioResult& result) override;
void ReportTimes(const ScenarioResult& result) override; void ReportTimes(const ScenarioResult& result) override;
void ReportCpuUsage(const ScenarioResult& result) override;
const string report_file_; const string report_file_;
}; };

@ -75,6 +75,8 @@ class Server {
stats.set_time_elapsed(timer_result.wall); stats.set_time_elapsed(timer_result.wall);
stats.set_time_system(timer_result.system); stats.set_time_system(timer_result.system);
stats.set_time_user(timer_result.user); stats.set_time_user(timer_result.user);
stats.set_total_cpu_time(timer_result.total_cpu_time);
stats.set_idle_cpu_time(timer_result.idle_cpu_time);
return stats; return stats;
} }

@ -33,10 +33,14 @@
#include "test/cpp/qps/usage_timer.h" #include "test/cpp/qps/usage_timer.h"
#include <fstream>
#include <sstream>
#include <string>
#include <grpc/support/log.h>
#include <grpc/support/time.h> #include <grpc/support/time.h>
#include <sys/resource.h> #include <sys/resource.h>
#include <sys/time.h> #include <sys/time.h>
UsageTimer::UsageTimer() : start_(Sample()) {} UsageTimer::UsageTimer() : start_(Sample()) {}
double UsageTimer::Now() { double UsageTimer::Now() {
@ -48,6 +52,27 @@ static double time_double(struct timeval* tv) {
return tv->tv_sec + 1e-6 * tv->tv_usec; return tv->tv_sec + 1e-6 * tv->tv_usec;
} }
static void get_cpu_usage(unsigned long long* total_cpu_time,
unsigned long long* idle_cpu_time) {
#ifdef __linux__
std::ifstream proc_stat("/proc/stat");
proc_stat.ignore(5);
std::string cpu_time_str;
std::string first_line;
std::getline(proc_stat, first_line);
std::stringstream first_line_s(first_line);
for (int i = 0; i < 10; ++i) {
std::getline(first_line_s, cpu_time_str, ' ');
*total_cpu_time += std::stol(cpu_time_str);
if (i == 3) {
*idle_cpu_time = std::stol(cpu_time_str);
}
}
#else
gpr_log(GPR_INFO, "get_cpu_usage(): Non-linux platform is not supported.");
#endif
}
UsageTimer::Result UsageTimer::Sample() { UsageTimer::Result UsageTimer::Sample() {
struct rusage usage; struct rusage usage;
struct timeval tv; struct timeval tv;
@ -58,6 +83,9 @@ UsageTimer::Result UsageTimer::Sample() {
r.wall = time_double(&tv); r.wall = time_double(&tv);
r.user = time_double(&usage.ru_utime); r.user = time_double(&usage.ru_utime);
r.system = time_double(&usage.ru_stime); r.system = time_double(&usage.ru_stime);
r.total_cpu_time = 0;
r.idle_cpu_time = 0;
get_cpu_usage(&r.total_cpu_time, &r.idle_cpu_time);
return r; return r;
} }
@ -67,5 +95,8 @@ UsageTimer::Result UsageTimer::Mark() const {
r.wall = s.wall - start_.wall; r.wall = s.wall - start_.wall;
r.user = s.user - start_.user; r.user = s.user - start_.user;
r.system = s.system - start_.system; r.system = s.system - start_.system;
r.total_cpu_time = s.total_cpu_time - start_.total_cpu_time;
r.idle_cpu_time = s.idle_cpu_time - start_.idle_cpu_time;
return r; return r;
} }

@ -42,6 +42,8 @@ class UsageTimer {
double wall; double wall;
double user; double user;
double system; double system;
unsigned long long total_cpu_time;
unsigned long long idle_cpu_time;
}; };
Result Mark() const; Result Mark() const;

@ -31,7 +31,7 @@
"""Filter out tests based on file differences compared to merge target branch""" """Filter out tests based on file differences compared to merge target branch"""
import re import re
from subprocess import call, check_output from subprocess import check_output
class TestSuite: class TestSuite:
@ -56,7 +56,6 @@ class TestSuite:
# Create test suites # Create test suites
_SANITY_TEST_SUITE = TestSuite(['sanity'])
_CORE_TEST_SUITE = TestSuite(['c']) _CORE_TEST_SUITE = TestSuite(['c'])
_CPP_TEST_SUITE = TestSuite(['c++']) _CPP_TEST_SUITE = TestSuite(['c++'])
_CSHARP_TEST_SUITE = TestSuite(['csharp']) _CSHARP_TEST_SUITE = TestSuite(['csharp'])
@ -68,15 +67,16 @@ _RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux']) _LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows']) _WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos']) _MACOS_TEST_SUITE = TestSuite(['macos'])
_ALL_TEST_SUITES = [_SANITY_TEST_SUITE, _CORE_TEST_SUITE, _CPP_TEST_SUITE, _ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
_CSHARP_TEST_SUITE, _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
_PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
_LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE] _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
# Dictionary of whitelistable files where the key is a regex matching changed files # Dictionary of whitelistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that # and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not # the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests # match any of these regexes will trigger all tests
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_WHITELIST_DICT = { _WHITELIST_DICT = {
'^doc/': [], '^doc/': [],
'^examples/': [], '^examples/': [],
@ -89,7 +89,7 @@ _WHITELIST_DICT = {
'^src/php/': [_PHP_TEST_SUITE], '^src/php/': [_PHP_TEST_SUITE],
'^src/python/': [_PYTHON_TEST_SUITE], '^src/python/': [_PYTHON_TEST_SUITE],
'^src/ruby/': [_RUBY_TEST_SUITE], '^src/ruby/': [_RUBY_TEST_SUITE],
'^templates/': [_SANITY_TEST_SUITE], '^templates/': [],
'^test/core/': [_CORE_TEST_SUITE], '^test/core/': [_CORE_TEST_SUITE],
'^test/cpp/': [_CPP_TEST_SUITE], '^test/cpp/': [_CPP_TEST_SUITE],
'^test/distrib/cpp/': [_CPP_TEST_SUITE], '^test/distrib/cpp/': [_CPP_TEST_SUITE],
@ -104,7 +104,7 @@ _WHITELIST_DICT = {
'config\.m4$': [_PHP_TEST_SUITE], 'config\.m4$': [_PHP_TEST_SUITE],
'CONTRIBUTING\.md$': [], 'CONTRIBUTING\.md$': [],
'Gemfile$': [_RUBY_TEST_SUITE], 'Gemfile$': [_RUBY_TEST_SUITE],
'grpc.def$': [_WINDOWS_TEST_SUITE], 'grpc\.def$': [_WINDOWS_TEST_SUITE],
'grpc\.gemspec$': [_RUBY_TEST_SUITE], 'grpc\.gemspec$': [_RUBY_TEST_SUITE],
'gRPC\.podspec$': [_OBJC_TEST_SUITE], 'gRPC\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE], 'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
@ -171,11 +171,11 @@ def filter_tests(tests, base_branch):
:param tests: list of all tests generated by run_tests_matrix.py :param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests :return: list of relevant tests
""" """
print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch) print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
changed_files = _get_changed_files(base_branch) changed_files = _get_changed_files(base_branch)
for changed_file in changed_files: for changed_file in changed_files:
print(changed_file) print(' %s' % changed_file)
print print('')
# Regex that combines all keys in _WHITELIST_DICT # Regex that combines all keys in _WHITELIST_DICT
all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")" all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
@ -188,8 +188,8 @@ def filter_tests(tests, base_branch):
for test_suite in _ALL_TEST_SUITES: for test_suite in _ALL_TEST_SUITES:
if _can_skip_tests(changed_files, test_suite.triggers): if _can_skip_tests(changed_files, test_suite.triggers):
for label in test_suite.labels: for label in test_suite.labels:
print(" Filtering %s tests" % label) print(' %s tests safe to skip' % label)
skippable_labels.append(label) skippable_labels.append(label)
tests = _remove_irrelevant_tests(tests, skippable_labels) tests = _remove_irrelevant_tests(tests, skippable_labels)
return tests return tests

@ -96,6 +96,7 @@ _COLORS = {
'lightgray': [ 37, 0], 'lightgray': [ 37, 0],
'gray': [ 30, 1 ], 'gray': [ 30, 1 ],
'purple': [ 35, 0 ], 'purple': [ 35, 0 ],
'cyan': [ 36, 0 ]
} }
@ -114,6 +115,7 @@ _TAG_COLOR = {
'WAITING': 'yellow', 'WAITING': 'yellow',
'SUCCESS': 'green', 'SUCCESS': 'green',
'IDLE': 'gray', 'IDLE': 'gray',
'SKIPPED': 'cyan'
} }
@ -450,7 +452,16 @@ def run(cmdlines,
travis=False, travis=False,
infinite_runs=False, infinite_runs=False,
stop_on_failure=False, stop_on_failure=False,
add_env={}): add_env={},
skip_jobs=False):
if skip_jobs:
results = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
results[job.shortname] = [skipped_job_result]
return results
js = Jobset(check_cancelled, js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env) newline_on_success, travis, stop_on_failure, add_env)

@ -74,6 +74,8 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
ET.SubElement(xml_test, 'failure', message='Failure') ET.SubElement(xml_test, 'failure', message='Failure')
elif result.state == 'TIMEOUT': elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout') ET.SubElement(xml_test, 'error', message='Timeout')
elif result.state == 'SKIPPED':
ET.SubElement(xml_test, 'skipped', message='Skipped')
tree = ET.ElementTree(root) tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8') tree.write(xml_report, encoding='UTF-8')

@ -241,105 +241,110 @@ def _allowed_labels():
return sorted(all_labels) return sorted(all_labels)
argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') if __name__ == "__main__":
argp.add_argument('-j', '--jobs', argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, argp.add_argument('-j', '--jobs',
type=int, default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
help='Number of concurrent run_tests.py instances.') type=int,
argp.add_argument('-f', '--filter', help='Number of concurrent run_tests.py instances.')
choices=_allowed_labels(), argp.add_argument('-f', '--filter',
nargs='+', choices=_allowed_labels(),
default=[], nargs='+',
help='Filter targets to run by label with AND semantics.') default=[],
argp.add_argument('--build_only', help='Filter targets to run by label with AND semantics.')
default=False, argp.add_argument('--build_only',
action='store_const', default=False,
const=True, action='store_const',
help='Pass --build_only flag to run_tests.py instances.') const=True,
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True, help='Pass --build_only flag to run_tests.py instances.')
help='Pass --force_default_poller to run_tests.py instances.') argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
argp.add_argument('--dry_run', help='Pass --force_default_poller to run_tests.py instances.')
default=False, argp.add_argument('--dry_run',
action='store_const', default=False,
const=True, action='store_const',
help='Only print what would be run.') const=True,
argp.add_argument('--filter_pr_tests', help='Only print what would be run.')
default=False, argp.add_argument('--filter_pr_tests',
action='store_const', default=False,
const=True, action='store_const',
help='Filters out tests irrelavant to pull request changes.') const=True,
argp.add_argument('--base_branch', help='Filters out tests irrelavant to pull request changes.')
default='origin/master', argp.add_argument('--base_branch',
type=str, default='origin/master',
help='Branch that pull request is requesting to merge into') type=str,
argp.add_argument('--inner_jobs', help='Branch that pull request is requesting to merge into')
default=_DEFAULT_INNER_JOBS, argp.add_argument('--inner_jobs',
type=int, default=_DEFAULT_INNER_JOBS,
help='Number of jobs in each run_tests.py instance') type=int,
args = argp.parse_args() help='Number of jobs in each run_tests.py instance')
args = argp.parse_args()
extra_args = [] extra_args = []
if args.build_only: if args.build_only:
extra_args.append('--build_only') extra_args.append('--build_only')
if args.force_default_poller: if args.force_default_poller:
extra_args.append('--force_default_poller') extra_args.append('--force_default_poller')
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = [] jobs = []
for job in all_jobs: for job in all_jobs:
if not args.filter or all(filter in job.labels for filter in args.filter): if not args.filter or all(filter in job.labels for filter in args.filter):
jobs.append(job) jobs.append(job)
if not jobs: if not jobs:
jobset.message('FAILED', 'No test suites match given criteria.', jobset.message('FAILED', 'No test suites match given criteria.',
do_newline=True) do_newline=True)
sys.exit(1) sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed') print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be') print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.') print('copied to the docker environment or into subworkspaces.')
print skipped_jobs = []
print 'Will run these tests:'
for job in jobs: if args.filter_pr_tests:
if args.dry_run: print('Looking for irrelevant tests to skip...')
print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)) relevant_jobs = filter_tests(jobs, args.base_branch)
else: if len(relevant_jobs) == len(jobs):
print ' %s' % job.shortname print('No tests will be skipped.')
print else:
print('These tests will be skipped:')
if args.filter_pr_tests: skipped_jobs = [job for job in jobs if job not in relevant_jobs]
print 'IMPORTANT: Test filtering is not active; this is only for testing.' for job in list(skipped_jobs):
relevant_jobs = filter_tests(jobs, args.base_branch) print(' %s' % job.shortname)
# todo(mattkwong): add skipped tests to report.xml jobs = relevant_jobs
print
if len(relevant_jobs) == len(jobs): print('Will run these tests:')
print '(TESTING) No tests will be skipped.' for job in jobs:
else: if args.dry_run:
print '(TESTING) These tests will be skipped:' print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
for job in list(set(jobs) - set(relevant_jobs)): else:
print ' %s' % job.shortname print(' %s' % job.shortname)
print print
if args.dry_run: if args.dry_run:
print '--dry_run was used, exiting' print('--dry_run was used, exiting')
sys.exit(1) sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True) jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs, num_failures, resultset = jobset.run(jobs,
newline_on_success=True, newline_on_success=True,
travis=True, travis=True,
maxjobs=args.jobs) maxjobs=args.jobs)
report_utils.render_junit_xml_report(resultset, 'report.xml', # Merge skipped tests into results to show skipped tests on report.xml
suite_name='aggregate_tests') if skipped_jobs:
skipped_results = jobset.run(skipped_jobs,
if num_failures == 0: skip_jobs=True)
jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.', resultset.update(skipped_results)
do_newline=True) report_utils.render_junit_xml_report(resultset, 'report.xml',
else: suite_name='aggregate_tests')
jobset.message('FAILED', 'Some run_tests.py instance have failed.',
do_newline=True) if num_failures == 0:
sys.exit(1) jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Some run_tests.py instance have failed.',
do_newline=True)
sys.exit(1)

@ -0,0 +1,149 @@
#!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
import re
# hack import paths to pick up extra code
sys.path.insert(0, os.path.abspath('tools/run_tests/'))
from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
import filter_pull_request_tests
_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby']
_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
class TestFilteringTest(unittest.TestCase):
def generate_all_tests(self):
all_jobs = _create_test_jobs() + _create_portability_test_jobs()
self.assertIsNotNone(all_jobs)
return all_jobs
def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
"""
Default args should filter no tests because changed_files is empty and
default labels should be able to match all jobs
:param changed_files: mock list of changed_files from pull request
:param labels: list of job labels that should be skipped
"""
all_jobs = self.generate_all_tests()
# Replacing _get_changed_files function to allow specifying changed files in filter_tests function
def _get_changed_files(foo):
return changed_files
filter_pull_request_tests._get_changed_files = _get_changed_files
print
filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
# Make sure sanity tests aren't being filtered out
sanity_tests_in_all_jobs = 0
sanity_tests_in_filtered_jobs = 0
for job in all_jobs:
if "sanity" in job.labels:
sanity_tests_in_all_jobs += 1
all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
for job in filtered_jobs:
if "sanity" in job.labels:
sanity_tests_in_filtered_jobs += 1
filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels]
self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs)
for label in labels:
for job in filtered_jobs:
self.assertNotIn(label, job.labels)
jobs_matching_labels = 0
for label in labels:
for job in all_jobs:
if (label in job.labels):
jobs_matching_labels += 1
self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
def test_individual_language_filters(self):
# Changing unlisted file should trigger all languages
self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
# Changing core should trigger all tests
self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
# Testing individual languages
self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels])
self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels])
self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels])
self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_combined_language_filters(self):
self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels and label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_platform_filter(self):
self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
def test_whitelist(self):
whitelist = filter_pull_request_tests._WHITELIST_DICT
files_that_should_trigger_all_tests = ['src/core/foo.bar',
'some_file_not_on_the_white_list',
'BUILD',
'etc/roots.pem',
'Makefile',
'tools/foo']
for key in whitelist.keys():
for file_name in files_that_should_trigger_all_tests:
self.assertFalse(re.match(key, file_name))
if __name__ == '__main__':
unittest.main(verbosity=2)

@ -2,6 +2,7 @@
- script: tools/run_tests/sanity/check_cache_mk.sh - script: tools/run_tests/sanity/check_cache_mk.sh
- script: tools/run_tests/sanity/check_sources_and_headers.py - script: tools/run_tests/sanity/check_sources_and_headers.py
- script: tools/run_tests/sanity/check_submodules.sh - script: tools/run_tests/sanity/check_submodules.sh
- script: tools/run_tests/sanity/check_test_filtering.py
- script: tools/buildgen/generate_projects.sh -j 3 - script: tools/buildgen/generate_projects.sh -j 3
cpu_cost: 3 cpu_cost: 3
- script: tools/distrib/check_copyright.py - script: tools/distrib/check_copyright.py

@ -6012,6 +6012,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_census_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -7049,6 +7071,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_compress_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -8041,6 +8085,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_fakesec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -8961,6 +9026,26 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_fd_test",
"platforms": [
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -9998,6 +10083,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -10853,6 +10960,22 @@
"linux" "linux"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full+pipe_test",
"platforms": [
"linux"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -11844,6 +11967,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full+trace_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -12924,6 +13069,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -13961,6 +14127,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_load_reporting_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -15041,6 +15229,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_oauth2_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -15953,6 +16162,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_proxy_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -16913,6 +17143,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_sockpair_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -17801,6 +18052,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_sockpair+trace_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -18815,6 +19087,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_sockpair_1byte_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -19852,6 +20145,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_ssl_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -20889,6 +21204,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_ssl_cert_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -21801,6 +22138,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_ssl_proxy_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -22813,6 +23171,26 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_uds_test",
"platforms": [
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -23827,6 +24205,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_census_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -24841,6 +25241,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_compress_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -25738,6 +26160,26 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_fd_nosec_test",
"platforms": [
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -26752,6 +27194,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -27588,6 +28052,22 @@
"linux" "linux"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full+pipe_nosec_test",
"platforms": [
"linux"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -28556,6 +29036,28 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_full+trace_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{ {
"args": [ "args": [
"bad_hostname" "bad_hostname"
@ -29612,6 +30114,27 @@
"posix" "posix"
] ]
}, },
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -30626,6 +31149,28 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_load_reporting_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -31514,6 +32059,27 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_proxy_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -32450,6 +33016,27 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_sockpair_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -33314,6 +33901,27 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_sockpair+trace_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -34302,6 +34910,29 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [
"msan"
],
"flaky": false,
"language": "c",
"name": "h2_sockpair_1byte_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"bad_hostname"
@@ -35291,6 +35922,26 @@
"posix"
]
},
{
"args": [
"authority_not_supported"
],
"ci_platforms": [
"linux",
"mac",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"flaky": false,
"language": "c",
"name": "h2_uds_nosec_test",
"platforms": [
"linux",
"mac",
"posix"
]
},
{
"args": [
"--scenarios_json",

Loading…
Cancel
Save