Merge github.com:grpc/grpc into write_completion

reviewable/pr11758/r3
Craig Tiller 7 years ago
commit 2e9afc02ac
  1. test/core/channel/channel_stack_builder_test.c (6)
  2. tools/run_tests/run_tests.py (25)

test/core/channel/channel_stack_builder_test.c:

@@ -59,10 +59,6 @@ static void channel_func(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
   GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
 }
 
-static char *get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
-  return gpr_strdup("peer");
-}
-
 bool g_replacement_fn_called = false;
 bool g_original_fn_called = false;
 void set_arg_once_fn(grpc_channel_stack *channel_stack,
@@ -94,7 +90,6 @@ const grpc_channel_filter replacement_filter = {
     0,
     channel_init_func,
     channel_destroy_func,
-    get_peer,
     grpc_channel_next_get_info,
     "filter_name"};
 
@@ -108,7 +103,6 @@ const grpc_channel_filter original_filter = {
     0,
     channel_init_func,
     channel_destroy_func,
-    get_peer,
     grpc_channel_next_get_info,
     "filter_name"};
 

tools/run_tests/run_tests.py:

@@ -69,17 +69,22 @@ _POLLING_STRATEGIES = {
 }
 
 
-def get_flaky_tests(limit=None):
+BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
+
+
+def get_bqtest_data(limit=None):
   import big_query_utils
 
   bq = big_query_utils.create_big_query()
   query = """
 SELECT
   filtered_test_name,
+  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
+  MAX(cpu_measured) as cpu
 FROM (
   SELECT
     REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
-    result
+    result, cpu_measured
   FROM
     [grpc-testing:jenkins_test_results.aggregate_results]
   WHERE
@@ -89,15 +94,15 @@ SELECT
 GROUP BY
   filtered_test_name
 HAVING
-  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0"""
+  flaky OR cpu > 0"""
   if limit:
     query += " limit {}".format(limit)
   query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
   page = bq.jobs().getQueryResults(
       pageToken=None,
       **query_job['jobReference']).execute(num_retries=3)
-  flake_names = [row['f'][0]['v'] for row in page['rows']]
-  return flake_names
+  test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
+  return test_data
 
 
 def platform_string():
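
Note (not part of the diff): getQueryResults returns each row's cells nested under row['f'][i]['v'], with every value serialized as a string, which is why the new code compares the BOOL column against 'true' and casts the FLOAT column. A self-contained sketch of that parsing, using a made-up response page:

    import collections

    BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')

    # Hypothetical page, shaped like bq.jobs().getQueryResults(...) output:
    # each row nests its cells under 'f', each value under 'v', as strings.
    page = {'rows': [
        {'f': [{'v': 'h2_full_test'}, {'v': 'true'}, {'v': '0.5'}]},
        {'f': [{'v': 'bm_fullstack'}, {'v': 'false'}, {'v': '2.0'}]},
    ]}

    test_data = [BigQueryTestData(row['f'][0]['v'],            # filtered_test_name
                                  row['f'][1]['v'] == 'true',  # flaky: BOOL as text
                                  float(row['f'][2]['v']))     # cpu: FLOAT as text
                 for row in page['rows']]
    assert test_data[0].flaky and test_data[1].cpu == 2.0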
@@ -141,6 +146,9 @@ class Config(object):
     if not flaky and shortname and shortname in flaky_tests:
       print('Setting %s to flaky' % shortname)
       flaky = True
+    if shortname in shortname_to_cpu:
+      print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
+      cpu_cost = shortname_to_cpu[shortname]
     return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                           shortname=shortname,
                           environ=actual_environ,
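
Note (not part of the diff): the added block lets the measured CPU cost from BigQuery override the statically declared one. A minimal sketch of that logic, with a hypothetical helper name and table entry:

    shortname_to_cpu = {'core_end2end_test': 2.5}  # hypothetical measured value

    def effective_cpu_cost(shortname, cpu_cost):
      # Same logic as the lines added to Config.job_spec: prefer the
      # measured CPU cost from BigQuery over the static default.
      if shortname in shortname_to_cpu:
        print('Update CPU cost for %s: %f -> %f'
              % (shortname, cpu_cost, shortname_to_cpu[shortname]))
        cpu_cost = shortname_to_cpu[shortname]
      return cpu_cost

    assert effective_cpu_cost('core_end2end_test', 1.0) == 2.5
    assert effective_cpu_cost('unknown_test', 1.0) == 1.0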
@@ -1254,9 +1262,12 @@ argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action
 args = argp.parse_args()
 
 flaky_tests = set()
+shortname_to_cpu = {}
 if not args.disable_auto_set_flakes:
   try:
-    flaky_tests = set(get_flaky_tests())
+    for test in get_bqtest_data():
+      if test.flaky: flaky_tests.add(test.name)
+      if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
   except:
     print("Unexpected error getting flaky tests:", sys.exc_info()[0])
@@ -1516,7 +1527,7 @@ def _build_and_run(
   # When running on travis, we want our test runs to be as similar as possible
   # for reproducibility purposes.
   if args.travis and args.max_time <= 0:
-    massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
+    massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
   else:
     # whereas otherwise, we want to shuffle things up to give all tests a
     # chance to run.
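
Note (not part of the diff): the sort key changes from shortname to the (now possibly BigQuery-measured) cpu_cost; jobs with equal cost keep their incoming relative order, since Python's sort is stable. A toy illustration with a hypothetical stand-in for jobset.JobSpec:

    import collections

    # Hypothetical stand-in for jobset.JobSpec, with only the fields used here.
    Job = collections.namedtuple('Job', 'shortname cpu_cost')

    one_run = [Job('c_test', 2.0), Job('a_test', 0.5), Job('b_test', 1.0)]
    massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
    print([j.shortname for j in massaged_one_run])  # ['a_test', 'b_test', 'c_test']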
