Merge pull request #9158 from jtattermusch/deflaking_adjustments

Allow running many runs per test without flooding the logs
Jan Tattermusch (committed by GitHub, 8 years ago)
commit 1a741606fe
  1. tools/run_tests/jobset.py (19 changed lines)
  2. tools/run_tests/run_tests.py (14 changed lines)
  3. tools/run_tests/run_tests_matrix.py (20 changed lines)
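With these changes a test suite can be repeated many times without the logs and XML report filling up with passing runs. An illustrative invocation would be tools/run_tests/run_tests_matrix.py -n 100 (the -n/--runs_per_test flag on run_tests_matrix.py and the --quiet_success flag on run_tests.py are the ones added by this PR; all other options are assumed to stay at their defaults). When -n is greater than 1, the matrix runner forwards '-n <value> --quiet_success' to each inner run_tests.py instance, so only failures and flakes are reported.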

tools/run_tests/jobset.py

@@ -219,7 +219,8 @@ class JobResult(object):
 class Job(object):
   """Manages one job."""
-  def __init__(self, spec, newline_on_success, travis, add_env):
+  def __init__(self, spec, newline_on_success, travis, add_env,
+               quiet_success=False):
     self._spec = spec
     self._newline_on_success = newline_on_success
     self._travis = travis
@@ -227,6 +228,8 @@ class Job(object):
     self._retries = 0
     self._timeout_retries = 0
     self._suppress_failure_message = False
-    message('START', spec.shortname, do_newline=self._travis)
+    self._quiet_success = quiet_success
+    if not self._quiet_success:
+      message('START', spec.shortname, do_newline=self._travis)
     self.result = JobResult()
     self.start()
@@ -302,6 +305,7 @@ class Job(object):
         if real > 0.5:
           cores = (user + sys) / real
           measurement = '; cpu_cost=%.01f; estimated=%.01f' % (cores, self._spec.cpu_cost)
-        message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
-            self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
-            stdout() if self._spec.verbose_success else None,
+        if not self._quiet_success:
+          message('PASSED', '%s [time=%.1fsec; retries=%d:%d%s]' % (
+              self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
+              stdout() if self._spec.verbose_success else None,
@@ -341,7 +345,7 @@ class Jobset(object):
   """Manages one run of jobs."""
   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env):
+               stop_on_failure, add_env, quiet_success):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -352,6 +356,7 @@ class Jobset(object):
     self._travis = travis
     self._stop_on_failure = stop_on_failure
     self._add_env = add_env
+    self._quiet_success = quiet_success
     self.resultset = {}
     self._remaining = None
     self._start_time = time.time()
@@ -380,7 +385,8 @@ class Jobset(object):
       job = Job(spec,
                 self._newline_on_success,
                 self._travis,
-                self._add_env)
+                self._add_env,
+                self._quiet_success)
       self._running.add(job)
       if job.GetSpec().shortname not in self.resultset:
         self.resultset[job.GetSpec().shortname] = []
@@ -403,6 +409,7 @@ class Jobset(object):
         break
     for job in dead:
       self._completed += 1
-      self.resultset[job.GetSpec().shortname].append(job.result)
+      if not self._quiet_success or job.result.state != 'PASSED':
+        self.resultset[job.GetSpec().shortname].append(job.result)
       self._running.remove(job)
     if dead: return
@@ -463,7 +470,8 @@ def run(cmdlines,
         infinite_runs=False,
         stop_on_failure=False,
         add_env={},
-        skip_jobs=False):
+        skip_jobs=False,
+        quiet_success=False):
   if skip_jobs:
     results = {}
     skipped_job_result = JobResult()
@@ -474,7 +482,8 @@ def run(cmdlines,
     return results
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
-              newline_on_success, travis, stop_on_failure, add_env)
+              newline_on_success, travis, stop_on_failure, add_env,
+              quiet_success)
   for cmdline, remaining in tag_remaining(cmdlines):
     if not js.start(cmdline):
       break
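The effect of the new quiet_success plumbing on result collection can be seen in isolation with a minimal sketch (FakeResult and collect_results below are simplified, hypothetical stand-ins for jobset's JobResult and Jobset.reap logic, not the actual classes):

import collections

# Simplified stand-in for jobset.JobResult; only the 'state' field matters here.
FakeResult = collections.namedtuple('FakeResult', ['shortname', 'state'])

def collect_results(finished_jobs, quiet_success=False):
  """Records a result unless quiet_success is set and the job passed,
  so thousands of identical passing runs don't bloat the resultset."""
  resultset = collections.defaultdict(list)
  for result in finished_jobs:
    if not quiet_success or result.state != 'PASSED':
      resultset[result.shortname].append(result)
  return resultset

runs = [FakeResult('foo_test', 'PASSED')] * 999 + [FakeResult('foo_test', 'FAILED')]
print(len(collect_results(runs)['foo_test']))                      # 1000 results kept
print(len(collect_results(runs, quiet_success=True)['foo_test']))  # only the 1 failure kept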

tools/run_tests/run_tests.py

@@ -1094,6 +1094,12 @@ argp.add_argument('-x', '--xml_report', default=None, type=str,
                   help='Generates a JUnit-compatible XML report')
 argp.add_argument('--report_suite_name', default='tests', type=str,
                   help='Test suite name to use in generated JUnit XML report')
+argp.add_argument('--quiet_success',
+                  default=False,
+                  action='store_const',
+                  const=True,
+                  help='Dont print anything when a test passes. Passing tests also will not be reported in XML report. ' +
+                       'Useful when running many iterations of each test (argument -n).')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                   help='Dont try to iterate over many polling strategies when they exist')
 args = argp.parse_args()
@@ -1441,17 +1447,21 @@ def _build_and_run(
                    else itertools.repeat(massaged_one_run, runs_per_test))
   all_runs = itertools.chain.from_iterable(runs_sequence)
+  if args.quiet_success:
+    jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
   num_test_failures, resultset = jobset.run(
       all_runs, check_cancelled, newline_on_success=newline_on_success,
       travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
       stop_on_failure=args.stop_on_failure,
-      add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+      add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port},
+      quiet_success=args.quiet_success)

   if resultset:
     for k, v in sorted(resultset.items()):
       num_runs, num_failures = _calculate_num_runs_failures(v)
-      if num_failures == num_runs:  # what about infinite_runs???
-        jobset.message('FAILED', k, do_newline=True)
-      elif num_failures > 0:
-        jobset.message(
-            'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
-            do_newline=True)
+      if num_failures > 0:
+        if num_failures == num_runs:  # what about infinite_runs???
+          jobset.message('FAILED', k, do_newline=True)
+        else:
+          jobset.message(
+              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+              do_newline=True)
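The restructured reporting branch classifies each test purely from its aggregated run counts. A minimal sketch of that classification (classify_test and the hard-coded counts are illustrative; _calculate_num_runs_failures itself is referenced but not shown in this diff):

def classify_test(num_runs, num_failures):
  """Mirrors the branch in _build_and_run(): quiet when everything passed,
  FAILED when every run failed, FLAKE when only some runs failed."""
  if num_failures > 0:
    if num_failures == num_runs:
      return 'FAILED'
    return 'FLAKE'
  return 'PASSED'  # with --quiet_success these entries never reach resultset anyway

print(classify_test(100, 0))    # PASSED
print(classify_test(100, 100))  # FAILED
print(classify_test(100, 3))    # FLAKE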

tools/run_tests/run_tests_matrix.py

@@ -242,6 +242,17 @@ def _allowed_labels():
   return sorted(all_labels)

+def _runs_per_test_type(arg_str):
+  """Auxiliary function to parse the "runs_per_test" flag."""
+  try:
+    n = int(arg_str)
+    if n <= 0: raise ValueError
+    return n
+  except:
+    msg = '\'{}\' is not a positive integer'.format(arg_str)
+    raise argparse.ArgumentTypeError(msg)
+
 if __name__ == "__main__":
   argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
   argp.add_argument('-j', '--jobs',
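Used as an argparse type, the helper above rejects anything that is not a positive integer. A quick standalone illustration (assumes _runs_per_test_type from the hunk above is in scope; the parser here is throwaway, not the real one in run_tests_matrix.py):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-n', type=_runs_per_test_type, default=1)

print(parser.parse_args(['-n', '30']).n)  # 30
parser.parse_args(['-n', '0'])            # exits via parser.error(): argument -n: '0' is not a positive integer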
@@ -269,7 +280,7 @@ if __name__ == "__main__":
                     default=False,
                     action='store_const',
                     const=True,
-                    help='Filters out tests irrelavant to pull request changes.')
+                    help='Filters out tests irrelevant to pull request changes.')
   argp.add_argument('--base_branch',
                     default='origin/master',
                     type=str,
@@ -278,6 +289,9 @@ if __name__ == "__main__":
                     default=_DEFAULT_INNER_JOBS,
                     type=int,
                     help='Number of jobs in each run_tests.py instance')
+  argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
+                    help='How many times to run each tests. >1 runs implies ' +
+                         'omitting passing test from the output & reports.')
   args = argp.parse_args()

   extra_args = []
@@ -285,6 +299,10 @@ if __name__ == "__main__":
     extra_args.append('--build_only')
   if args.force_default_poller:
     extra_args.append('--force_default_poller')
+  if args.runs_per_test > 1:
+    extra_args.append('-n')
+    extra_args.append('%s' % args.runs_per_test)
+    extra_args.append('--quiet_success')
   all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
              _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
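To make the forwarding concrete, here is a small sketch of how the pass-through flag list is assembled for the inner run_tests.py invocations (build_extra_args is a hypothetical helper that mirrors the __main__ block above; the real jobs are built by _create_test_jobs and _create_portability_test_jobs):

def build_extra_args(runs_per_test, build_only=False, force_default_poller=False):
  """Assembles the flags forwarded to each inner run_tests.py instance."""
  extra_args = []
  if build_only:
    extra_args.append('--build_only')
  if force_default_poller:
    extra_args.append('--force_default_poller')
  if runs_per_test > 1:
    extra_args.append('-n')
    extra_args.append('%s' % runs_per_test)
    extra_args.append('--quiet_success')
  return extra_args

# e.g. ['-n', '100', '--quiet_success'] appended to every inner run_tests.py command line
print(build_extra_args(runs_per_test=100))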
