Merge pull request #10847 from matt-kwong/cherry-pick_max_time

Cherry-pick: Allow specifying a maximum run time to run_tests
pull/10889/head
Matt Kwong authored 8 years ago, committed by GitHub
commit 6be9397089
2 changed files:

  14  tools/run_tests/python_utils/jobset.py
   5  tools/run_tests/run_tests.py

tools/run_tests/python_utils/jobset.py

@@ -348,7 +348,7 @@ class Jobset(object):
   """Manages one run of jobs."""

   def __init__(self, check_cancelled, maxjobs, newline_on_success, travis,
-               stop_on_failure, add_env, quiet_success):
+               stop_on_failure, add_env, quiet_success, max_time):
     self._running = set()
     self._check_cancelled = check_cancelled
     self._cancelled = False
@@ -360,6 +360,7 @@ class Jobset(object):
     self._stop_on_failure = stop_on_failure
     self._add_env = add_env
     self._quiet_success = quiet_success
+    self._max_time = max_time
     self.resultset = {}
     self._remaining = None
     self._start_time = time.time()
@@ -379,6 +380,12 @@ class Jobset(object):
   def start(self, spec):
     """Start a job. Return True on success, False on failure."""
     while True:
+      if self._max_time > 0 and time.time() - self._start_time > self._max_time:
+        skipped_job_result = JobResult()
+        skipped_job_result.state = 'SKIPPED'
+        message('SKIPPED', spec.shortname, do_newline=True)
+        self.resultset[spec.shortname] = [skipped_job_result]
+        return True
       if self.cancelled(): return False
       current_cpu_cost = self.cpu_cost()
       if current_cpu_cost == 0: break
@@ -474,7 +481,8 @@ def run(cmdlines,
         stop_on_failure=False,
         add_env={},
         skip_jobs=False,
-        quiet_success=False):
+        quiet_success=False,
+        max_time=-1):
   if skip_jobs:
     resultset = {}
     skipped_job_result = JobResult()
@@ -486,7 +494,7 @@ def run(cmdlines,
   js = Jobset(check_cancelled,
               maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
               newline_on_success, travis, stop_on_failure, add_env,
-              quiet_success)
+              quiet_success, max_time)
   for cmdline, remaining in tag_remaining(cmdlines):
     if not js.start(cmdline):
       break
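The skip logic above runs at job-start time: once the elapsed wall-clock time exceeds max_time, every job that has not yet started is recorded as SKIPPED instead of being launched. Below is a minimal sketch of exercising the new parameter directly through jobset.run(); the import path, the JobSpec arguments, and the (num_failures, resultset) return shape are assumptions based on how run_tests.py already drives this module, and the sleep commands are purely illustrative:

    import python_utils.jobset as jobset  # assumed import path (matches run_tests.py usage)

    # Ten five-second jobs; shortnames are only labels used as keys in the result set.
    specs = [jobset.JobSpec(['sleep', '5'], shortname='sleep_%d' % i)
             for i in range(10)]

    # Never-cancelled callback, one job at a time, 8-second overall budget: jobs whose
    # start would fall after the budget is spent are recorded as SKIPPED, not run.
    num_failures, resultset = jobset.run(specs, lambda: False,
                                         maxjobs=1, max_time=8)

    for name, results in sorted(resultset.items()):
        print('%s: %s' % (name, [r.state for r in results]))

With this input the first couple of sleeps finish normally and the remaining jobs show up as SKIPPED, while the default max_time=-1 keeps the old behavior of running everything.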

tools/run_tests/run_tests.py

@@ -1210,6 +1210,7 @@ argp.add_argument('--quiet_success',
                   'Useful when running many iterations of each test (argument -n).')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                   help='Dont try to iterate over many polling strategies when they exist')
+argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
 args = argp.parse_args()

 if args.force_default_poller:
@@ -1465,7 +1466,7 @@ def _build_and_run(
                                not re.search(args.regex_exclude, spec.shortname))))
   # When running on travis, we want out test runs to be as similar as possible
   # for reproducibility purposes.
-  if args.travis:
+  if args.travis and args.max_time <= 0:
     massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
   else:
     # whereas otherwise, we want to shuffle things up to give all tests a
@@ -1493,7 +1494,7 @@ def _build_and_run(
         all_runs, check_cancelled, newline_on_success=newline_on_success,
         travis=args.travis, maxjobs=args.jobs,
         stop_on_failure=args.stop_on_failure,
-        quiet_success=args.quiet_success)
+        quiet_success=args.quiet_success, max_time=args.max_time)
   if resultset:
     for k, v in sorted(resultset.items()):
       num_runs, num_failures = _calculate_num_runs_failures(v)
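On the run_tests.py side the new flag simply flows through to jobset.run(), so a time-boxed local invocation might look like the sketch below; the -l/-c/-j selectors are ordinary run_tests.py options chosen only for illustration and are not part of this change:

    # Cap the whole run at 20 minutes of wall-clock time; jobs that would start
    # after the budget is spent are reported as SKIPPED rather than executed.
    python tools/run_tests/run_tests.py -l c -c opt -j 4 --max_time 1200

Note that a positive --max_time also bypasses the Travis-only alphabetical ordering (the "args.travis and args.max_time <= 0" change above), so the job list stays shuffled and the cutoff does not always skip the same tail of tests.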
