diff --git a/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
index c2ea6f2c6eb..b4b172ddef6 100755
--- a/tools/run_tests/dockerize/build_docker_and_run_tests.sh
+++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
@@ -44,9 +44,6 @@ mkdir -p /tmp/ccache
 # its cache location now that --download-cache is deprecated).
 mkdir -p /tmp/xdg-cache-home
 
-# Create a local branch so the child Docker script won't complain
-git branch -f jenkins-docker
-
 # Inputs
 # DOCKERFILE_DIR - Directory in which Dockerfile file is located.
 # DOCKER_RUN_SCRIPT - Script to run under docker (relative to grpc repo root)
@@ -86,9 +83,12 @@ docker run \
   $DOCKER_IMAGE_NAME \
   bash -l "/var/local/jenkins/grpc/$DOCKER_RUN_SCRIPT" || DOCKER_FAILED="true"
 
-docker cp "$CONTAINER_NAME:/var/local/git/grpc/reports.zip" $git_root || true
-unzip -o $git_root/reports.zip -d $git_root || true
-rm -f reports.zip
+# use unique name for reports.zip to prevent clash between concurrent
+# run_tests.py runs
+TEMP_REPORTS_ZIP=`mktemp`
+docker cp "$CONTAINER_NAME:/var/local/git/grpc/reports.zip" ${TEMP_REPORTS_ZIP} || true
+unzip -o ${TEMP_REPORTS_ZIP} -d $git_root || true
+rm -f ${TEMP_REPORTS_ZIP}
 
 # remove the container, possibly killing it first
 docker rm -f $CONTAINER_NAME || true
diff --git a/tools/run_tests/run_tests_in_workspace.sh b/tools/run_tests/run_tests_in_workspace.sh
index 0e7604dbc55..b0c9ad05f76 100755
--- a/tools/run_tests/run_tests_in_workspace.sh
+++ b/tools/run_tests/run_tests_in_workspace.sh
@@ -37,6 +37,8 @@ set -ex
 cd $(dirname $0)/../..
 
 rm -rf "${WORKSPACE_NAME}"
+# TODO(jtattermusch): clone --recursive fetches the submodules from github.
+# Try avoiding that to save time and network capacity.
 git clone --recursive . "${WORKSPACE_NAME}"
 
 echo "Running run_tests.py in workspace ${WORKSPACE_NAME}"
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 2344e2f9fd8..bc2e37abd89 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -32,6 +32,7 @@
 
 import argparse
 import jobset
+import multiprocessing
 import os
 import report_utils
 import sys
@@ -40,8 +41,12 @@ _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
 # TODO(jtattermusch): this is not going to be enough for sanitizers.
+# TODO(jtattermusch): this is not going to be enough for rebuilding clang docker.
 _RUNTESTS_TIMEOUT = 30*60
 
+# Number of jobs assigned to each run_tests.py instance
+_INNER_JOBS = 2
+
 
 def _docker_jobspec(name, runtests_args=[]):
   """Run a single instance of run_tests.py in a docker container"""
@@ -50,7 +55,7 @@ def _docker_jobspec(name, runtests_args=[]):
           cmdline=['python', 'tools/run_tests/run_tests.py',
                    '--use_docker',
                    '-t',
-                   '-j', '3',
+                   '-j', str(_INNER_JOBS),
                    '-x', 'report_%s.xml' % name] + runtests_args,
           shortname='run_tests_%s' % name,
           timeout_seconds=_RUNTESTS_TIMEOUT)
@@ -59,11 +64,13 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None):
   """Run a single instance of run_tests.py in a separate workspace"""
+  if not workspace_name:
+    workspace_name = 'workspace_%s' % name
   env = {'WORKSPACE_NAME': workspace_name}
   test_job = jobset.JobSpec(
           cmdline=['tools/run_tests/run_tests_in_workspace.sh',
                    '-t',
-                   '-j', '3',
+                   '-j', str(_INNER_JOBS),
                    '-x', '../report_%s.xml' % name] + runtests_args,
           environ=env,
           shortname='run_tests_%s' % name,
@@ -196,6 +203,10 @@ for job in all_jobs:
     all_labels.add(label)
 
 argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
+argp.add_argument('-j', '--jobs',
+                  default=multiprocessing.cpu_count()/_INNER_JOBS,
+                  type=int,
+                  help='Number of concurrent run_tests.py instances.')
 argp.add_argument('-f', '--filter',
                   choices=sorted(all_labels),
                   nargs='+',
@@ -227,7 +238,7 @@ jobset.message('START', 'Running test matrix.', do_newline=True)
 num_failures, resultset = jobset.run(jobs,
                                      newline_on_success=True,
                                      travis=True,
-                                     maxjobs=2)
+                                     maxjobs=args.jobs)
 report_utils.render_junit_xml_report(resultset, 'report.xml')
 
 if num_failures == 0: