run_tests.py cleanup and simplification (#28808)

* only support cmake for CLanguage in run_tests.py

* add support for run_tests.py build step environ

* switch C/C++ run_tests.py build to build_cxx script

* CLanguage cleanup

* build C# entirely with build_csharp script

* move entire PHP build to build_php.sh

* fixup C# build on linux and mac

* run_dep_checks Makefile target is deprecated

* get rid of the "makefile" logic in run_tests.py

* fixup C# build on linux and mac

* XML_REPORT env variable is useless for --use_docker runs

* add a TODO

* move "main" functionality towards end of run_tests.py

* use self.args instead of global

* yapf format

* remove the no longer useful --update_submodules features of run_tests.py

* fix check_epollexclusive check in run_tests.py
Branch: pull/28815/head
Jan Tattermusch, committed via GitHub
parent caa67ccc5e
commit 0ec0479ded
Files changed:
  1. tools/run_tests/helper_scripts/build_csharp.bat (14 changed lines)
  2. tools/run_tests/helper_scripts/build_csharp.sh (11 changed lines)
  3. tools/run_tests/helper_scripts/build_cxx.bat (5 changed lines)
  4. tools/run_tests/helper_scripts/build_cxx.sh (5 changed lines)
  5. tools/run_tests/helper_scripts/build_php.sh (11 changed lines)
  6. tools/run_tests/helper_scripts/pre_build_csharp.bat (13 changed lines)
  7. tools/run_tests/helper_scripts/pre_build_csharp.sh (7 changed lines)
  8. tools/run_tests/run_tests.py (682 changed lines)
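
The bulk of this change replaces the per-language make_targets()/make_options()/makefile_name() hooks in run_tests.py with plain build scripts plus a single build_steps_environ() hook, so run_tests.py no longer synthesizes make command lines itself. A minimal sketch of the resulting language interface (class and script names here are illustrative, not the literal gRPC code):

    # Sketch of the simplified per-language interface used by run_tests.py
    # after this change; names are illustrative only.
    class ExampleLanguage(object):

        def configure(self, config, args):
            # Languages keep the parsed args instead of reading a global.
            self.config = config
            self.args = args

        def pre_build_steps(self):
            # Commands that are retried on failure (e.g. dependency downloads).
            return [['tools/run_tests/helper_scripts/pre_build_example.sh']]

        def build_steps(self):
            # Every language builds through a helper script; run_tests.py no
            # longer emits "make" command lines of its own.
            return [['tools/run_tests/helper_scripts/build_example.sh']]

        def build_steps_environ(self):
            """Extra environment variables set for pre_build_steps and build_steps jobs."""
            return {'EXAMPLE_BUILD_FLAVOR': 'default'}

        def post_tests_steps(self):
            return [['tools/run_tests/helper_scripts/post_tests_example.sh']]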

@@ -14,7 +14,19 @@
setlocal
cd /d %~dp0\..\..\..\src\csharp
cd /d %~dp0\..\..\..
mkdir cmake
cd cmake
mkdir build
cd build
mkdir %ARCHITECTURE%
cd %ARCHITECTURE%
cmake -G "Visual Studio 14 2015" -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=OFF -DgRPC_MSVC_STATIC_RUNTIME=ON -DgRPC_XDS_USER_AGENT_IS_CSHARP=ON -DgRPC_BUILD_MSVC_MP_COUNT=%GRPC_RUN_TESTS_JOBS% ../../.. || goto :error
cmake --build . --target grpc_csharp_ext --config %MSBUILD_CONFIG% || goto :error
cd ..\..\..\src\csharp
dotnet build --configuration %MSBUILD_CONFIG% Grpc.sln || goto :error

@@ -15,7 +15,16 @@
set -ex
cd "$(dirname "$0")/../../../src/csharp"
cd "$(dirname "$0")/../../.."
mkdir -p cmake/build
pushd cmake/build
cmake -DgRPC_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" -DgRPC_XDS_USER_AGENT_IS_CSHARP=ON ../..
make -j"${GRPC_RUN_TESTS_JOBS}" grpc_csharp_ext
popd
pushd src/csharp
if [ "$CONFIG" == "gcov" ]
then

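
Both C# build scripts now receive their parameters through the environment rather than script arguments: MSBUILD_CONFIG and GRPC_RUN_TESTS_JOBS (plus ARCHITECTURE on Windows and CONFIG for the gcov branch) are set by run_tests.py via _build_step_environ() further down in this diff. As a rough illustration of the contract, the Linux/macOS script could be driven standalone like this (a sketch; the concrete values are assumptions):

    import os
    import subprocess

    # Sketch: drive the C# build helper the way run_tests.py does, by passing
    # configuration through the environment instead of script arguments.
    env = dict(os.environ)
    env.update({
        'CONFIG': 'opt',              # run_tests.py config name
        'MSBUILD_CONFIG': 'Release',  # cmake build type corresponding to 'opt'
        'GRPC_RUN_TESTS_JOBS': '8',   # parallelism for the cmake/make step
    })
    subprocess.check_call(
        ['tools/run_tests/helper_scripts/build_csharp.sh'], env=env)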
@@ -1,4 +1,4 @@
@rem Copyright 2017 gRPC authors.
@rem Copyright 2022 The gRPC Authors
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@@ -23,6 +23,9 @@ cd build
cmake -DgRPC_BUILD_TESTS=ON %* ../.. || goto :error
@rem GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX will be set to either "c" or "cxx"
cmake --build . --target buildtests_%GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX% --config %MSBUILD_CONFIG% || goto :error
endlocal
goto :EOF

@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright 2015 gRPC authors.
# Copyright 2022 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,3 +22,6 @@ cd cmake/build
# MSBUILD_CONFIG's values are suitable for cmake as well
cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" "$@" ../..
# GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX will be set to either "c" or "cxx"
make -j"${GRPC_RUN_TESTS_JOBS}" "buildtests_${GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX}" "tools_${GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX}" "check_epollexclusive"

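
build_cxx.sh and build_cxx.bat derive their targets from GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX, which CLanguage.build_steps_environ() (see the run_tests.py hunks below) sets to either "c" or "cxx". The suffix-to-target mapping the scripts implement is essentially:

    def cxx_build_targets(lang_suffix, platform):
        """Illustrative helper mirroring what build_cxx.sh/.bat build for a
        given GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX; not a real run_tests.py
        function."""
        if platform == 'windows':
            # build_cxx.bat only builds the test binaries for now.
            return ['buildtests_%s' % lang_suffix]
        return [
            'buildtests_%s' % lang_suffix,
            'tools_%s' % lang_suffix,
            'check_epollexclusive',
        ]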
@@ -20,7 +20,10 @@ CONFIG=${CONFIG:-opt}
# change to grpc repo root
cd "$(dirname "$0")/../../.."
root=$(pwd)
# build C core first
make -j"${GRPC_RUN_TESTS_JOBS}" EMBED_OPENSSL=true EMBED_ZLIB=true static_c shared_c
repo_root="$(pwd)"
export GRPC_LIB_SUBDIR=libs/$CONFIG
export CFLAGS="-Wno-parentheses-equality"
@@ -30,8 +33,8 @@ cd src/php
cd ext/grpc
phpize
if [ "$CONFIG" != "gcov" ] ; then
./configure --enable-grpc="$root" --enable-tests
./configure --enable-grpc="${repo_root}" --enable-tests
else
./configure --enable-grpc="$root" --enable-coverage --enable-tests
./configure --enable-grpc="${repo_root}" --enable-coverage --enable-tests
fi
make
make -j"${GRPC_RUN_TESTS_JOBS}"

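
build_php.sh now builds the C core itself (with embedded OpenSSL/zlib) before building the extension, which is why Php7Language below loses its make_targets()/make_options() hooks. The prerequisite step the script performs corresponds roughly to this sketch (assumes a gRPC checkout as the working directory):

    import os
    import subprocess

    # Sketch of the C-core build that build_php.sh now runs itself instead of
    # run_tests.py emitting the make targets for it.
    jobs = os.environ.get('GRPC_RUN_TESTS_JOBS', str(os.cpu_count() or 1))
    subprocess.check_call([
        'make', '-j', jobs,
        'EMBED_OPENSSL=true', 'EMBED_ZLIB=true',
        'static_c', 'shared_c',
    ])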
@@ -16,21 +16,10 @@
setlocal
set ARCHITECTURE=%1
@rem enter repo root
cd /d %~dp0\..\..\..
mkdir cmake
cd cmake
mkdir build
cd build
mkdir %ARCHITECTURE%
cd %ARCHITECTURE%
cmake -G "Visual Studio 14 2015" -A %ARCHITECTURE% -DgRPC_BUILD_TESTS=OFF -DgRPC_MSVC_STATIC_RUNTIME=ON -DgRPC_XDS_USER_AGENT_IS_CSHARP=ON -DgRPC_BUILD_MSVC_MP_COUNT=4 ../../.. || goto :error
cd ..\..\..\src\csharp
cd src\csharp
dotnet restore Grpc.sln || goto :error

@@ -18,11 +18,6 @@ set -ex
# cd to repository root
cd "$(dirname "$0")/../../.."
mkdir -p cmake/build
cd cmake/build
cmake -DgRPC_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" -DgRPC_XDS_USER_AGENT_IS_CSHARP=ON ../..
cd ../../src/csharp
cd src/csharp
dotnet restore Grpc.sln

@@ -240,16 +240,14 @@ def _pypy_pattern_function(major):
class CLanguage(object):
def __init__(self, make_target, test_lang):
self.make_target = make_target
def __init__(self, lang_suffix, test_lang):
self.lang_suffix = lang_suffix
self.platform = platform_string()
self.test_lang = test_lang
def configure(self, config, args):
self.config = config
self.args = args
self._make_options = []
self._use_cmake = True
if self.platform == 'windows':
_check_compiler(self.args.compiler, [
'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
@@ -286,7 +284,7 @@ class CLanguage(object):
out = []
binaries = get_c_tests(self.args.travis, self.test_lang)
for target in binaries:
if self._use_cmake and target.get('boringssl', False):
if target.get('boringssl', False):
# cmake doesn't build boringssl tests
continue
auto_timeout_scaling = target.get('auto_timeout_scaling', True)
@@ -327,11 +325,8 @@ class CLanguage(object):
binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
self.config.build_config], target['name'])
else:
if self._use_cmake:
binary = 'cmake/build/%s' % target['name']
else:
binary = 'bins/%s/%s' % (self.config.build_config,
target['name'])
binary = 'cmake/build/%s' % target['name']
cpu_cost = target['cpu_cost']
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
@@ -419,32 +414,22 @@ class CLanguage(object):
print('\nWARNING: binary not found, skipping', binary)
return sorted(out)
def make_targets(self):
if self.platform == 'windows':
# don't build tools on windows just yet
return ['buildtests_%s' % self.make_target]
return [
'buildtests_%s' % self.make_target,
'tools_%s' % self.make_target, 'check_epollexclusive'
]
def make_options(self):
return self._make_options
def pre_build_steps(self):
return []
def build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
'-DgRPC_BUILD_MSVC_MP_COUNT=%d' % args.jobs
'tools\\run_tests\\helper_scripts\\build_cxx.bat',
'-DgRPC_BUILD_MSVC_MP_COUNT=%d' % self.args.jobs
] + self._cmake_configure_extra_args]
elif self._use_cmake:
return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
self._cmake_configure_extra_args]
else:
return []
return [['tools/run_tests/helper_scripts/build_cxx.sh'] +
self._cmake_configure_extra_args]
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {'GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX': self.lang_suffix}
def post_tests_steps(self):
if self.platform == 'windows':
@@ -452,12 +437,6 @@ class CLanguage(object):
else:
return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
def makefile_name(self):
if self._use_cmake:
return 'cmake/build/Makefile'
else:
return 'Makefile'
def _clang_cmake_configure_extra_args(self, version_suffix=''):
return [
'-DCMAKE_C_COMPILER=clang%s' % version_suffix,
@@ -497,7 +476,7 @@ class CLanguage(object):
self._docker_distro, _docker_arch_suffix(self.args.arch))
def __str__(self):
return self.make_target
return self.lang_suffix
# This tests Node on grpc/grpc-node and will become the standard for Node testing
@@ -545,21 +524,16 @@ class RemoteNodeLanguage(object):
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
self.args.arch)
@@ -574,7 +548,6 @@ class Php7Language(object):
self.config = config
self.args = args
_check_compiler(self.args.compiler, ['default'])
self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
def test_specs(self):
return [
@@ -585,21 +558,16 @@
def pre_build_steps(self):
return []
def make_targets(self):
return ['static_c', 'shared_c']
def make_options(self):
return self._make_options
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_php.sh']]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/php7_debian11_%s' % _docker_arch_suffix(
self.args.arch)
@@ -671,24 +639,19 @@ class PythonLanguage(object):
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [config.build for config in self.pythons]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
if self.config.build_config != 'gcov':
return []
else:
return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/python_%s_%s' % (
self._python_docker_distro_name(),
@@ -858,21 +821,16 @@ class RubyLanguage(object):
def pre_build_steps(self):
return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return [['tools/run_tests/helper_scripts/build_ruby.sh']]
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/ruby_debian11_%s' % _docker_arch_suffix(
self.args.arch)
@@ -944,38 +902,28 @@ class CSharpLanguage(object):
def pre_build_steps(self):
if self.platform == 'windows':
return [[
'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
self._cmake_arch_option
]]
return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
def make_targets(self):
return ['grpc_csharp_ext']
def make_options(self):
return []
def build_steps(self):
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
else:
return [['tools/run_tests/helper_scripts/build_csharp.sh']]
def post_tests_steps(self):
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
if self.platform == 'windows':
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
return {'ARCHITECTURE': self._cmake_arch_option}
else:
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
return {}
def makefile_name(self):
def post_tests_steps(self):
if self.platform == 'windows':
return 'cmake/build/%s/Makefile' % self._cmake_arch_option
return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
else:
# no need to set x86 specific flags as run_tests.py
# currently forbids x86 C# builds on both Linux and MacOS.
return 'cmake/build/Makefile'
return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
def dockerfile_dir(self):
return 'tools/dockerfile/test/csharp_%s_%s' % (
@@ -1139,21 +1087,16 @@ class ObjCLanguage(object):
def pre_build_steps(self):
return []
def make_targets(self):
return []
def make_options(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return None
@@ -1191,21 +1134,16 @@ class Sanity(object):
def pre_build_steps(self):
return []
def make_targets(self):
return ['run_dep_checks']
def make_options(self):
return []
def build_steps(self):
return []
def build_steps_environ(self):
"""Extra environment variables set for pre_build_steps and build_steps jobs."""
return {}
def post_tests_steps(self):
return []
def makefile_name(self):
return 'Makefile'
def dockerfile_dir(self):
return 'tools/dockerfile/test/sanity'
@@ -1237,6 +1175,16 @@ _MSBUILD_CONFIG = {
}
def _build_step_environ(cfg, extra_env={}):
"""Environment variables set for each build step."""
environ = {'CONFIG': cfg, 'GRPC_RUN_TESTS_JOBS': str(args.jobs)}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
environ.update(extra_env)
return environ
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
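
The new _build_step_environ() helper is what ties the pieces together: it merges the common variables (CONFIG, GRPC_RUN_TESTS_JOBS, and MSBUILD_CONFIG where applicable) with whatever a language returns from build_steps_environ(). A standalone sketch of that composition, with the job count passed explicitly instead of read from the global args and assuming the usual dbg/opt/gcov MSBUILD mapping:

    _MSBUILD_CONFIG_SKETCH = {'dbg': 'Debug', 'opt': 'Release', 'gcov': 'Debug'}

    def build_step_environ_sketch(cfg, jobs, extra_env=None):
        """Standalone illustration of _build_step_environ()."""
        environ = {'CONFIG': cfg, 'GRPC_RUN_TESTS_JOBS': str(jobs)}
        msbuild_cfg = _MSBUILD_CONFIG_SKETCH.get(cfg)
        if msbuild_cfg:
            environ['MSBUILD_CONFIG'] = msbuild_cfg
        environ.update(extra_env or {})
        return environ

    # For the C language on an 8-way machine this yields, e.g.:
    # build_step_environ_sketch('opt', 8,
    #                           {'GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX': 'c'})
    # -> {'CONFIG': 'opt', 'GRPC_RUN_TESTS_JOBS': '8',
    #     'MSBUILD_CONFIG': 'Release', 'GRPC_RUN_TESTS_CXX_LANGUAGE_SUFFIX': 'c'}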
@@ -1319,6 +1267,195 @@ def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def _shut_down_legacy_server(legacy_server_port):
"""Shut down legacy version of port server."""
try:
version = int(
urllib.request.urlopen('http://localhost:%d/version_number' %
legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Calculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
def _has_epollexclusive():
binary = 'cmake/build/check_epollexclusive'
if not os.path.exists(binary):
return False
try:
subprocess.check_call(binary)
return True
except subprocess.CalledProcessError as e:
return False
except OSError as e:
# For languages other than C and Windows the binary won't exist
return False
class BuildAndRunError(object):
"""Represents error type in _build_and_run."""
BUILD = object()
TEST = object()
POST_TEST = object()
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
newline_on_success,
xml_report=None,
build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string(
) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [
subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(spec for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
# When running on travis, we want out test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
'START',
'Running tests quietly, only failing tests will be reported',
do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message('FLAKE',
'%s [%d/%d runs flaked]' %
(k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_extra_fields = {
'compiler': args.compiler,
'config': args.config,
'iomgr_platform': args.iomgr_platform,
'language': args.language[
0
], # args.language is a list but will always have one element when uploading to BQ is enabled.
'platform': platform_string()
}
try:
upload_results_to_bq(resultset, args.bq_result_table,
upload_extra_fields)
except NameError as e:
logging.warning(
e) # It's fine to ignore since this is not critical
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset,
xml_report,
suite_name=args.report_suite_name,
multi_target=args.report_multi_target)
number_failures, _ = jobset.run(post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
argp.add_argument('-c',
@@ -1433,15 +1570,6 @@ argp.add_argument('--measure_cpu_costs',
action='store_const',
const=True,
help='Measure the cpu costs of tests')
argp.add_argument(
'--update_submodules',
default=[],
nargs='*',
help=
'Update some submodules before building. If any are updated, also run generate_projects. '
+
'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x',
'--xml_report',
@@ -1501,40 +1629,11 @@ elif args.force_use_pollers:
jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
spec = spec.split(':', 1)
if len(spec) == 1:
submodule = spec[0]
branch = 'master'
elif len(spec) == 2:
submodule = spec[0]
branch = spec[1]
cwd = 'third_party/%s' % submodule
def git(cmd, cwd=cwd):
print('in %s: git %s' % (cwd, cmd))
run_shell_command('git %s' % cmd, cwd=cwd)
git('fetch')
git('checkout %s' % branch)
git('pull origin %s' % branch)
if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
need_to_regenerate_projects = True
if need_to_regenerate_projects:
if jobset.platform_string() == 'linux':
run_shell_command('tools/buildgen/generate_projects.sh')
else:
print(
'WARNING: may need to regenerate projects, but since we are not on')
print(
' Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config
# TODO(jtattermusch): is this setting applied/being used?
if args.travis:
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
@@ -1542,22 +1641,12 @@ languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
l.configure(run_config, args)
language_make_options = []
if any(language.make_options() for language in languages):
if not 'gcov' in args.config and len(languages) != 1:
print(
'languages with custom make options cannot be built simultaneously with other languages'
)
sys.exit(1)
else:
# Combining make options is not clean and just happens to work. It allows C & C++ to build
# together, and is only used under gcov. All other configs should build languages individually.
language_make_options = list(
set([
make_option for lang in languages
for make_option in lang.make_options()
]))
if len(languages) != 1:
print('Building multiple languages simultaneously is not supported!')
sys.exit(1)
# If --use_docker was used, respawn the run_tests.py script under a docker container
# instead of continuing.
if args.use_docker:
if not args.travis:
print('Seen --use_docker flag, will run tests under docker.')
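
With language_make_options gone (see the hunk above), run_tests.py simply refuses to build more than one language per invocation, so each language is built and tested in its own run. A hedged sketch of that usage pattern (the -l/-c flags are the long-standing run_tests.py options; the interpreter name is an assumption):

    import subprocess

    # Sketch: one run_tests.py invocation per language now that simultaneous
    # multi-language builds are rejected.
    for lang in ['c', 'c++']:
        subprocess.check_call(
            ['python3', 'tools/run_tests/run_tests.py', '-l', lang, '-c', 'opt'])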
@@ -1586,9 +1675,6 @@ if args.use_docker:
env['DOCKERFILE_DIR'] = dockerfile_dir
env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run.sh'
env['DOCKER_RUN_SCRIPT_COMMAND'] = run_tests_cmd
# TODO(jtattermusch): is the XML_REPORT env variable any useful?
if args.xml_report:
env['XML_REPORT'] = args.xml_report
retcode = subprocess.call(
'tools/run_tests/dockerize/build_and_run_docker.sh',
@@ -1599,276 +1685,38 @@ if args.use_docker:
_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
if platform_string() == 'windows':
return [
jobset.JobSpec([
'cmake', '--build', '.', '--target',
'%s' % target, '--config', _MSBUILD_CONFIG[cfg]
],
cwd=os.path.dirname(makefile),
timeout_seconds=None) for target in targets
]
else:
if targets and makefile.startswith('cmake/build/'):
# With cmake, we've passed all the build configuration in the pre-build step already
return [
jobset.JobSpec(
[os.getenv('MAKE', 'make'), '-j',
'%d' % args.jobs] + targets,
cwd='cmake/build',
timeout_seconds=None)
]
if targets:
return [
jobset.JobSpec(
[
os.getenv('MAKE', 'make'), '-f', makefile, '-j',
'%d' % args.jobs,
'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
args.slowdown,
'CONFIG=%s' % cfg, 'Q='
] + language_make_options +
([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
timeout_seconds=None)
]
else:
return []
make_targets = {}
for l in languages:
makefile = l.makefile_name()
make_targets[makefile] = make_targets.get(makefile, set()).union(
set(l.make_targets()))
def build_step_environ(cfg):
environ = {'CONFIG': cfg}
msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
if msbuild_cfg:
environ['MSBUILD_CONFIG'] = msbuild_cfg
return environ
# collect pre-build steps (which get retried if they fail, e.g. to avoid
# flakes on downloading dependencies etc.)
build_steps = list(
set(
jobset.JobSpec(cmdline,
environ=build_step_environ(build_config),
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()),
timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
flake_retries=2)
for l in languages
for cmdline in l.pre_build_steps()))
if make_targets:
make_commands = itertools.chain.from_iterable(
make_jobspec(build_config, list(targets), makefile)
for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
# collect build steps
build_steps.extend(
set(
jobset.JobSpec(cmdline,
environ=build_step_environ(build_config),
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()),
timeout_seconds=None)
for l in languages
for cmdline in l.build_steps()))
# collect post test steps
post_tests_steps = list(
set(
jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
jobset.JobSpec(cmdline,
environ=_build_step_environ(
build_config, extra_env=l.build_steps_environ()))
for l in languages
for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
def _shut_down_legacy_server(legacy_server_port):
try:
version = int(
urllib.request.urlopen('http://localhost:%d/version_number' %
legacy_server_port,
timeout=10).read())
except:
pass
else:
urllib.request.urlopen('http://localhost:%d/quitquitquit' %
legacy_server_port).read()
def _calculate_num_runs_failures(list_of_results):
"""Calculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
BUILD = object()
TEST = object()
POST_TEST = object()
def _has_epollexclusive():
binary = 'bins/%s/check_epollexclusive' % args.config
if not os.path.exists(binary):
return False
try:
subprocess.check_call(binary)
return True
except subprocess.CalledProcessError as e:
return False
except OSError as e:
# For languages other than C and Windows the binary won't exist
return False
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
newline_on_success,
xml_report=None,
build_only=False):
"""Do one pass of building & running tests."""
# build latest sequentially
num_failures, resultset = jobset.run(build_steps,
maxjobs=1,
stop_on_failure=True,
newline_on_success=newline_on_success,
travis=args.travis)
if num_failures:
return [BuildAndRunError.BUILD]
if build_only:
if xml_report:
report_utils.render_junit_xml_report(
resultset, xml_report, suite_name=args.report_suite_name)
return []
if not args.travis and not _has_epollexclusive() and platform_string(
) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
platform_string()]:
print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
_POLLING_STRATEGIES[platform_string()].remove('epollex')
# start antagonists
antagonists = [
subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
for _ in range(0, args.antagonists)
]
start_port_server.start_port_server()
resultset = None
num_test_failures = 0
try:
infinite_runs = runs_per_test == 0
one_run = set(spec for language in languages
for spec in language.test_specs()
if (re.search(args.regex, spec.shortname) and
(args.regex_exclude == '' or
not re.search(args.regex_exclude, spec.shortname))))
# When running on travis, we want out test runs to be as similar as possible
# for reproducibility purposes.
if args.travis and args.max_time <= 0:
massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
else:
# whereas otherwise, we want to shuffle things up to give all tests a
# chance to run.
massaged_one_run = list(
one_run) # random.sample needs an indexable seq.
num_jobs = len(massaged_one_run)
# for a random sample, get as many as indicated by the 'sample_percent'
# argument. By default this arg is 100, resulting in a shuffle of all
# jobs.
sample_size = int(num_jobs * args.sample_percent / 100.0)
massaged_one_run = random.sample(massaged_one_run, sample_size)
if not isclose(args.sample_percent, 100.0):
assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
print("Running %d tests out of %d (~%d%%)" %
(sample_size, num_jobs, args.sample_percent))
if infinite_runs:
assert len(massaged_one_run
) > 0, 'Must have at least one test for a -n inf run'
runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
else itertools.repeat(massaged_one_run, runs_per_test))
all_runs = itertools.chain.from_iterable(runs_sequence)
if args.quiet_success:
jobset.message(
'START',
'Running tests quietly, only failing tests will be reported',
do_newline=True)
num_test_failures, resultset = jobset.run(
all_runs,
check_cancelled,
newline_on_success=newline_on_success,
travis=args.travis,
maxjobs=args.jobs,
maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
stop_on_failure=args.stop_on_failure,
quiet_success=args.quiet_success,
max_time=args.max_time)
if resultset:
for k, v in sorted(resultset.items()):
num_runs, num_failures = _calculate_num_runs_failures(v)
if num_failures > 0:
if num_failures == num_runs: # what about infinite_runs???
jobset.message('FAILED', k, do_newline=True)
else:
jobset.message('FLAKE',
'%s [%d/%d runs flaked]' %
(k, num_failures, num_runs),
do_newline=True)
finally:
for antagonist in antagonists:
antagonist.kill()
if args.bq_result_table and resultset:
upload_extra_fields = {
'compiler': args.compiler,
'config': args.config,
'iomgr_platform': args.iomgr_platform,
'language': args.language[
0
], # args.language is a list but will always have one element when uploading to BQ is enabled.
'platform': platform_string()
}
try:
upload_results_to_bq(resultset, args.bq_result_table,
upload_extra_fields)
except NameError as e:
logging.warning(
e) # It's fine to ignore since this is not critical
if xml_report and resultset:
report_utils.render_junit_xml_report(
resultset,
xml_report,
suite_name=args.report_suite_name,
multi_target=args.report_multi_target)
number_failures, _ = jobset.run(post_tests_steps,
maxjobs=1,
stop_on_failure=False,
newline_on_success=newline_on_success,
travis=args.travis)
out = []
if number_failures:
out.append(BuildAndRunError.POST_TEST)
if num_test_failures:
out.append(BuildAndRunError.TEST)
return out
errors = _build_and_run(check_cancelled=lambda: False,
newline_on_success=args.newline_on_success,
xml_report=args.xml_report,
