yapf run_tests

pull/13719/head
ncteisen 7 years ago
parent a69c6901f9
commit 888093c6ed
9 changed files:

  1. tools/distrib/yapf_code.sh (5)
  2. tools/run_tests/run_build_statistics.py (302)
  3. tools/run_tests/run_interop_tests.py (1988)
  4. tools/run_tests/run_microbenchmark.py (346)
  5. tools/run_tests/run_performance_tests.py (1143)
  6. tools/run_tests/run_tests.py (2668)
  7. tools/run_tests/run_tests_matrix.py (922)
  8. tools/run_tests/start_port_server.py (1)
  9. tools/run_tests/task_runner.py (100)

tools/distrib/yapf_code.sh

@@ -25,10 +25,7 @@ DIRS=(
     'tools/distrib'
     'tools/interop_matrix'
     'tools/profiling'
-    'tools/run_tests/python_utils'
-    'tools/run_tests/sanity'
-    'tools/run_tests/performance'
-    'tools/run_tests/artifacts'
+    'tools/run_tests'
 )
 EXCLUSIONS=(
     'grpcio/grpc_*.py'
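The directory list above is what yapf_code.sh hands to yapf. For readers who want to see the kind of rewrite this commit applies, here is a minimal sketch using yapf's Python API; the input snippet and the 'pep8' style name are illustrative assumptions, not values taken from the script.

# Minimal sketch of the reformatting step, assuming `pip install yapf`.
# The snippet and style_config='pep8' are illustrative only; the real script
# formats whole directories with the repository's own settings.
from yapf.yapflib.yapf_api import FormatCode

snippet = (
    "argp.add_argument('-b', '--builds',\n"
    "                  choices=['all'],\n"
    "                  nargs='+',\n"
    "                  default=['all'])\n")

formatted, changed = FormatCode(snippet, style_config='pep8')
print(changed)    # True when yapf rewrote anything
print(formatted)  # the reformatted source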

tools/run_tests/run_build_statistics.py

@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tool to get build statistics from Jenkins and upload to BigQuery."""
 
 from __future__ import print_function

@@ -27,39 +26,38 @@ import re
(resulting text after yapf)

import sys
import urllib

gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils

_PROJECT_ID = 'grpc-testing'
_HAS_MATRIX = True
_BUILDS = {
    'gRPC_interop_master': not _HAS_MATRIX,
    'gRPC_master_linux': not _HAS_MATRIX,
    'gRPC_master_macos': not _HAS_MATRIX,
    'gRPC_master_windows': not _HAS_MATRIX,
    'gRPC_performance_master': not _HAS_MATRIX,
    'gRPC_portability_master_linux': not _HAS_MATRIX,
    'gRPC_portability_master_windows': not _HAS_MATRIX,
    'gRPC_master_asanitizer_c': not _HAS_MATRIX,
    'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
    'gRPC_master_msan_c': not _HAS_MATRIX,
    'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
    'gRPC_master_tsan_cpp': not _HAS_MATRIX,
    'gRPC_interop_pull_requests': not _HAS_MATRIX,
    'gRPC_performance_pull_requests': not _HAS_MATRIX,
    'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
    'gRPC_portability_pr_win': not _HAS_MATRIX,
    'gRPC_pull_requests_linux': not _HAS_MATRIX,
    'gRPC_pull_requests_macos': not _HAS_MATRIX,
    'gRPC_pr_win': not _HAS_MATRIX,
    'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
    'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
    'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
    'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
    'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
}

_URL_BASE = 'https://grpc-testing.appspot.com/job'

@@ -99,147 +97,155 @@ _DATASET_ID = 'build_statistics'
(resulting text after yapf)

def _scrape_for_known_errors(html):
    error_list = []
    for known_error in _KNOWN_ERRORS:
        errors = re.findall(known_error, html)
        this_error_count = len(errors)
        if this_error_count > 0:
            error_list.append({
                'description': known_error,
                'count': this_error_count
            })
            print('====> %d failures due to %s' %
                  (this_error_count, known_error))
    return error_list


def _no_report_files_found(html):
    return _NO_REPORT_FILES_FOUND_ERROR in html


def _get_last_processed_buildnumber(build_name):
    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
        _PROJECT_ID, _DATASET_ID, build_name)
    query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
    page = bq.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    if page['rows'][0]['f'][0]['v']:
        return int(page['rows'][0]['f'][0]['v'])
    return 0


def _process_matrix(build, url_base):
    matrix_list = []
    for matrix in build.get_matrix_runs():
        matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
                              matrix.name).groups()[0]
        matrix_tuple = matrix_str.split(',')
        json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
        console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
        matrix_dict = {
            'name': matrix_str,
            'duration': matrix.get_duration().total_seconds()
        }
        matrix_dict.update(_process_build(json_url, console_url))
        matrix_list.append(matrix_dict)

    return matrix_list


def _process_build(json_url, console_url):
    build_result = {}
    error_list = []
    try:
        html = urllib.urlopen(json_url).read()
        test_result = json.loads(html)
        print('====> Parsing result from %s' % json_url)
        failure_count = test_result['failCount']
        build_result['pass_count'] = test_result['passCount']
        build_result['failure_count'] = failure_count
        # This means Jenkins failure occurred.
        build_result['no_report_files_found'] = _no_report_files_found(html)
        # Only check errors if Jenkins failure occurred.
        if build_result['no_report_files_found']:
            error_list = _scrape_for_known_errors(html)
    except Exception as e:
        print('====> Got exception for %s: %s.' % (json_url, str(e)))
        print('====> Parsing errors from %s.' % console_url)
        html = urllib.urlopen(console_url).read()
        build_result['pass_count'] = 0
        build_result['failure_count'] = 1
        # In this case, the string doesn't exist in the result html but the fact
        # that we fail to parse the result html indicates Jenkins failure and hence
        # no report files were generated.
        build_result['no_report_files_found'] = True
        error_list = _scrape_for_known_errors(html)

    if error_list:
        build_result['error'] = error_list
    elif build_result['no_report_files_found']:
        build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
    else:
        build_result['error'] = [{'description': '', 'count': 0}]
    return build_result


# parse command line
argp = argparse.ArgumentParser(description='Get build statistics.')
argp.add_argument('-u', '--username', default='jenkins')
argp.add_argument(
    '-b',
    '--builds',
    choices=['all'] + sorted(_BUILDS.keys()),
    nargs='+',
    default=['all'])
args = argp.parse_args()

J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
bq = big_query_utils.create_big_query()

for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
    print('====> Build: %s' % build_name)
    # Since get_last_completed_build() always fails due to malformatted string
    # error, we use get_build_metadata() instead.
    job = None
    try:
        job = J[build_name]
    except Exception as e:
        print('====> Failed to get build %s: %s.' % (build_name, str(e)))
        continue
    last_processed_build_number = _get_last_processed_buildnumber(build_name)
    last_complete_build_number = job.get_last_completed_buildnumber()
    # To avoid processing all builds for a project never looked at. In this case,
    # only examine 10 latest builds.
    starting_build_number = max(last_processed_build_number + 1,
                                last_complete_build_number - 9)
    for build_number in xrange(starting_build_number,
                               last_complete_build_number + 1):
        print('====> Processing %s build %d.' % (build_name, build_number))
        build = None
        try:
            build = job.get_build_metadata(build_number)
            print('====> Build status: %s.' % build.get_status())
            if build.get_status() == 'ABORTED':
                continue
            # If any build is still running, stop processing this job. Next time, we
            # start from where it was left so that all builds are processed
            # sequentially.
            if build.is_running():
                print('====> Build %d is still running.' % build_number)
                break
        except KeyError:
            print('====> Build %s is missing. Skip.' % build_number)
            continue
        build_result = {
            'build_number': build_number,
            'timestamp': str(build.get_timestamp())
        }
        url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
        if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
            build_result['matrix'] = _process_matrix(build, url_base)
        else:
            json_url = '%s/testReport/api/json' % url_base
            console_url = '%s/consoleFull' % url_base
            build_result['duration'] = build.get_duration().total_seconds()
            build_stat = _process_build(json_url, console_url)
            build_result.update(build_stat)
        rows = [big_query_utils.make_row(build_number, build_result)]
        if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
                                           build_name, rows):
            print('====> Error uploading result to bigquery.')
            sys.exit(1)
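_scrape_for_known_errors() above simply counts regex matches for each known failure signature in the fetched console HTML. A standalone sketch of the same idea, with made-up patterns and log text:

import re

# Hypothetical failure signatures; the real list lives in _KNOWN_ERRORS above.
KNOWN_ERRORS = [r'Failed to create Docker container', r'port_server\.py.*died']


def scrape_for_known_errors(log_text):
    """Return [{'description': pattern, 'count': n}, ...] for patterns that hit."""
    results = []
    for pattern in KNOWN_ERRORS:
        count = len(re.findall(pattern, log_text))
        if count:
            results.append({'description': pattern, 'count': count})
    return results


sample_log = ('Failed to create Docker container\n'
              'Failed to create Docker container\n')
print(scrape_for_known_errors(sample_log))
# [{'description': 'Failed to create Docker container', 'count': 2}]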

tools/run_tests/run_interop_tests.py: file diff suppressed because it is too large.

tools/run_tests/run_microbenchmark.py

@@ -23,26 +23,31 @@ import argparse
(resulting text after yapf)

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
        'bm_diff'))
import bm_constants

flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')

start_port_server.start_port_server()


def fnize(s):
    out = ''
    for c in s:
        if c in '<>, /':
            if len(out) and out[-1] == '_': continue
            out += '_'
        else:
            out += c
    return out


# index html
index_html = """
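fnize() above turns a benchmark name into a filesystem-friendly token by collapsing runs of '<', '>', ',', ' ' and '/' into single underscores. A quick illustration of the expected output for a hypothetical benchmark name:

def fnize(s):
    # Same logic as above: collapse '<>, /' characters into single underscores.
    out = ''
    for c in s:
        if c in '<>, /':
            if len(out) and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out


# Hypothetical benchmark name of the kind emitted by --benchmark_list_tests.
print(fnize('BM_StreamingPingPong<TCP, NoOpMutator>/8/2'))
# BM_StreamingPingPong_TCP_NoOpMutator_8_2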
@@ -53,169 +58,202 @@ index_html = """
(resulting text after yapf)

<body>
"""


def heading(name):
    global index_html
    index_html += "<h1>%s</h1>\n" % name


def link(txt, tgt):
    global index_html
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
        cgi.escape(tgt, quote=True), cgi.escape(txt))


def text(txt):
    global index_html
    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)


def collect_latency(bm_name, args):
    """generate latency profiles"""
    benchmarks = []
    profile_analysis = []
    cleanup = []

    heading('Latency Profiles: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=basicprof', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    for line in subprocess.check_output(
        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' %
                    line, '--benchmark_min_time=0.05'
                ],
                environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
                shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    sys.executable,
                    'tools/profiling/latency_profile/profile_analyzer.py',
                    '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
                    '--out', 'reports/%s.txt' % fnize(line)
                ],
                timeout_seconds=20 * 60,
                shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: profile_analysis jobs at least
        # consume upwards of five gigabytes of ram in some cases, and so analysing
        # hundreds of them at once is impractical -- but we want at least some
        # concurrency or the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(
                benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def collect_perf(bm_name, args):
    """generate flamegraphs"""
    heading('Flamegraphs: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=mutrace', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output(
        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.svg' % fnize(line))
        benchmarks.append(
            jobset.JobSpec(
                [
                    'perf', 'record', '-o', '%s-perf.data' % fnize(
                        line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name,
                    '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
                ],
                shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required for this
        # processing is large
        if len(benchmarks) >= 20:
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def run_summary(bm_name, cfg, base_json_name):
    subprocess.check_call([
        'make', bm_name, 'CONFIG=%s' % cfg, '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    cmd = [
        'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' %
        (base_json_name, cfg), '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd)


def collect_summary(bm_name, args):
    heading('Summary: %s [no counters]' % bm_name)
    text(run_summary(bm_name, 'opt', bm_name))
    heading('Summary: %s [with counters]' % bm_name)
    text(run_summary(bm_name, 'counters', bm_name))
    if args.bigquery_upload:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name, '%s.opt.json' % bm_name
                ]))
        subprocess.check_call([
            'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name
        ])


collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument(
    '-c',
    '--collect',
    choices=sorted(collectors.keys()),
    nargs='*',
    default=sorted(collectors.keys()),
    help='Which collectors should be run against each benchmark')
argp.add_argument(
    '-b',
    '--benchmarks',
    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
    nargs='+',
    type=str,
    help='Which microbenchmarks should be run')
argp.add_argument(
    '--bigquery_upload',
    default=False,
    action='store_const',
    const=True,
    help='Upload results from summary collection to bigquery')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()

try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)
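Both collectors above queue JobSpecs and periodically flush them through jobset.run() so that memory and temporary disk usage stay bounded. A dependency-free sketch of that batching pattern; the batch size and the work callback are placeholders, not part of the script above:

import multiprocessing


def run_in_batches(items, process_batch, batch_size=None):
    """Queue items and flush them in bounded batches, like the collectors above."""
    if batch_size is None:
        batch_size = min(16, multiprocessing.cpu_count())
    pending = []
    for item in items:
        pending.append(item)
        if len(pending) >= batch_size:
            process_batch(pending)  # analogous to jobset.run(benchmarks, ...)
            pending = []
    if pending:                     # run whatever wasn't flushed
        process_batch(pending)


def _show(batch):
    print('flushing %d items' % len(batch))


# Toy usage: "process" 40 items in batches of 16.
run_in_batches(range(40), _show, batch_size=16)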

tools/run_tests/run_performance_tests.py: file diff suppressed because it is too large.

tools/run_tests/run_tests.py: file diff suppressed because it is too large.

tools/run_tests/run_tests_matrix.py

@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run test matrix."""
 
 from __future__ import print_function

@@ -29,14 +28,14 @@ from python_utils.filter_pull_request_tests import filter_tests
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
 
 # Set the timeout high to allow enough time for sanitizers and pre-building
 # clang docker.
-_CPP_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
 
 # C++ TSAN takes longer than other sanitizers
-_CPP_TSAN_RUNTESTS_TIMEOUT = 8*60*60
+_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
 
 # Number of jobs assigned to each run_tests.py instance
 _DEFAULT_INNER_JOBS = 2

@@ -46,448 +45,517 @@ _REPORT_SUFFIX = 'sponge_log.xml'
(resulting text after yapf)

def _report_filename(name):
    """Generates report file name"""
    return 'report_%s_%s' % (name, _REPORT_SUFFIX)


def _report_filename_internal_ci(name):
    """Generates report file name that leads to better presentation by internal CI"""
    return '%s/%s' % (name, _REPORT_SUFFIX)


def _docker_jobspec(name,
                    runtests_args=[],
                    runtests_envs={},
                    inner_jobs=_DEFAULT_INNER_JOBS,
                    timeout_seconds=None):
    """Run a single instance of run_tests.py in a docker container"""
    if not timeout_seconds:
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    test_job = jobset.JobSpec(
        cmdline=[
            'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
            '-j', str(inner_jobs), '-x', _report_filename(name),
            '--report_suite_name', '%s' % name
        ] + runtests_args,
        environ=runtests_envs,
        shortname='run_tests_%s' % name,
        timeout_seconds=timeout_seconds)
    return test_job


def _workspace_jobspec(name,
                       runtests_args=[],
                       workspace_name=None,
                       runtests_envs={},
                       inner_jobs=_DEFAULT_INNER_JOBS,
                       timeout_seconds=None):
    """Run a single instance of run_tests.py in a separate workspace"""
    if not workspace_name:
        workspace_name = 'workspace_%s' % name
    if not timeout_seconds:
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    env = {'WORKSPACE_NAME': workspace_name}
    env.update(runtests_envs)
    test_job = jobset.JobSpec(
        cmdline=[
            'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
            '-t', '-j', str(inner_jobs), '-x', '../%s' % _report_filename(name),
            '--report_suite_name', '%s' % name
        ] + runtests_args,
        environ=env,
        shortname='run_tests_%s' % name,
        timeout_seconds=timeout_seconds)
    return test_job


def _generate_jobs(languages,
                   configs,
                   platforms,
                   iomgr_platform='native',
                   arch=None,
                   compiler=None,
                   labels=[],
                   extra_args=[],
                   extra_envs={},
                   inner_jobs=_DEFAULT_INNER_JOBS,
                   timeout_seconds=None):
    result = []
    for language in languages:
        for platform in platforms:
            for config in configs:
                name = '%s_%s_%s_%s' % (language, platform, config,
                                        iomgr_platform)
                runtests_args = [
                    '-l', language, '-c', config, '--iomgr_platform',
                    iomgr_platform
                ]
                if arch or compiler:
                    name += '_%s_%s' % (arch, compiler)
                    runtests_args += ['--arch', arch, '--compiler', compiler]
                if '--build_only' in extra_args:
                    name += '_buildonly'
                for extra_env in extra_envs:
                    name += '_%s_%s' % (extra_env, extra_envs[extra_env])

                runtests_args += extra_args
                if platform == 'linux':
                    job = _docker_jobspec(
                        name=name,
                        runtests_args=runtests_args,
                        runtests_envs=extra_envs,
                        inner_jobs=inner_jobs,
                        timeout_seconds=timeout_seconds)
                else:
                    job = _workspace_jobspec(
                        name=name,
                        runtests_args=runtests_args,
                        runtests_envs=extra_envs,
                        inner_jobs=inner_jobs,
                        timeout_seconds=timeout_seconds)

                job.labels = [platform, config, language, iomgr_platform
                              ] + labels
                result.append(job)
    return result


def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
    test_jobs = []
    # supported on linux only
    test_jobs += _generate_jobs(
        languages=['sanity', 'php7'],
        configs=['dbg', 'opt'],
        platforms=['linux'],
        labels=['basictests', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # supported on all platforms.
    test_jobs += _generate_jobs(
        languages=['c'],
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos', 'windows'],
        labels=['basictests', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)

    test_jobs += _generate_jobs(
        languages=['csharp', 'python'],
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos', 'windows'],
        labels=['basictests', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # supported on linux and mac.
    test_jobs += _generate_jobs(
        languages=['c++'],
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos'],
        labels=['basictests', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)

    test_jobs += _generate_jobs(
        languages=['grpc-node', 'ruby', 'php'],
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos'],
        labels=['basictests', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # supported on mac only.
    test_jobs += _generate_jobs(
        languages=['objc'],
        configs=['dbg', 'opt'],
        platforms=['macos'],
        labels=['basictests', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # sanitizers
    test_jobs += _generate_jobs(
        languages=['c'],
        configs=['msan', 'asan', 'tsan', 'ubsan'],
        platforms=['linux'],
        labels=['sanitizers', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    test_jobs += _generate_jobs(
        languages=['c++'],
        configs=['asan'],
        platforms=['linux'],
        labels=['sanitizers', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    test_jobs += _generate_jobs(
        languages=['c++'],
        configs=['tsan'],
        platforms=['linux'],
        labels=['sanitizers', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)

    return test_jobs


def _create_portability_test_jobs(extra_args=[],
                                  inner_jobs=_DEFAULT_INNER_JOBS):
    test_jobs = []
    # portability C x86
    test_jobs += _generate_jobs(
        languages=['c'],
        configs=['dbg'],
        platforms=['linux'],
        arch='x86',
        compiler='default',
        labels=['portability', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # portability C and C++ on x64
    for compiler in [
            'gcc4.8', 'gcc5.3', 'gcc_musl', 'clang3.5', 'clang3.6', 'clang3.7'
    ]:
        test_jobs += _generate_jobs(
            languages=['c', 'c++'],
            configs=['dbg'],
            platforms=['linux'],
            arch='x64',
            compiler=compiler,
            labels=['portability', 'corelang'],
            extra_args=extra_args,
            inner_jobs=inner_jobs,
            timeout_seconds=_CPP_RUNTESTS_TIMEOUT)

    # portability C on Windows 64-bit (x86 is the default)
    test_jobs += _generate_jobs(
        languages=['c'],
        configs=['dbg'],
        platforms=['windows'],
        arch='x64',
        compiler='default',
        labels=['portability', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    # portability C++ on Windows
    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
    test_jobs += _generate_jobs(
        languages=['c++'],
        configs=['dbg'],
        platforms=['windows'],
        arch='default',
        compiler='default',
        labels=['portability', 'corelang'],
        extra_args=extra_args + ['--build_only'],
        inner_jobs=inner_jobs)

    # portability C and C++ on Windows using VS2017 (build only)
    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
    test_jobs += _generate_jobs(
        languages=['c', 'c++'],
        configs=['dbg'],
        platforms=['windows'],
        arch='x64',
        compiler='cmake_vs2017',
        labels=['portability', 'corelang'],
        extra_args=extra_args + ['--build_only'],
        inner_jobs=inner_jobs)

    # C and C++ with the c-ares DNS resolver on Linux
    test_jobs += _generate_jobs(
        languages=['c', 'c++'],
        configs=['dbg'],
        platforms=['linux'],
        labels=['portability', 'corelang'],
        extra_args=extra_args,
        extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)

    # TODO(zyc): Turn on this test after adding c-ares support on windows.
    # C with the c-ares DNS resolver on Windows
    # test_jobs += _generate_jobs(languages=['c'],
    #                             configs=['dbg'], platforms=['windows'],
    #                             labels=['portability', 'corelang'],
    #                             extra_args=extra_args,
    #                             extra_envs={'GRPC_DNS_RESOLVER': 'ares'})

    # C and C++ build with cmake on Linux
    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
    # to make sure it's buildable at least.
    test_jobs += _generate_jobs(
        languages=['c', 'c++'],
        configs=['dbg'],
        platforms=['linux'],
        arch='default',
        compiler='cmake',
        labels=['portability', 'corelang'],
        extra_args=extra_args + ['--build_only'],
        inner_jobs=inner_jobs)

    test_jobs += _generate_jobs(
        languages=['python'],
        configs=['dbg'],
        platforms=['linux'],
        arch='default',
        compiler='python_alpine',
        labels=['portability', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    test_jobs += _generate_jobs(
        languages=['csharp'],
        configs=['dbg'],
        platforms=['linux'],
        arch='default',
        compiler='coreclr',
        labels=['portability', 'multilang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs)

    test_jobs += _generate_jobs(
        languages=['c'],
        configs=['dbg'],
        platforms=['linux'],
        iomgr_platform='uv',
        labels=['portability', 'corelang'],
        extra_args=extra_args,
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)

    return test_jobs


def _allowed_labels():
    """Returns a list of existing job labels."""
    all_labels = set()
    for job in _create_test_jobs() + _create_portability_test_jobs():
        for label in job.labels:
            all_labels.add(label)
    return sorted(all_labels)


def _runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag."""
    try:
        n = int(arg_str)
        if n <= 0: raise ValueError
        return n
    except:
        msg = '\'{}\' is not a positive integer'.format(arg_str)
        raise argparse.ArgumentTypeError(msg)


if __name__ == "__main__":
    argp = argparse.ArgumentParser(
        description='Run a matrix of run_tests.py tests.')
    argp.add_argument(
        '-j',
        '--jobs',
        default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
        type=int,
        help='Number of concurrent run_tests.py instances.')
    argp.add_argument(
        '-f',
        '--filter',
        choices=_allowed_labels(),
        nargs='+',
        default=[],
        help='Filter targets to run by label with AND semantics.')
    argp.add_argument(
        '--exclude',
        choices=_allowed_labels(),
        nargs='+',
        default=[],
        help='Exclude targets with any of given labels.')
    argp.add_argument(
        '--build_only',
        default=False,
        action='store_const',
        const=True,
        help='Pass --build_only flag to run_tests.py instances.')
    argp.add_argument(
        '--force_default_poller',
        default=False,
        action='store_const',
        const=True,
        help='Pass --force_default_poller to run_tests.py instances.')
    argp.add_argument(
        '--dry_run',
        default=False,
        action='store_const',
        const=True,
        help='Only print what would be run.')
    argp.add_argument(
        '--filter_pr_tests',
        default=False,
        action='store_const',
        const=True,
        help='Filters out tests irrelevant to pull request changes.')
    argp.add_argument(
        '--base_branch',
        default='origin/master',
        type=str,
        help='Branch that pull request is requesting to merge into')
    argp.add_argument(
        '--inner_jobs',
        default=_DEFAULT_INNER_JOBS,
        type=int,
        help='Number of jobs in each run_tests.py instance')
    argp.add_argument(
        '-n',
        '--runs_per_test',
        default=1,
        type=_runs_per_test_type,
        help='How many times to run each tests. >1 runs implies ' +
        'omitting passing test from the output & reports.')
    argp.add_argument(
        '--max_time',
        default=-1,
        type=int,
        help='Maximum amount of time to run tests for' +
        '(other tests will be skipped)')
    argp.add_argument(
        '--internal_ci',
        default=False,
        action='store_const',
        const=True,
        help='Put reports into subdirectories to improve presentation of '
        'results by Internal CI.')
    argp.add_argument(
        '--bq_result_table',
        default='',
        type=str,
        nargs='?',
        help='Upload test results to a specified BQ table.')
    args = argp.parse_args()

    if args.internal_ci:
        _report_filename = _report_filename_internal_ci  # override the function

    extra_args = []
    if args.build_only:
        extra_args.append('--build_only')
    if args.force_default_poller:
        extra_args.append('--force_default_poller')
    if args.runs_per_test > 1:
        extra_args.append('-n')
        extra_args.append('%s' % args.runs_per_test)
        extra_args.append('--quiet_success')
    if args.max_time > 0:
        extra_args.extend(('--max_time', '%d' % args.max_time))
    if args.bq_result_table:
        extra_args.append('--bq_result_table')
        extra_args.append('%s' % args.bq_result_table)
        extra_args.append('--measure_cpu_costs')
        extra_args.append('--disable_auto_set_flakes')

    all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
               _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)

    jobs = []
    for job in all_jobs:
        if not args.filter or all(filter in job.labels
                                  for filter in args.filter):
            if not any(exclude_label in job.labels
                       for exclude_label in args.exclude):
                jobs.append(job)

    if not jobs:
        jobset.message(
            'FAILED', 'No test suites match given criteria.', do_newline=True)
        sys.exit(1)

    print('IMPORTANT: The changes you are testing need to be locally committed')
    print('because only the committed changes in the current branch will be')
    print('copied to the docker environment or into subworkspaces.')

    skipped_jobs = []

    if args.filter_pr_tests:
        print('Looking for irrelevant tests to skip...')
        relevant_jobs = filter_tests(jobs, args.base_branch)
        if len(relevant_jobs) == len(jobs):
            print('No tests will be skipped.')
        else:
            print('These tests will be skipped:')
            skipped_jobs = list(set(jobs) - set(relevant_jobs))
            # Sort by shortnames to make printing of skipped tests consistent
            skipped_jobs.sort(key=lambda job: job.shortname)
            for job in list(skipped_jobs):
                print('  %s' % job.shortname)
        jobs = relevant_jobs

    print('Will run these tests:')
    for job in jobs:
        if args.dry_run:
            print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
        else:
            print('  %s' % job.shortname)
    print

    if args.dry_run:
        print('--dry_run was used, exiting')
        sys.exit(1)

    jobset.message('START', 'Running test matrix.', do_newline=True)
    num_failures, resultset = jobset.run(
        jobs, newline_on_success=True, travis=True, maxjobs=args.jobs)
    # Merge skipped tests into results to show skipped tests on report.xml
    if skipped_jobs:
        ignored_num_skipped_failures, skipped_results = jobset.run(
            skipped_jobs, skip_jobs=True)
        resultset.update(skipped_results)
    report_utils.render_junit_xml_report(
        resultset,
        _report_filename('aggregate_tests'),
        suite_name='aggregate_tests')

    if num_failures == 0:
        jobset.message(
            'SUCCESS',
            'All run_tests.py instance finished successfully.',
            do_newline=True)
    else:
        jobset.message(
            'FAILED',
            'Some run_tests.py instance have failed.',
            do_newline=True)
        sys.exit(1)
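The __main__ block above selects jobs by label: --filter keeps only jobs that carry every requested label (AND semantics), and --exclude then drops any job carrying an excluded label. A small self-contained sketch of that selection rule, with invented job names and labels:

# Hypothetical (name, labels) pairs standing in for the JobSpecs built above.
all_jobs = [
    ('c_linux_dbg_native', ['linux', 'dbg', 'c', 'native', 'basictests']),
    ('c++_linux_tsan_native', ['linux', 'tsan', 'c++', 'native', 'sanitizers']),
    ('csharp_macos_opt_native', ['macos', 'opt', 'csharp', 'native', 'basictests']),
]


def select_jobs(jobs, filters=(), excludes=()):
    """AND-filter on `filters`, then drop anything matching `excludes`."""
    selected = []
    for name, labels in jobs:
        if filters and not all(f in labels for f in filters):
            continue
        if any(e in labels for e in excludes):
            continue
        selected.append(name)
    return selected


print(select_jobs(all_jobs, filters=['linux'], excludes=['sanitizers']))
# ['c_linux_dbg_native']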

tools/run_tests/start_port_server.py

@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """
 Wrapper around port server starting code.

tools/run_tests/task_runner.py

@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Runs selected gRPC test/build tasks."""
 
 from __future__ import print_function

@@ -32,52 +31,54 @@ _TARGETS += artifact_targets.targets()
(resulting text after yapf)

_TARGETS += distribtest_targets.targets()
_TARGETS += package_targets.targets()


def _create_build_map():
    """Maps task names and labels to list of tasks to be built."""
    target_build_map = dict([(target.name, [target]) for target in _TARGETS])
    if len(_TARGETS) > len(target_build_map.keys()):
        raise Exception('Target names need to be unique')

    label_build_map = {}
    label_build_map['all'] = [t for t in _TARGETS]  # to build all targets
    for target in _TARGETS:
        for label in target.labels:
            if label in label_build_map:
                label_build_map[label].append(target)
            else:
                label_build_map[label] = [target]

    if set(target_build_map.keys()).intersection(label_build_map.keys()):
        raise Exception('Target names need to be distinct from label names')
    return dict(target_build_map.items() + label_build_map.items())


_BUILD_MAP = _create_build_map()

argp = argparse.ArgumentParser(description='Runs build/test targets.')
argp.add_argument(
    '-b',
    '--build',
    choices=sorted(_BUILD_MAP.keys()),
    nargs='+',
    default=['all'],
    help='Target name or target label to build.')
argp.add_argument(
    '-f',
    '--filter',
    choices=sorted(_BUILD_MAP.keys()),
    nargs='+',
    default=[],
    help='Filter targets to build with AND semantics.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
args = argp.parse_args()

# Figure out which targets to build
targets = []
for label in args.build:
    targets += _BUILD_MAP[label]

# Among targets selected by -b, filter out those that don't match the filter
targets = [t for t in targets if all(f in t.labels for f in args.filter)]

@@ -86,30 +87,29 @@ targets = sorted(set(targets))
(resulting text after yapf)

# Execute pre-build phase
prebuild_jobs = []
for target in targets:
    prebuild_jobs += target.pre_build_jobspecs()
if prebuild_jobs:
    num_failures, _ = jobset.run(
        prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
    if num_failures != 0:
        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
        sys.exit(1)

build_jobs = []
for target in targets:
    build_jobs.append(target.build_jobspec())
if not build_jobs:
    print('Nothing to build.')
    sys.exit(1)

jobset.message('START', 'Building targets.', do_newline=True)
num_failures, resultset = jobset.run(
    build_jobs, newline_on_success=True, maxjobs=args.jobs)
report_utils.render_junit_xml_report(
    resultset, 'report_taskrunner_sponge_log.xml', suite_name='tasks')
if num_failures == 0:
    jobset.message(
        'SUCCESS', 'All targets built successfully.', do_newline=True)
else:
    jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
    sys.exit(1)
