diff --git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh
index 6d2759ddd12..698c341d888 100755
--- a/tools/distrib/yapf_code.sh
+++ b/tools/distrib/yapf_code.sh
@@ -25,10 +25,7 @@ DIRS=(
     'tools/distrib'
     'tools/interop_matrix'
     'tools/profiling'
-    'tools/run_tests/python_utils'
-    'tools/run_tests/sanity'
-    'tools/run_tests/performance'
-    'tools/run_tests/artifacts'
+    'tools/run_tests'
 )
 EXCLUSIONS=(
     'grpcio/grpc_*.py'
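Consolidating the four subdirectory entries into a single 'tools/run_tests' entry makes yapf recurse over the whole tree, which is what pulls the two scripts below into the reformat. A minimal sketch of what that amounts to, assuming yapf's FormatFile API (the function and its in_place parameter are yapf's, but this loop is illustrative, not part of the change):

    # Sketch: format every Python file under tools/run_tests, honoring an
    # exclusion list like the shell script's EXCLUSIONS array.
    import fnmatch
    import os

    from yapf.yapflib.yapf_api import FormatFile

    EXCLUSIONS = ['grpcio/grpc_*.py']  # illustrative, mirrors the script

    for root, _, files in os.walk('tools/run_tests'):
        for name in files:
            path = os.path.join(root, name)
            if not name.endswith('.py'):
                continue
            if any(fnmatch.fnmatch(path, pat) for pat in EXCLUSIONS):
                continue
            FormatFile(path, in_place=True)  # rewrite the file in place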
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 1e957b66774..4af00a47a67 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tool to get build statistics from Jenkins and upload to BigQuery."""

 from __future__ import print_function
@@ -27,39 +26,38 @@ import re
 import sys
 import urllib
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
-
 _PROJECT_ID = 'grpc-testing'
 _HAS_MATRIX = True
-_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
-           'gRPC_master_linux': not _HAS_MATRIX,
-           'gRPC_master_macos': not _HAS_MATRIX,
-           'gRPC_master_windows': not _HAS_MATRIX,
-           'gRPC_performance_master': not _HAS_MATRIX,
-           'gRPC_portability_master_linux': not _HAS_MATRIX,
-           'gRPC_portability_master_windows': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
-           'gRPC_master_msan_c': not _HAS_MATRIX,
-           'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_tsan_cpp': not _HAS_MATRIX,
-           'gRPC_interop_pull_requests': not _HAS_MATRIX,
-           'gRPC_performance_pull_requests': not _HAS_MATRIX,
-           'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_portability_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_pull_requests_macos': not _HAS_MATRIX,
-           'gRPC_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
-           'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+_BUILDS = {
+    'gRPC_interop_master': not _HAS_MATRIX,
+    'gRPC_master_linux': not _HAS_MATRIX,
+    'gRPC_master_macos': not _HAS_MATRIX,
+    'gRPC_master_windows': not _HAS_MATRIX,
+    'gRPC_performance_master': not _HAS_MATRIX,
+    'gRPC_portability_master_linux': not _HAS_MATRIX,
+    'gRPC_portability_master_windows': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+    'gRPC_master_msan_c': not _HAS_MATRIX,
+    'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+    'gRPC_interop_pull_requests': not _HAS_MATRIX,
+    'gRPC_performance_pull_requests': not _HAS_MATRIX,
+    'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_portability_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_pull_requests_macos': not _HAS_MATRIX,
+    'gRPC_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+    'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
 }

 _URL_BASE = 'https://grpc-testing.appspot.com/job'
@@ -99,147 +97,155 @@ _DATASET_ID = 'build_statistics'


 def _scrape_for_known_errors(html):
-  error_list = []
-  for known_error in _KNOWN_ERRORS:
-    errors = re.findall(known_error, html)
-    this_error_count = len(errors)
-    if this_error_count > 0:
-      error_list.append({'description': known_error,
-                         'count': this_error_count})
-      print('====> %d failures due to %s' % (this_error_count, known_error))
-  return error_list
+    error_list = []
+    for known_error in _KNOWN_ERRORS:
+        errors = re.findall(known_error, html)
+        this_error_count = len(errors)
+        if this_error_count > 0:
+            error_list.append({
+                'description': known_error,
+                'count': this_error_count
+            })
+            print('====> %d failures due to %s' %
+                  (this_error_count, known_error))
+    return error_list


 def _no_report_files_found(html):
-  return _NO_REPORT_FILES_FOUND_ERROR in html
+    return _NO_REPORT_FILES_FOUND_ERROR in html


 def _get_last_processed_buildnumber(build_name):
-  query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
-      _PROJECT_ID, _DATASET_ID, build_name)
-  query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None,
-      **query_job['jobReference']).execute(num_retries=3)
-  if page['rows'][0]['f'][0]['v']:
-    return int(page['rows'][0]['f'][0]['v'])
-  return 0
+    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
+        _PROJECT_ID, _DATASET_ID, build_name)
+    query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    if page['rows'][0]['f'][0]['v']:
+        return int(page['rows'][0]['f'][0]['v'])
+    return 0


 def _process_matrix(build, url_base):
-  matrix_list = []
-  for matrix in build.get_matrix_runs():
-    matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
-                          matrix.name).groups()[0]
-    matrix_tuple = matrix_str.split(',')
-    json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    matrix_dict = {'name': matrix_str,
-                   'duration': matrix.get_duration().total_seconds()}
-    matrix_dict.update(_process_build(json_url, console_url))
-    matrix_list.append(matrix_dict)
-
-  return matrix_list
+    matrix_list = []
+    for matrix in build.get_matrix_runs():
+        matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+                              matrix.name).groups()[0]
+        matrix_tuple = matrix_str.split(',')
+        json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        matrix_dict = {
+            'name': matrix_str,
+            'duration': matrix.get_duration().total_seconds()
+        }
+        matrix_dict.update(_process_build(json_url, console_url))
+        matrix_list.append(matrix_dict)
+
+    return matrix_list


 def _process_build(json_url, console_url):
-  build_result = {}
-  error_list = []
-  try:
-    html = urllib.urlopen(json_url).read()
-    test_result = json.loads(html)
-    print('====> Parsing result from %s' % json_url)
-    failure_count = test_result['failCount']
-    build_result['pass_count'] = test_result['passCount']
-    build_result['failure_count'] = failure_count
-    # This means Jenkins failure occurred.
-    build_result['no_report_files_found'] = _no_report_files_found(html)
-    # Only check errors if Jenkins failure occurred.
-    if build_result['no_report_files_found']:
-      error_list = _scrape_for_known_errors(html)
-  except Exception as e:
-    print('====> Got exception for %s: %s.' % (json_url, str(e)))
-    print('====> Parsing errors from %s.' % console_url)
-    html = urllib.urlopen(console_url).read()
-    build_result['pass_count'] = 0
-    build_result['failure_count'] = 1
-    # In this case, the string doesn't exist in the result html but the fact
-    # that we fail to parse the result html indicates Jenkins failure and hence
-    # no report files were generated.
-    build_result['no_report_files_found'] = True
-    error_list = _scrape_for_known_errors(html)
-
-  if error_list:
-    build_result['error'] = error_list
-  elif build_result['no_report_files_found']:
-    build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
-  else:
-    build_result['error'] = [{'description': '', 'count': 0}]
-
-  return build_result
+    build_result = {}
+    error_list = []
+    try:
+        html = urllib.urlopen(json_url).read()
+        test_result = json.loads(html)
+        print('====> Parsing result from %s' % json_url)
+        failure_count = test_result['failCount']
+        build_result['pass_count'] = test_result['passCount']
+        build_result['failure_count'] = failure_count
+        # This means a Jenkins failure occurred.
+        build_result['no_report_files_found'] = _no_report_files_found(html)
+        # Only check errors if a Jenkins failure occurred.
+        if build_result['no_report_files_found']:
+            error_list = _scrape_for_known_errors(html)
+    except Exception as e:
+        print('====> Got exception for %s: %s.' % (json_url, str(e)))
+        print('====> Parsing errors from %s.' % console_url)
+        html = urllib.urlopen(console_url).read()
+        build_result['pass_count'] = 0
+        build_result['failure_count'] = 1
+        # In this case, the string doesn't exist in the result html but the fact
+        # that we fail to parse the result html indicates Jenkins failure and hence
+        # no report files were generated.
+        build_result['no_report_files_found'] = True
+        error_list = _scrape_for_known_errors(html)
+
+    if error_list:
+        build_result['error'] = error_list
+    elif build_result['no_report_files_found']:
+        build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+    else:
+        build_result['error'] = [{'description': '', 'count': 0}]
+
+    return build_result


 # parse command line
 argp = argparse.ArgumentParser(description='Get build statistics.')
 argp.add_argument('-u', '--username', default='jenkins')
-argp.add_argument('-b', '--builds',
-                  choices=['all'] + sorted(_BUILDS.keys()),
-                  nargs='+',
-                  default=['all'])
+argp.add_argument(
+    '-b',
+    '--builds',
+    choices=['all'] + sorted(_BUILDS.keys()),
+    nargs='+',
+    default=['all'])
 args = argp.parse_args()

 J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
 bq = big_query_utils.create_big_query()

 for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
-  print('====> Build: %s' % build_name)
-  # Since get_last_completed_build() always fails due to malformatted string
-  # error, we use get_build_metadata() instead.
-  job = None
-  try:
-    job = J[build_name]
-  except Exception as e:
-    print('====> Failed to get build %s: %s.' % (build_name, str(e)))
-    continue
-  last_processed_build_number = _get_last_processed_buildnumber(build_name)
-  last_complete_build_number = job.get_last_completed_buildnumber()
-  # To avoid processing all builds for a project never looked at. In this case,
-  # only examine 10 latest builds.
-  starting_build_number = max(last_processed_build_number+1,
-                              last_complete_build_number-9)
-  for build_number in xrange(starting_build_number,
-                             last_complete_build_number+1):
-    print('====> Processing %s build %d.' % (build_name, build_number))
-    build = None
+    print('====> Build: %s' % build_name)
+    # Since get_last_completed_build() always fails due to a malformed string
+    # error, we use get_build_metadata() instead.
+    job = None
     try:
-      build = job.get_build_metadata(build_number)
-      print('====> Build status: %s.' % build.get_status())
-      if build.get_status() == 'ABORTED':
+        job = J[build_name]
+    except Exception as e:
+        print('====> Failed to get build %s: %s.' % (build_name, str(e)))
         continue
-      # If any build is still running, stop processing this job. Next time, we
-      # start from where it was left so that all builds are processed
-      # sequentially.
-      if build.is_running():
-        print('====> Build %d is still running.' % build_number)
-        break
-    except KeyError:
-      print('====> Build %s is missing. Skip.' % build_number)
-      continue
-    build_result = {'build_number': build_number,
-                    'timestamp': str(build.get_timestamp())}
-    url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
-    if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
-      build_result['matrix'] = _process_matrix(build, url_base)
-    else:
-      json_url = '%s/testReport/api/json' % url_base
-      console_url = '%s/consoleFull' % url_base
-      build_result['duration'] = build.get_duration().total_seconds()
-      build_stat = _process_build(json_url, console_url)
-      build_result.update(build_stat)
-    rows = [big_query_utils.make_row(build_number, build_result)]
-    if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name,
-                                       rows):
-      print('====> Error uploading result to bigquery.')
-      sys.exit(1)
+    last_processed_build_number = _get_last_processed_buildnumber(build_name)
+    last_complete_build_number = job.get_last_completed_buildnumber()
+    # For a project we have never looked at, avoid processing all of its
+    # builds; only examine the 10 latest ones.
+    starting_build_number = max(last_processed_build_number + 1,
+                                last_complete_build_number - 9)
+    for build_number in xrange(starting_build_number,
+                               last_complete_build_number + 1):
+        print('====> Processing %s build %d.' % (build_name, build_number))
+        build = None
+        try:
+            build = job.get_build_metadata(build_number)
+            print('====> Build status: %s.' % build.get_status())
+            if build.get_status() == 'ABORTED':
+                continue
+            # If any build is still running, stop processing this job. Next time, we
+            # start from where it was left so that all builds are processed
+            # sequentially.
+            if build.is_running():
+                print('====> Build %d is still running.' % build_number)
+                break
+        except KeyError:
+            print('====> Build %s is missing. Skip.' % build_number)
+            continue
+        build_result = {
+            'build_number': build_number,
+            'timestamp': str(build.get_timestamp())
+        }
+        url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+        if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
+            build_result['matrix'] = _process_matrix(build, url_base)
+        else:
+            json_url = '%s/testReport/api/json' % url_base
+            console_url = '%s/consoleFull' % url_base
+            build_result['duration'] = build.get_duration().total_seconds()
+            build_stat = _process_build(json_url, console_url)
+            build_result.update(build_stat)
+        rows = [big_query_utils.make_row(build_number, build_result)]
+        if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                           build_name, rows):
+            print('====> Error uploading result to bigquery.')
+            sys.exit(1)
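The trickiest line in _process_matrix above is the regex that pulls the 'config,language,platform' triple out of a Jenkins matrix-run name; '\xc2\xbb' is the UTF-8 byte encoding of the '»' separator Jenkins puts in those names. A small runnable illustration (the sample name below is hypothetical, not taken from a real build):

    import re

    # Hypothetical matrix-run name in the shape the script expects.
    name = 'gRPC_master \xc2\xbb dbg,c++,linux #1234'
    match = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*', name)
    print(match.groups()[0])             # dbg,c++,linux
    print(match.groups()[0].split(','))  # ['dbg', 'c++', 'linux']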
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 8f46ea99fd8..99f42988137 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run interop (cross-language) tests in parallel."""

 from __future__ import print_function
@@ -37,9 +36,9 @@ import python_utils.jobset as jobset
 import python_utils.report_utils as report_utils
 # It's ok to not import because this is only necessary to upload results to BQ.
 try:
-  from python_utils.upload_test_results import upload_interop_results_to_bq
+    from python_utils.upload_test_results import upload_interop_results_to_bq
 except ImportError as e:
-  print(e)
+    print(e)

 # Docker doesn't clean up after itself, so we do it on exit.
 atexit.register(lambda: subprocess.call(['stty', 'echo']))
@@ -47,22 +46,24 @@ atexit.register(lambda: subprocess.call(['stty', 'echo']))
 ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)

-_DEFAULT_SERVER_PORT=8080
+_DEFAULT_SERVER_PORT = 8080

-_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
-                            'client_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = [
+    'client_compressed_unary', 'client_compressed_streaming'
+]

-_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
-                            'server_compressed_streaming']
+_SKIP_SERVER_COMPRESSION = [
+    'server_compressed_unary', 'server_compressed_streaming'
+]

 _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION

-_SKIP_ADVANCED = ['status_code_and_message',
-                  'custom_metadata',
-                  'unimplemented_method',
-                  'unimplemented_service']
+_SKIP_ADVANCED = [
+    'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+    'unimplemented_service'
+]

-_TEST_TIMEOUT = 3*60
+_TEST_TIMEOUT = 3 * 60

 # disable this test on core-based languages,
 # see https://github.com/grpc/grpc/issues/9779
@@ -77,977 +78,1054 @@ _XML_REPORT = 'report.xml'


 class CXXLanguage:

-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = 'cxx'
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = 'cxx'

-  def client_cmd(self, args):
-    return ['bins/opt/interop_client'] + args
+    def client_cmd(self, args):
+        return ['bins/opt/interop_client'] + args

-  def client_cmd_http2interop(self, args):
-    return ['bins/opt/http2_client'] + args
+    def client_cmd_http2interop(self, args):
+        return ['bins/opt/http2_client'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['bins/opt/interop_server'] + args
+    def server_cmd(self, args):
+        return ['bins/opt/interop_server'] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []

-  def __str__(self):
-    return 'c++'
+    def __str__(self):
+        return 'c++'


 class CSharpLanguage:

-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+    def client_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+    def server_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'


 class CSharpCoreCLRLanguage:

-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+    def client_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+    def server_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def __str__(self):
-    return 'csharpcoreclr'
+    def __str__(self):
+        return 'csharpcoreclr'


 class JavaLanguage:

-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.server_cwd = '../grpc-java'
-    self.http2_cwd = '../grpc-java'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.server_cwd = '../grpc-java'
+        self.http2_cwd = '../grpc-java'
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['./run-test-client.sh'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh'] + args

-  def client_cmd_http2interop(self, args):
-    return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
+    def client_cmd_http2interop(self, args):
+        return [
+            './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+        ] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['./run-test-server.sh'] + args
+    def server_cmd(self, args):
+        return ['./run-test-server.sh'] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return []
+    def unimplemented_test_cases(self):
+        return []

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def __str__(self):
-    return 'java'
+    def __str__(self):
+        return 'java'


 class JavaOkHttpClient:

-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.safename = 'java'
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.safename = 'java'

-  def client_cmd(self, args):
-    return ['./run-test-client.sh', '--use_okhttp=true'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh', '--use_okhttp=true'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING

-  def __str__(self):
-    return 'javaokhttp'
+    def __str__(self):
+        return 'javaokhttp'


 class GoLanguage:

-  def __init__(self):
-    # TODO: this relies on running inside docker
-    self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
-    self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
-    self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
-    self.safename = str(self)
+    def __init__(self):
+        # TODO: this relies on running inside docker
+        self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+        self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+        self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+        self.safename = str(self)
+
+    def client_cmd(self, args):
+        return ['go', 'run', 'client.go'] + args

-  def client_cmd(self, args):
-    return ['go', 'run', 'client.go'] + args
+    def client_cmd_http2interop(self, args):
+        return ['go', 'run', 'negative_http2_client.go'] + args

-  def client_cmd_http2interop(self, args):
-    return ['go', 'run', 'negative_http2_client.go'] + args
+    def cloud_to_prod_env(self):
+        return {}

-  def cloud_to_prod_env(self):
-    return {}
+    def server_cmd(self, args):
+        return ['go', 'run', 'server.go'] + args

-  def server_cmd(self, args):
-    return ['go', 'run', 'server.go'] + args
+    def global_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION

-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def __str__(self):
+        return 'go'

-  def __str__(self):
-    return 'go'

 class Http2Server:
-  """Represents the HTTP/2 Interop Test server
+    """Represents the HTTP/2 Interop Test server

   This pretends to be a language in order to be built and run, but really it
   isn't.
   """

-  def __init__(self):
-    self.server_cwd = None
-    self.safename = str(self)

-  def server_cmd(self, args):
-    return ['python test/http2_test/http2_test_server.py']
+    def __init__(self):
+        self.server_cwd = None
+        self.safename = str(self)
+
+    def server_cmd(self, args):
+        return ['python test/http2_test/http2_test_server.py']
+
+    def cloud_to_prod_env(self):
+        return {}

-  def cloud_to_prod_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def unimplemented_test_cases(self):
+        return _TEST_CASES + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases(self):
-    return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES

-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def __str__(self):
+        return 'http2'

-  def __str__(self):
-    return 'http2'

 class Http2Client:
-  """Represents the HTTP/2 Interop Test
+    """Represents the HTTP/2 Interop Test

   This pretends to be a language in order to be built and run, but really it
   isn't.
   """

-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)

-  def client_cmd(self, args):
-    return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)

-  def cloud_to_prod_env(self):
-    return {}
+    def client_cmd(self, args):
+        return ['tools/http2_interop/http2_interop.test', '-test.v'] + args

-  def global_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _TEST_CASES
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def unimplemented_test_cases(self):
+        return _TEST_CASES
+
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES
+
+    def __str__(self):
+        return 'http2'

-  def __str__(self):
-    return 'http2'

 class NodeLanguage:

-  def __init__(self):
-    self.client_cwd = '../grpc-node'
-    self.server_cwd = '../grpc-node'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-node'
+        self.server_cwd = '../grpc-node'
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def client_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_client.js'] + args
+            'test/interop/interop_client.js'
+        ] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def server_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_server.js'] + args
+            'test/interop/interop_server.js'
+        ] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def __str__(self):
-    return 'node'
+    def __str__(self):
+        return 'node'


 class PHPLanguage:

-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []

-  def __str__(self):
-    return 'php'
+    def __str__(self):
+        return 'php'


 class PHP7Language:

-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
+
+    def __str__(self):
+        return 'php7'

-  def __str__(self):
-    return 'php7'

 class ObjcLanguage:

-  def __init__(self):
-    self.client_cwd = 'src/objective-c/tests'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/objective-c/tests'
+        self.safename = str(self)
+
+    def client_cmd(self, args):
+        # from args, extract the server port and craft xcodebuild command out of it
+        for arg in args:
+            port = re.search('--server_port=(\d+)', arg)
+            if port:
+                portnum = port.group(1)
+                cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+                return [cmdline]

-  def client_cmd(self, args):
-    # from args, extract the server port and craft xcodebuild command out of it
-    for arg in args:
-      port = re.search('--server_port=(\d+)', arg)
-      if port:
-        portnum = port.group(1)
-        cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test'%portnum
-        return [cmdline]
+    def cloud_to_prod_env(self):
+        return {}

-  def cloud_to_prod_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def global_env(self):
-    return {}
+    def unimplemented_test_cases(self):
+        # ObjC test runs all cases with the same command. It ignores the testcase
+        # cmdline argument. Here we return all but one test cases as unimplemented,
+        # and depend upon ObjC test's behavior that it runs all cases even when
+        # we tell it to run just one.
+        return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases(self):
-    # ObjC test runs all cases with the same command. It ignores the testcase
-    # cmdline argument. Here we return all but one test cases as unimplemented,
-    # and depend upon ObjC test's behavior that it runs all cases even when
-    # we tell it to run just one.
-    return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def __str__(self):
+        return 'objc'

-  def __str__(self):
-    return 'objc'

 class RubyLanguage:

-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.safename = str(self)

-  def client_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/client.rb'] + args
+    def client_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/client.rb'
+        ] + args

-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}

-  def server_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/server.rb'] + args
+    def server_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/server.rb'
+        ] + args

-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
+
+    def __str__(self):
+        return 'ruby'

-  def __str__(self):
-    return 'ruby'

 class PythonLanguage:

-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = str(self)
-
-  def client_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--client',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = str(self)

-  def client_cmd_http2interop(self, args):
-    return [ 'py27/bin/python',
-              'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
-           ] + args
-
-  def cloud_to_prod_env(self):
-    return {}
-
-  def server_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--server',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def client_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+        ]
+
+    def client_cmd_http2interop(self, args):
+        return [
+            'py27/bin/python',
+            'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+        ] + args

-  def global_env(self):
-    return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
-            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
+    def cloud_to_prod_env(self):
+        return {}

-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def server_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+        ]

-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def global_env(self):
+        return {
+            'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+        }

-  def __str__(self):
-    return 'python'
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
+
+    def __str__(self):
+        return 'python'


 _LANGUAGES = {
-    'c++' : CXXLanguage(),
-    'csharp' : CSharpLanguage(),
-    'csharpcoreclr' : CSharpCoreCLRLanguage(),
-    'go' : GoLanguage(),
-    'java' : JavaLanguage(),
-    'javaokhttp' : JavaOkHttpClient(),
-    'node' : NodeLanguage(),
-    'php' : PHPLanguage(),
-    'php7' : PHP7Language(),
-    'objc' : ObjcLanguage(),
-    'ruby' : RubyLanguage(),
-    'python' : PythonLanguage(),
+    'c++': CXXLanguage(),
+    'csharp': CSharpLanguage(),
+    'csharpcoreclr': CSharpCoreCLRLanguage(),
+    'go': GoLanguage(),
+    'java': JavaLanguage(),
+    'javaokhttp': JavaOkHttpClient(),
+    'node': NodeLanguage(),
+    'php': PHPLanguage(),
+    'php7': PHP7Language(),
+    'objc': ObjcLanguage(),
+    'ruby': RubyLanguage(),
+    'python': PythonLanguage(),
 }

 # languages supported as cloud_to_cloud servers
-_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
-
-_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
-               'empty_stream', 'client_streaming', 'server_streaming',
-               'cancel_after_begin', 'cancel_after_first_response',
-               'timeout_on_sleeping_server', 'custom_metadata',
-               'status_code_and_message', 'unimplemented_method',
-               'client_compressed_unary', 'server_compressed_unary',
-               'client_compressed_streaming', 'server_compressed_streaming',
-               'unimplemented_service']
-
-_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
-                    'oauth2_auth_token', 'per_rpc_creds']
+_SERVERS = [
+    'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python'
+]
+
+_TEST_CASES = [
+    'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+    'client_streaming', 'server_streaming', 'cancel_after_begin',
+    'cancel_after_first_response', 'timeout_on_sleeping_server',
+    'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+    'client_compressed_unary', 'server_compressed_unary',
+    'client_compressed_streaming', 'server_compressed_streaming',
+    'unimplemented_service'
+]
+
+_AUTH_TEST_CASES = [
+    'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token',
+    'per_rpc_creds'
+]

 _HTTP2_TEST_CASES = ['tls', 'framing']

-_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
-                            'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+_HTTP2_SERVER_TEST_CASES = [
+    'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+    'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]

-_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+    'data_frame_padding': 'large_unary',
+    'no_df_padding_sanity_test': 'large_unary'
+}

-_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
+)

-_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+    'java', 'go', 'python', 'c++'
+]

 DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'

+
 def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
-  """Wraps given cmdline array to create 'docker run' cmdline from it."""
-  docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+    """Wraps given cmdline array to create 'docker run' cmdline from it."""
+    docker_cmdline = ['docker', 'run', '-i', '--rm=true']

-  # turn environ into -e docker args
-  if environ:
-    for k,v in environ.items():
-      docker_cmdline += ['-e', '%s=%s' % (k,v)]
+    # turn environ into -e docker args
+    if environ:
+        for k, v in environ.items():
+            docker_cmdline += ['-e', '%s=%s' % (k, v)]

-  # set working directory
-  workdir = DOCKER_WORKDIR_ROOT
-  if cwd:
-    workdir = os.path.join(workdir, cwd)
-  docker_cmdline += ['-w', workdir]
+    # set working directory
+    workdir = DOCKER_WORKDIR_ROOT
+    if cwd:
+        workdir = os.path.join(workdir, cwd)
+    docker_cmdline += ['-w', workdir]

-  docker_cmdline += docker_args + [image] + cmdline
-  return docker_cmdline
+    docker_cmdline += docker_args + [image] + cmdline
+    return docker_cmdline


 def manual_cmdline(docker_cmdline, docker_image):
-  """Returns docker cmdline adjusted for manual invocation."""
-  print_cmdline = []
-  for item in docker_cmdline:
-    if item.startswith('--name='):
-      continue
-    if item == docker_image:
-      item = "$docker_image"
-    item = item.replace('"', '\\"')
-    # add quotes when necessary
-    if any(character.isspace() for character in item):
-      item = "\"%s\"" % item
-    print_cmdline.append(item)
-  return ' '.join(print_cmdline)
+    """Returns docker cmdline adjusted for manual invocation."""
+    print_cmdline = []
+    for item in docker_cmdline:
+        if item.startswith('--name='):
+            continue
+        if item == docker_image:
+            item = "$docker_image"
+        item = item.replace('"', '\\"')
+        # add quotes when necessary
+        if any(character.isspace() for character in item):
+            item = "\"%s\"" % item
+        print_cmdline.append(item)
+    return ' '.join(print_cmdline)


 def write_cmdlog_maybe(cmdlog, filename):
-  """Returns docker cmdline adjusted for manual invocation."""
-  if cmdlog:
-    with open(filename, 'w') as logfile:
-      logfile.write('#!/bin/bash\n')
-      logfile.writelines("%s\n" % line for line in cmdlog)
-    print('Command log written to file %s' % filename)
+    """Writes the command log into a file, if the log is non-empty."""
+    if cmdlog:
+        with open(filename, 'w') as logfile:
+            logfile.write('#!/bin/bash\n')
+            logfile.writelines("%s\n" % line for line in cmdlog)
+        print('Command log written to file %s' % filename)


 def bash_cmdline(cmdline):
-  """Creates bash -c cmdline from args list."""
-  # Use login shell:
-  # * makes error messages clearer if executables are missing
-  return ['bash', '-c', ' '.join(cmdline)]
+    """Creates bash -c cmdline from args list."""
+    # Use login shell:
+    # * makes error messages clearer if executables are missing
+    return ['bash', '-c', ' '.join(cmdline)]


 def auth_options(language, test_case):
-  """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+    """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""

-  language = str(language)
-  cmdargs = []
-  env = {}
+    language = str(language)
+    cmdargs = []
+    env = {}

-  # TODO(jtattermusch): this file path only works inside docker
-  key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
-  oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
-  key_file_arg = '--service_account_key_file=%s' % key_filepath
-  default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
+    # TODO(jtattermusch): this file path only works inside docker
+    key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
+    oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+    key_file_arg = '--service_account_key_file=%s' % key_filepath
+    default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'

-  if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
-    if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
-      env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
-    else:
-      cmdargs += [key_file_arg]
+    if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+        if language in [
+                'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python',
+                'ruby'
+        ]:
+            env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
+        else:
+            cmdargs += [key_file_arg]

-  if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
-    cmdargs += [oauth_scope_arg]
+    if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+        cmdargs += [oauth_scope_arg]

-  if test_case == 'oauth2_auth_token' and language == 'c++':
-    # C++ oauth2 test uses GCE creds and thus needs to know the default account
-    cmdargs += [default_account_arg]
+    if test_case == 'oauth2_auth_token' and language == 'c++':
+        # C++ oauth2 test uses GCE creds and thus needs to know the default account
+        cmdargs += [default_account_arg]

-  if test_case == 'compute_engine_creds':
-    cmdargs += [oauth_scope_arg, default_account_arg]
+    if test_case == 'compute_engine_creds':
+        cmdargs += [oauth_scope_arg, default_account_arg]

-  return (cmdargs, env)
+    return (cmdargs, env)


 def _job_kill_handler(job):
-  if job._spec.container_name:
-    dockerjob.docker_kill(job._spec.container_name)
-  # When the job times out and we decide to kill it,
-  # we need to wait a before restarting the job
-  # to prevent "container name already in use" error.
-  # TODO(jtattermusch): figure out a cleaner way to to this.
-  time.sleep(2)
-
-
-def cloud_to_prod_jobspec(language, test_case, server_host_name,
-                          server_host_detail, docker_image=None, auth=False,
+    if job._spec.container_name:
+        dockerjob.docker_kill(job._spec.container_name)
+    # When the job times out and we decide to kill it,
+    # we need to wait a bit before restarting the job
+    # to prevent "container name already in use" error.
+    # TODO(jtattermusch): figure out a cleaner way to do this.
+    time.sleep(2)
+
+
+def cloud_to_prod_jobspec(language,
+                          test_case,
+                          server_host_name,
+                          server_host_detail,
+                          docker_image=None,
+                          auth=False,
                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-prod interop test"""
-  container_name = None
-  cmdargs = [
-      '--server_host=%s' % server_host_detail[0],
-      '--server_host_override=%s' % server_host_detail[1],
-      '--server_port=443',
-      '--use_tls=true',
-      '--test_case=%s' % test_case]
-  environ = dict(language.cloud_to_prod_env(), **language.global_env())
-  if auth:
-    auth_cmdargs, auth_env = auth_options(language, test_case)
-    cmdargs += auth_cmdargs
-    environ.update(auth_env)
-  cmdline = bash_cmdline(language.client_cmd(cmdargs))
-  cwd = language.client_cwd
-
-  if docker_image:
-    container_name = dockerjob.random_name('interop_client_%s' %
-                                           language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 cwd=cwd,
-                                 environ=environ,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
-    environ = None
-
-  suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
-                                     test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
-
-
-def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
-                           server_port, docker_image=None, insecure=False,
-                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-cloud interop test"""
-  interop_only_options = [
-      '--server_host_override=foo.test.google.fr',
-      '--use_tls=%s' % ('false' if insecure else 'true'),
-      '--use_test_ca=true',
-  ]
-
-  client_test_case = test_case
-  if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-    client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
-  if client_test_case in language.unimplemented_test_cases():
-    print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
-    sys.exit(1)
+    """Creates jobspec for cloud-to-prod interop test"""
+    container_name = None
+    cmdargs = [
+        '--server_host=%s' % server_host_detail[0],
+        '--server_host_override=%s' % server_host_detail[1],
+        '--server_port=443', '--use_tls=true', '--test_case=%s' % test_case
+    ]
+    environ = dict(language.cloud_to_prod_env(), **language.global_env())
+    if auth:
+        auth_cmdargs, auth_env = auth_options(language, test_case)
+        cmdargs += auth_cmdargs
+        environ.update(auth_env)
+    cmdline = bash_cmdline(language.client_cmd(cmdargs))
+    cwd = language.client_cwd

-  common_options = [
-      '--test_case=%s' % client_test_case,
-      '--server_host=%s' % server_host,
-      '--server_port=%s' % server_port,
-  ]
+    if docker_image:
+        container_name = dockerjob.random_name('interop_client_%s' %
+                                               language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            cwd=cwd,
+            environ=environ,
+            docker_args=['--net=host', '--name=%s' % container_name])
+        if manual_cmd_log is not None:
+            if manual_cmd_log == []:
+                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                      docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+        environ = None
+
+    suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
+                                   test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+                           test_case,
+                           server_name,
+                           server_host,
+                           server_port,
+                           docker_image=None,
+                           insecure=False,
+                           manual_cmd_log=None):
+    """Creates jobspec for cloud-to-cloud interop test"""
+    interop_only_options = [
+        '--server_host_override=foo.test.google.fr',
+        '--use_tls=%s' % ('false' if insecure else 'true'),
+        '--use_test_ca=true',
+    ]

-  if test_case in _HTTP2_SERVER_TEST_CASES:
+    client_test_case = test_case
     if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-      client_options = interop_only_options + common_options
-      cmdline = bash_cmdline(language.client_cmd(client_options))
-      cwd = language.client_cwd
-    else:
-      cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
-      cwd = language.http2_cwd
-  else:
-    cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
-    cwd = language.client_cwd
+        client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+            test_case]
+    if client_test_case in language.unimplemented_test_cases():
+        print('asking client %s to run unimplemented test case %s' %
+              (repr(language), client_test_case))
+        sys.exit(1)
+
+    common_options = [
+        '--test_case=%s' % client_test_case,
+        '--server_host=%s' % server_host,
+        '--server_port=%s' % server_port,
+    ]

-  environ = language.global_env()
-  if docker_image and language.safename != 'objc':
-    # we can't run client in docker for objc.
-    container_name = dockerjob.random_name('interop_client_%s' % language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 environ=environ,
-                                 cwd=cwd,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
-
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
-                                                        test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
+    if test_case in _HTTP2_SERVER_TEST_CASES:
+        if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+            client_options = interop_only_options + common_options
+            cmdline = bash_cmdline(language.client_cmd(client_options))
+            cwd = language.client_cwd
+        else:
+            cmdline = bash_cmdline(
+                language.client_cmd_http2interop(common_options))
+            cwd = language.http2_cwd
+    else:
+        cmdline = bash_cmdline(
+            language.client_cmd(common_options + interop_only_options))
+        cwd = language.client_cwd
+
+    environ = language.global_env()
+    if docker_image and language.safename != 'objc':
+        # we can't run client in docker for objc.
+        container_name = dockerjob.random_name('interop_client_%s' %
+                                               language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            environ=environ,
+            cwd=cwd,
+            docker_args=['--net=host', '--name=%s' % container_name])
+        if manual_cmd_log is not None:
+            if manual_cmd_log == []:
+                manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                      docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
+                                                      test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job


 def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
-  """Create jobspec for running a server"""
-  container_name = dockerjob.random_name('interop_server_%s' % language.safename)
-  cmdline = bash_cmdline(
-      language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
-                           '--use_tls=%s' % ('false' if insecure else 'true')]))
-  environ = language.global_env()
-  docker_args = ['--name=%s' % container_name]
-  if language.safename == 'http2':
-    # we are running the http2 interop server. Open next N ports beginning
-    # with the server port. These ports are used for http2 interop test
-    # (one test case per port).
-    docker_args += list(
-        itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
-                                      for i in range(
-                                          len(_HTTP2_SERVER_TEST_CASES))))
-    # Enable docker's healthcheck mechanism.
-    # This runs a Python script inside the container every second. The script
-    # pings the http2 server to verify it is ready. The 'health-retries' flag
-    # specifies the number of consecutive failures before docker will report
-    # the container's status as 'unhealthy'. Prior to the first 'health_retries'
-    # failures or the first success, the status will be 'starting'. 'docker ps'
-    # or 'docker inspect' can be used to see the health of the container on the
-    # command line.
-    docker_args += [
-        '--health-cmd=python test/http2_test/http2_server_health_check.py '
-        '--server_host=%s --server_port=%d'
-        % ('localhost', _DEFAULT_SERVER_PORT),
-        '--health-interval=1s',
-        '--health-retries=5',
-        '--health-timeout=10s',
-    ]
+    """Create jobspec for running a server"""
+    container_name = dockerjob.random_name('interop_server_%s' %
+                                           language.safename)
+    cmdline = bash_cmdline(
+        language.server_cmd([
+            '--port=%s' % _DEFAULT_SERVER_PORT, '--use_tls=%s' % (
+                'false' if insecure else 'true')
+        ]))
+    environ = language.global_env()
+    docker_args = ['--name=%s' % container_name]
+    if language.safename == 'http2':
+        # we are running the http2 interop server. Open next N ports beginning
+        # with the server port. These ports are used for http2 interop test
+        # (one test case per port).
+        docker_args += list(
+            itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
+                                          for i in range(
+                                              len(_HTTP2_SERVER_TEST_CASES))))
+        # Enable docker's healthcheck mechanism.
+        # This runs a Python script inside the container every second. The script
+        # pings the http2 server to verify it is ready. The 'health-retries' flag
+        # specifies the number of consecutive failures before docker will report
+        # the container's status as 'unhealthy'. Prior to the first 'health_retries'
+        # failures or the first success, the status will be 'starting'. 'docker ps'
+        # or 'docker inspect' can be used to see the health of the container on the
+        # command line.
+        docker_args += [
+            '--health-cmd=python test/http2_test/http2_server_health_check.py '
+            '--server_host=%s --server_port=%d' %
+            ('localhost', _DEFAULT_SERVER_PORT),
+            '--health-interval=1s',
+            '--health-retries=5',
+            '--health-timeout=10s',
+        ]

-  else:
-    docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
-
-  docker_cmdline = docker_run_cmdline(cmdline,
-                                      image=docker_image,
-                                      cwd=language.server_cwd,
-                                      environ=environ,
-                                      docker_args=docker_args)
-  if manual_cmd_log is not None:
-    if manual_cmd_log == []:
-      manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-    manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
-  server_job = jobset.JobSpec(
-          cmdline=docker_cmdline,
-          environ=environ,
-          shortname='interop_server_%s' % language,
-          timeout_seconds=30*60)
-  server_job.container_name = container_name
-  return server_job
+    else:
+        docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+
+    docker_cmdline = docker_run_cmdline(
+        cmdline,
+        image=docker_image,
+        cwd=language.server_cwd,
+        environ=environ,
+        docker_args=docker_args)
+    if manual_cmd_log is not None:
+        if manual_cmd_log == []:
+            manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' %
+                                  docker_image)
+        manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+    server_job = jobset.JobSpec(
+        cmdline=docker_cmdline,
+        environ=environ,
+        shortname='interop_server_%s' % language,
+        timeout_seconds=30 * 60)
+    server_job.container_name = container_name
+    return server_job


 def build_interop_image_jobspec(language, tag=None):
-  """Creates jobspec for building interop docker image for a language"""
-  if not tag:
-    tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
-  env = {'INTEROP_IMAGE': tag,
-         'BASE_NAME': 'grpc_interop_%s' % language.safename}
-  if not args.travis:
-    env['TTY_FLAG'] = '-t'
-  # This env variable is used to get around the github rate limit
-  # error when running the PHP `composer install` command
-  host_file = '%s/.composer/auth.json' % os.environ['HOME']
-  if language.safename == 'php' and os.path.exists(host_file):
-    env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
-      '-v %s:/root/.composer/auth.json:ro' % host_file
-  build_job = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
-          environ=env,
-          shortname='build_docker_%s' % (language),
-          timeout_seconds=30*60)
-  build_job.tag = tag
-  return build_job
+    """Creates jobspec for building interop docker image for a language"""
+    if not tag:
+        tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+    env = {
+        'INTEROP_IMAGE': tag,
+        'BASE_NAME': 'grpc_interop_%s' % language.safename
+    }
+    if not args.travis:
+        env['TTY_FLAG'] = '-t'
+    # This env variable is used to get around the github rate limit
+    # error when running the PHP `composer install` command
+    host_file = '%s/.composer/auth.json' % os.environ['HOME']
+    if language.safename == 'php' and os.path.exists(host_file):
+        env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+            '-v %s:/root/.composer/auth.json:ro' % host_file
+    build_job = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+        environ=env,
+        shortname='build_docker_%s' % (language),
+        timeout_seconds=30 * 60)
+    build_job.tag = tag
+    return build_job


 def aggregate_http2_results(stdout):
-  match = re.search(r'\{"cases[^\]]*\]\}', stdout)
-  if not match:
-    return None
-
-  results = json.loads(match.group(0))
-  skipped = 0
-  passed = 0
-  failed = 0
-  failed_cases = []
-  for case in results['cases']:
-    if case.get('skipped', False):
-      skipped += 1
-    else:
-      if case.get('passed', False):
-        passed += 1
-      else:
-        failed += 1
-        failed_cases.append(case.get('name', "NONAME"))
-  return {
-    'passed': passed,
-    'failed': failed,
-    'skipped': skipped,
-    'failed_cases': ', '.join(failed_cases),
-    'percent': 1.0 * passed / (passed + failed)
-  }
+    match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+    if not match:
+        return None
+
+    results = json.loads(match.group(0))
+    skipped = 0
+    passed = 0
+    failed = 0
+    failed_cases = []
+    for case in results['cases']:
+        if case.get('skipped', False):
+            skipped += 1
+        else:
+            if case.get('passed', False):
+                passed += 1
+            else:
+                failed += 1
+                failed_cases.append(case.get('name', "NONAME"))
+    return {
+        'passed': passed,
+        'failed': failed,
+        'skipped': skipped,
+        'failed_cases': ', '.join(failed_cases),
+        'percent': 1.0 * passed / (passed + failed)
+    }
+

 # A dictionary of prod servers to test.
 # Format: server_name: (server_host, server_host_override, errors_allowed)
 # TODO(adelez): implement logic for errors_allowed where if the indicated tests
 # fail, they don't impact the overall test result.
 prod_servers = {
-    'default': ('216.239.32.254',
-                'grpc-test.sandbox.googleapis.com', False),
-    'gateway_v2': ('216.239.32.254',
-                   'grpc-test2.sandbox.googleapis.com', True),
+    'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False),
+    'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True),
     'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
                       False),
     'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
                          True),
-    'gateway_v4': ('216.239.32.254',
-                   'grpc-test4.sandbox.googleapis.com', True),
+    'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True),
     'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
                          True),
 }

 argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Clients to run. Objc client can be only run on OSX.')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Clients to run. Objc client can only be run on OSX.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--cloud_to_prod',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod tests.')
-argp.add_argument('--cloud_to_prod_auth',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod_auth tests.')
-argp.add_argument('--prod_servers',
-                  choices=prod_servers.keys(),
-                  default=['default'],
-                  nargs='+',
-                  help=('The servers to run cloud_to_prod and '
-                        'cloud_to_prod_auth tests against.'))
-argp.add_argument('-s', '--server',
-                  choices=['all'] + sorted(_SERVERS),
-                  nargs='+',
-                  help='Run cloud_to_cloud servers in a separate docker ' +
-                  'image. Servers can only be started automatically if ' +
-                  '--use_docker option is enabled.',
-                  default=[])
-argp.add_argument('--override_server',
-                  action='append',
-                  type=lambda kv: kv.split('='),
-                  help='Use servername=HOST:PORT to explicitly specify a server. E.g. 
csharp=localhost:50000', - default=[]) -argp.add_argument('-t', '--travis', - default=False, - action='store_const', - const=True) -argp.add_argument('-v', '--verbose', - default=False, - action='store_const', - const=True) -argp.add_argument('--use_docker', - default=False, - action='store_const', - const=True, - help='Run all the interop tests under docker. That provides ' + - 'additional isolation and prevents the need to install ' + - 'language specific prerequisites. Only available on Linux.') -argp.add_argument('--allow_flakes', - default=False, - action='store_const', - const=True, - help='Allow flaky tests to show as passing (re-runs failed tests up to five times)') -argp.add_argument('--manual_run', - default=False, - action='store_const', - const=True, - help='Prepare things for running interop tests manually. ' + - 'Preserve docker images after building them and skip ' - 'actually running the tests. Only print commands to run by ' + - 'hand.') -argp.add_argument('--http2_interop', - default=False, - action='store_const', - const=True, - help='Enable HTTP/2 client edge case testing. (Bad client, good server)') -argp.add_argument('--http2_server_interop', - default=False, - action='store_const', - const=True, - help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests') -argp.add_argument('--insecure', - default=False, - action='store_const', - const=True, - help='Whether to use secure channel.') -argp.add_argument('--internal_ci', - default=False, - action='store_const', - const=True, - help=('Put reports into subdirectories to improve ' - 'presentation of results by Internal CI.')) -argp.add_argument('--bq_result_table', - default='', - type=str, - nargs='?', - help='Upload test results to a specified BQ table.') +argp.add_argument( + '--cloud_to_prod', + default=False, + action='store_const', + const=True, + help='Run cloud_to_prod tests.') +argp.add_argument( + '--cloud_to_prod_auth', + default=False, + action='store_const', + const=True, + help='Run cloud_to_prod_auth tests.') +argp.add_argument( + '--prod_servers', + choices=prod_servers.keys(), + default=['default'], + nargs='+', + help=('The servers to run cloud_to_prod and ' + 'cloud_to_prod_auth tests against.')) +argp.add_argument( + '-s', + '--server', + choices=['all'] + sorted(_SERVERS), + nargs='+', + help='Run cloud_to_cloud servers in a separate docker ' + + 'image. Servers can only be started automatically if ' + + '--use_docker option is enabled.', + default=[]) +argp.add_argument( + '--override_server', + action='append', + type=lambda kv: kv.split('='), + help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000', + default=[]) +argp.add_argument( + '-t', '--travis', default=False, action='store_const', const=True) +argp.add_argument( + '-v', '--verbose', default=False, action='store_const', const=True) +argp.add_argument( + '--use_docker', + default=False, + action='store_const', + const=True, + help='Run all the interop tests under docker. That provides ' + + 'additional isolation and prevents the need to install ' + + 'language specific prerequisites. Only available on Linux.') +argp.add_argument( + '--allow_flakes', + default=False, + action='store_const', + const=True, + help='Allow flaky tests to show as passing (re-runs failed tests up to five times)' +) +argp.add_argument( + '--manual_run', + default=False, + action='store_const', + const=True, + help='Prepare things for running interop tests manually. 
' +
+    'Preserve docker images after building them and skip '
+    'actually running the tests. Only print commands to run by '
+    'hand.')
+argp.add_argument(
+    '--http2_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+    '--http2_server_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument(
+    '--insecure',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Whether to use an insecure channel.')
+argp.add_argument(
+    '--internal_ci',
+    default=False,
+    action='store_const',
+    const=True,
+    help=('Put reports into subdirectories to improve '
+          'presentation of results by Internal CI.'))
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
 args = argp.parse_args()

-servers = set(s for s in itertools.chain.from_iterable(_SERVERS
-                                                       if x == 'all' else [x]
-                                                       for x in args.server))
+servers = set(
+    s
+    for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
+                                           for x in args.server))

 if args.use_docker:
-  if not args.travis:
-    print('Seen --use_docker flag, will run interop tests under docker.')
-    print('')
-    print('IMPORTANT: The changes you are testing need to be locally committed')
-    print('because only the committed changes in the current branch will be')
-    print('copied to the docker environment.')
-    time.sleep(5)
+    if not args.travis:
+        print('Seen --use_docker flag, will run interop tests under docker.')
+        print('')
+        print(
+            'IMPORTANT: The changes you are testing need to be locally committed'
+        )
+        print(
+            'because only the committed changes in the current branch will be')
+        print('copied to the docker environment.')
+        time.sleep(5)

 if args.manual_run and not args.use_docker:
-  print('--manual_run is only supported with --use_docker option enabled.')
-  sys.exit(1)
+    print('--manual_run is only supported with --use_docker option enabled.')
+    sys.exit(1)

 if not args.use_docker and servers:
-  print('Running interop servers is only supported with --use_docker option enabled.')
-  sys.exit(1)
-
+    print(
+        'Running interop servers is only supported with --use_docker option enabled.'
+ ) + sys.exit(1) # we want to include everything but objc in 'all' # because objc won't run on non-mac platforms all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc']) -languages = set(_LANGUAGES[l] - for l in itertools.chain.from_iterable( - all_but_objc if x == 'all' else [x] - for x in args.language)) +languages = set( + _LANGUAGES[l] + for l in itertools.chain.from_iterable(all_but_objc if x == 'all' else [x] + for x in args.language)) languages_http2_clients_for_http2_server_interop = set() if args.http2_server_interop: - languages_http2_clients_for_http2_server_interop = set( - _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES - if 'all' in args.language or l in args.language) + languages_http2_clients_for_http2_server_interop = set( + _LANGUAGES[l] + for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES + if 'all' in args.language or l in args.language) http2Interop = Http2Client() if args.http2_interop else None http2InteropServer = Http2Server() if args.http2_server_interop else None -docker_images={} +docker_images = {} if args.use_docker: - # languages for which to build docker images - languages_to_build = set( - _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers])) - languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop - - if args.http2_interop: - languages_to_build.add(http2Interop) - - if args.http2_server_interop: - languages_to_build.add(http2InteropServer) - - build_jobs = [] - for l in languages_to_build: - if str(l) == 'objc': - # we don't need to build a docker image for objc - continue - job = build_interop_image_jobspec(l) - docker_images[str(l)] = job.tag - build_jobs.append(job) - - if build_jobs: - jobset.message('START', 'Building interop docker images.', do_newline=True) - if args.verbose: - print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs)) + # languages for which to build docker images + languages_to_build = set( + _LANGUAGES[k] + for k in set([str(l) for l in languages] + [s for s in servers])) + languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop - num_failures, _ = jobset.run( - build_jobs, newline_on_success=True, maxjobs=args.jobs) - if num_failures == 0: - jobset.message('SUCCESS', 'All docker images built successfully.', - do_newline=True) - else: - jobset.message('FAILED', 'Failed to build interop docker images.', - do_newline=True) - for image in six.itervalues(docker_images): - dockerjob.remove_image(image, skip_nonexistent=True) - sys.exit(1) + if args.http2_interop: + languages_to_build.add(http2Interop) + + if args.http2_server_interop: + languages_to_build.add(http2InteropServer) + + build_jobs = [] + for l in languages_to_build: + if str(l) == 'objc': + # we don't need to build a docker image for objc + continue + job = build_interop_image_jobspec(l) + docker_images[str(l)] = job.tag + build_jobs.append(job) + + if build_jobs: + jobset.message( + 'START', 'Building interop docker images.', do_newline=True) + if args.verbose: + print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs)) + + num_failures, _ = jobset.run( + build_jobs, newline_on_success=True, maxjobs=args.jobs) + if num_failures == 0: + jobset.message( + 'SUCCESS', + 'All docker images built successfully.', + do_newline=True) + else: + jobset.message( + 'FAILED', + 'Failed to build interop docker images.', + do_newline=True) + for image in six.itervalues(docker_images): + dockerjob.remove_image(image, 
skip_nonexistent=True) + sys.exit(1) server_manual_cmd_log = [] if args.manual_run else None client_manual_cmd_log = [] if args.manual_run else None @@ -1056,214 +1134,236 @@ client_manual_cmd_log = [] if args.manual_run else None server_jobs = {} server_addresses = {} try: - for s in servers: - lang = str(s) - spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang), - args.insecure, manual_cmd_log=server_manual_cmd_log) - if not args.manual_run: - job = dockerjob.DockerJob(spec) - server_jobs[lang] = job - server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT)) - else: - # don't run the server, set server port to a placeholder value - server_addresses[lang] = ('localhost', '${SERVER_PORT}') - - http2_server_job = None - if args.http2_server_interop: - # launch a HTTP2 server emulator that creates edge cases - lang = str(http2InteropServer) - spec = server_jobspec(http2InteropServer, docker_images.get(lang), - manual_cmd_log=server_manual_cmd_log) - if not args.manual_run: - http2_server_job = dockerjob.DockerJob(spec) - server_jobs[lang] = http2_server_job - else: - # don't run the server, set server port to a placeholder value - server_addresses[lang] = ('localhost', '${SERVER_PORT}') - - jobs = [] - if args.cloud_to_prod: - if args.insecure: - print('TLS is always enabled for cloud_to_prod scenarios.') - for server_host_name in args.prod_servers: - for language in languages: - for test_case in _TEST_CASES: - if not test_case in language.unimplemented_test_cases(): - if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION: - test_job = cloud_to_prod_jobspec( - language, test_case, server_host_name, - prod_servers[server_host_name], - docker_image=docker_images.get(str(language)), - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - if args.http2_interop: - for test_case in _HTTP2_TEST_CASES: - test_job = cloud_to_prod_jobspec( - http2Interop, test_case, server_host_name, - prod_servers[server_host_name], - docker_image=docker_images.get(str(http2Interop)), - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - if args.cloud_to_prod_auth: - if args.insecure: - print('TLS is always enabled for cloud_to_prod scenarios.') - for server_host_name in args.prod_servers: - for language in languages: - for test_case in _AUTH_TEST_CASES: - if not test_case in language.unimplemented_test_cases(): - test_job = cloud_to_prod_jobspec( - language, test_case, server_host_name, - prod_servers[server_host_name], - docker_image=docker_images.get(str(language)), auth=True, - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - for server in args.override_server: - server_name = server[0] - (server_host, server_port) = server[1].split(':') - server_addresses[server_name] = (server_host, server_port) - - for server_name, server_address in server_addresses.items(): - (server_host, server_port) = server_address - server_language = _LANGUAGES.get(server_name, None) - skip_server = [] # test cases unimplemented by server - if server_language: - skip_server = server_language.unimplemented_test_cases_server() - for language in languages: - for test_case in _TEST_CASES: - if not test_case in language.unimplemented_test_cases(): - if not test_case in skip_server: - test_job = cloud_to_cloud_jobspec(language, - test_case, - server_name, - server_host, - server_port, - docker_image=docker_images.get(str(language)), - insecure=args.insecure, - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - if args.http2_interop: - for test_case in 
_HTTP2_TEST_CASES: - if server_name == "go": - # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434 - continue - test_job = cloud_to_cloud_jobspec(http2Interop, - test_case, - server_name, - server_host, - server_port, - docker_image=docker_images.get(str(http2Interop)), - insecure=args.insecure, - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - if args.http2_server_interop: - if not args.manual_run: - http2_server_job.wait_for_healthy(timeout_seconds=600) - for language in languages_http2_clients_for_http2_server_interop: - for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS): - offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case) - server_port = _DEFAULT_SERVER_PORT+offset + for s in servers: + lang = str(s) + spec = server_jobspec( + _LANGUAGES[lang], + docker_images.get(lang), + args.insecure, + manual_cmd_log=server_manual_cmd_log) if not args.manual_run: - server_port = http2_server_job.mapped_port(server_port) - test_job = cloud_to_cloud_jobspec(language, - test_case, - str(http2InteropServer), - 'localhost', - server_port, - docker_image=docker_images.get(str(language)), - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - for language in languages: - # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of - # HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather - # than specialized http2 clients, reusing existing test implementations. - # For example, in the "data_frame_padding" test, use language's gRPC - # interop clients and make them think that theyre running "large_unary" - # test case. This avoids implementing a new test case in each language. - for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS: - if test_case not in language.unimplemented_test_cases(): - offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case) - server_port = _DEFAULT_SERVER_PORT+offset - if not args.manual_run: - server_port = http2_server_job.mapped_port(server_port) - if not args.insecure: - print(('Creating grpc cient to http2 server test case with insecure connection, even though' - ' args.insecure is False. 
Http2 test server only supports insecure connections.')) - test_job = cloud_to_cloud_jobspec(language, - test_case, - str(http2InteropServer), - 'localhost', - server_port, - docker_image=docker_images.get(str(language)), - insecure=True, - manual_cmd_log=client_manual_cmd_log) - jobs.append(test_job) - - if not jobs: - print('No jobs to run.') - for image in six.itervalues(docker_images): - dockerjob.remove_image(image, skip_nonexistent=True) - sys.exit(1) - - if args.manual_run: - print('All tests will skipped --manual_run option is active.') - - if args.verbose: - print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs)) + job = dockerjob.DockerJob(spec) + server_jobs[lang] = job + server_addresses[lang] = ('localhost', + job.mapped_port(_DEFAULT_SERVER_PORT)) + else: + # don't run the server, set server port to a placeholder value + server_addresses[lang] = ('localhost', '${SERVER_PORT}') + + http2_server_job = None + if args.http2_server_interop: + # launch a HTTP2 server emulator that creates edge cases + lang = str(http2InteropServer) + spec = server_jobspec( + http2InteropServer, + docker_images.get(lang), + manual_cmd_log=server_manual_cmd_log) + if not args.manual_run: + http2_server_job = dockerjob.DockerJob(spec) + server_jobs[lang] = http2_server_job + else: + # don't run the server, set server port to a placeholder value + server_addresses[lang] = ('localhost', '${SERVER_PORT}') + + jobs = [] + if args.cloud_to_prod: + if args.insecure: + print('TLS is always enabled for cloud_to_prod scenarios.') + for server_host_name in args.prod_servers: + for language in languages: + for test_case in _TEST_CASES: + if not test_case in language.unimplemented_test_cases(): + if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION: + test_job = cloud_to_prod_jobspec( + language, + test_case, + server_host_name, + prod_servers[server_host_name], + docker_image=docker_images.get(str(language)), + manual_cmd_log=client_manual_cmd_log) + jobs.append(test_job) + + if args.http2_interop: + for test_case in _HTTP2_TEST_CASES: + test_job = cloud_to_prod_jobspec( + http2Interop, + test_case, + server_host_name, + prod_servers[server_host_name], + docker_image=docker_images.get(str(http2Interop)), + manual_cmd_log=client_manual_cmd_log) + jobs.append(test_job) + + if args.cloud_to_prod_auth: + if args.insecure: + print('TLS is always enabled for cloud_to_prod scenarios.') + for server_host_name in args.prod_servers: + for language in languages: + for test_case in _AUTH_TEST_CASES: + if not test_case in language.unimplemented_test_cases(): + test_job = cloud_to_prod_jobspec( + language, + test_case, + server_host_name, + prod_servers[server_host_name], + docker_image=docker_images.get(str(language)), + auth=True, + manual_cmd_log=client_manual_cmd_log) + jobs.append(test_job) + + for server in args.override_server: + server_name = server[0] + (server_host, server_port) = server[1].split(':') + server_addresses[server_name] = (server_host, server_port) + + for server_name, server_address in server_addresses.items(): + (server_host, server_port) = server_address + server_language = _LANGUAGES.get(server_name, None) + skip_server = [] # test cases unimplemented by server + if server_language: + skip_server = server_language.unimplemented_test_cases_server() + for language in languages: + for test_case in _TEST_CASES: + if not test_case in language.unimplemented_test_cases(): + if not test_case in skip_server: + test_job = cloud_to_cloud_jobspec( + language, + test_case, + server_name, + 
server_host,
+                            server_port,
+                            docker_image=docker_images.get(str(language)),
+                            insecure=args.insecure,
+                            manual_cmd_log=client_manual_cmd_log)
+                        jobs.append(test_job)
+
+        if args.http2_interop:
+            for test_case in _HTTP2_TEST_CASES:
+                if server_name == "go":
+                    # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+                    continue
+                test_job = cloud_to_cloud_jobspec(
+                    http2Interop,
+                    test_case,
+                    server_name,
+                    server_host,
+                    server_port,
+                    docker_image=docker_images.get(str(http2Interop)),
+                    insecure=args.insecure,
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
+
+    if args.http2_server_interop:
+        if not args.manual_run:
+            http2_server_job.wait_for_healthy(timeout_seconds=600)
+        for language in languages_http2_clients_for_http2_server_interop:
+            for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+                    _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+                offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                server_port = _DEFAULT_SERVER_PORT + offset
+                if not args.manual_run:
+                    server_port = http2_server_job.mapped_port(server_port)
+                test_job = cloud_to_cloud_jobspec(
+                    language,
+                    test_case,
+                    str(http2InteropServer),
+                    'localhost',
+                    server_port,
+                    docker_image=docker_images.get(str(language)),
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
+        for language in languages:
+            # _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+            # _HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop
+            # clients rather than specialized http2 clients, reusing existing
+            # test implementations. For example, in the "data_frame_padding"
+            # test, use the language's gRPC interop clients and make them think
+            # that they're running the "large_unary" test case. This avoids
+            # implementing a new test case in each language.
+            for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+                if test_case not in language.unimplemented_test_cases():
+                    offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                    server_port = _DEFAULT_SERVER_PORT + offset
+                    if not args.manual_run:
+                        server_port = http2_server_job.mapped_port(server_port)
+                    if not args.insecure:
+                        print((
+                            'Creating gRPC client to HTTP/2 server test case with an insecure connection, even though'
+                            ' args.insecure is False. The HTTP/2 test server only supports insecure connections.'
+                        ))
+                    test_job = cloud_to_cloud_jobspec(
+                        language,
+                        test_case,
+                        str(http2InteropServer),
+                        'localhost',
+                        server_port,
+                        docker_image=docker_images.get(str(language)),
+                        insecure=True,
+                        manual_cmd_log=client_manual_cmd_log)
+                    jobs.append(test_job)
+
+    if not jobs:
+        print('No jobs to run.')
+        for image in six.itervalues(docker_images):
+            dockerjob.remove_image(image, skip_nonexistent=True)
+        sys.exit(1)
+
+    if args.manual_run:
+        print('All tests will be skipped: the --manual_run option is active.')

-  num_failures, resultset = jobset.run(jobs, newline_on_success=True,
-                                       maxjobs=args.jobs,
-                                       skip_jobs=args.manual_run)
-  if args.bq_result_table and resultset:
-    upload_interop_results_to_bq(resultset, args.bq_result_table, args)
-  if num_failures:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  else:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+    if args.verbose:
+        print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+
+    num_failures, resultset = jobset.run(
+        jobs,
+        newline_on_success=True,
+        maxjobs=args.jobs,
+        skip_jobs=args.manual_run)
+    if args.bq_result_table and resultset:
+        upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+    if num_failures:
+        jobset.message('FAILED', 'Some tests failed', do_newline=True)
+    else:
+        jobset.message('SUCCESS', 'All tests passed', do_newline=True)

-  write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
-  write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+    write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+    write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')

-  xml_report_name = _XML_REPORT
-  if args.internal_ci:
-    xml_report_name = _INTERNAL_CL_XML_REPORT
-  report_utils.render_junit_xml_report(resultset, xml_report_name)
+    xml_report_name = _XML_REPORT
+    if args.internal_ci:
+        xml_report_name = _INTERNAL_CL_XML_REPORT
+    report_utils.render_junit_xml_report(resultset, xml_report_name)

-  for name, job in resultset.items():
-    if "http2" in name:
-      job[0].http2results = aggregate_http2_results(job[0].message)
+    for name, job in resultset.items():
+        if "http2" in name:
+            job[0].http2results = aggregate_http2_results(job[0].message)

-  http2_server_test_cases = (
-      _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
+    http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+                               if args.http2_server_interop else [])

-  report_utils.render_interop_html_report(
-      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
-      _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
-      args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
-      args.http2_interop)
+    report_utils.render_interop_html_report(
+        set([str(l) for l in languages]), servers, _TEST_CASES,
+        _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset,
+        num_failures, args.cloud_to_prod_auth or args.cloud_to_prod,
+        args.prod_servers, args.http2_interop)

-  if num_failures:
-    sys.exit(1)
-  else:
-    sys.exit(0)
+    if num_failures:
+        sys.exit(1)
+    else:
+        sys.exit(0)
 except Exception as e:
-  print('exception occurred:')
-  traceback.print_exc(file=sys.stdout)
+    print('exception occurred:')
+    traceback.print_exc(file=sys.stdout)
 finally:
-  # Check if servers are still running.
+ for server, job in server_jobs.items(): + if not job.is_running(): + print('Server "%s" has exited prematurely.' % server) - dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)]) + dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)]) - for image in six.itervalues(docker_images): - if not args.manual_run: - print('Removing docker image %s' % image) - dockerjob.remove_image(image) - else: - print('Preserving docker image: %s' % image) + for image in six.itervalues(docker_images): + if not args.manual_run: + print('Removing docker image %s' % image) + dockerjob.remove_image(image) + else: + print('Preserving docker image: %s' % image) diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py index c136af58cb8..561217ceb1f 100755 --- a/tools/run_tests/run_microbenchmark.py +++ b/tools/run_tests/run_microbenchmark.py @@ -23,26 +23,31 @@ import argparse import python_utils.jobset as jobset import python_utils.start_port_server as start_port_server -sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', 'bm_diff')) +sys.path.append( + os.path.join( + os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', + 'bm_diff')) import bm_constants flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph') os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..')) if not os.path.exists('reports'): - os.makedirs('reports') + os.makedirs('reports') start_port_server.start_port_server() + def fnize(s): - out = '' - for c in s: - if c in '<>, /': - if len(out) and out[-1] == '_': continue - out += '_' - else: - out += c - return out + out = '' + for c in s: + if c in '<>, /': + if len(out) and out[-1] == '_': continue + out += '_' + else: + out += c + return out + # index html index_html = """ @@ -53,169 +58,202 @@ index_html = """
""" + def heading(name): - global index_html - index_html += "%s\n" % cgi.escape(txt) + global index_html + index_html += "
%s\n" % cgi.escape(txt) + def collect_latency(bm_name, args): - """generate latency profiles""" - benchmarks = [] - profile_analysis = [] - cleanup = [] - - heading('Latency Profiles: %s' % bm_name) - subprocess.check_call( - ['make', bm_name, - 'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()]) - for line in subprocess.check_output(['bins/basicprof/%s' % bm_name, - '--benchmark_list_tests']).splitlines(): - link(line, '%s.txt' % fnize(line)) - benchmarks.append( - jobset.JobSpec(['bins/basicprof/%s' % bm_name, - '--benchmark_filter=^%s$' % line, - '--benchmark_min_time=0.05'], - environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}, - shortname='profile-%s' % fnize(line))) - profile_analysis.append( - jobset.JobSpec([sys.executable, - 'tools/profiling/latency_profile/profile_analyzer.py', - '--source', '%s.trace' % fnize(line), '--fmt', 'simple', - '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60, - shortname='analyze-%s' % fnize(line))) - cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)])) - # periodically flush out the list of jobs: profile_analysis jobs at least - # consume upwards of five gigabytes of ram in some cases, and so analysing - # hundreds of them at once is impractical -- but we want at least some - # concurrency or the work takes too long - if len(benchmarks) >= min(16, multiprocessing.cpu_count()): - # run up to half the cpu count: each benchmark can use up to two cores - # (one for the microbenchmark, one for the data flush) - jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2)) - jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) - jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) - benchmarks = [] - profile_analysis = [] - cleanup = [] - # run the remaining benchmarks that weren't flushed - if len(benchmarks): - jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2)) - jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) - jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + """generate latency profiles""" + benchmarks = [] + profile_analysis = [] + cleanup = [] + + heading('Latency Profiles: %s' % bm_name) + subprocess.check_call([ + 'make', bm_name, 'CONFIG=basicprof', '-j', + '%d' % multiprocessing.cpu_count() + ]) + for line in subprocess.check_output( + ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines(): + link(line, '%s.txt' % fnize(line)) + benchmarks.append( + jobset.JobSpec( + [ + 'bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % + line, '--benchmark_min_time=0.05' + ], + environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}, + shortname='profile-%s' % fnize(line))) + profile_analysis.append( + jobset.JobSpec( + [ + sys.executable, + 'tools/profiling/latency_profile/profile_analyzer.py', + '--source', '%s.trace' % fnize(line), '--fmt', 'simple', + '--out', 'reports/%s.txt' % fnize(line) + ], + timeout_seconds=20 * 60, + shortname='analyze-%s' % fnize(line))) + cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)])) + # periodically flush out the list of jobs: profile_analysis jobs at least + # consume upwards of five gigabytes of ram in some cases, and so analysing + # hundreds of them at once is impractical -- but we want at least some + # concurrency or the work takes too long + if len(benchmarks) >= min(16, multiprocessing.cpu_count()): + # run up to half the cpu count: each benchmark can use up to two cores + # (one for the microbenchmark, one for the data flush) + jobset.run( + benchmarks, maxjobs=max(1, 
multiprocessing.cpu_count() / 2)) + jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) + jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + benchmarks = [] + profile_analysis = [] + cleanup = [] + # run the remaining benchmarks that weren't flushed + if len(benchmarks): + jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2)) + jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) + jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + def collect_perf(bm_name, args): - """generate flamegraphs""" - heading('Flamegraphs: %s' % bm_name) - subprocess.check_call( - ['make', bm_name, - 'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()]) - benchmarks = [] - profile_analysis = [] - cleanup = [] - for line in subprocess.check_output(['bins/mutrace/%s' % bm_name, - '--benchmark_list_tests']).splitlines(): - link(line, '%s.svg' % fnize(line)) - benchmarks.append( - jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line), - '-g', '-F', '997', - 'bins/mutrace/%s' % bm_name, - '--benchmark_filter=^%s$' % line, - '--benchmark_min_time=10'], - shortname='perf-%s' % fnize(line))) - profile_analysis.append( - jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'], - environ = { - 'PERF_BASE_NAME': fnize(line), - 'OUTPUT_DIR': 'reports', - 'OUTPUT_FILENAME': fnize(line), - }, - shortname='flame-%s' % fnize(line))) - cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)])) - cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)])) - # periodically flush out the list of jobs: temporary space required for this - # processing is large - if len(benchmarks) >= 20: - # run up to half the cpu count: each benchmark can use up to two cores - # (one for the microbenchmark, one for the data flush) - jobset.run(benchmarks, maxjobs=1) - jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) - jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) - benchmarks = [] - profile_analysis = [] - cleanup = [] - # run the remaining benchmarks that weren't flushed - if len(benchmarks): - jobset.run(benchmarks, maxjobs=1) - jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) - jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + """generate flamegraphs""" + heading('Flamegraphs: %s' % bm_name) + subprocess.check_call([ + 'make', bm_name, 'CONFIG=mutrace', '-j', + '%d' % multiprocessing.cpu_count() + ]) + benchmarks = [] + profile_analysis = [] + cleanup = [] + for line in subprocess.check_output( + ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines(): + link(line, '%s.svg' % fnize(line)) + benchmarks.append( + jobset.JobSpec( + [ + 'perf', 'record', '-o', '%s-perf.data' % fnize( + line), '-g', '-F', '997', 'bins/mutrace/%s' % bm_name, + '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10' + ], + shortname='perf-%s' % fnize(line))) + profile_analysis.append( + jobset.JobSpec( + [ + 'tools/run_tests/performance/process_local_perf_flamegraphs.sh' + ], + environ={ + 'PERF_BASE_NAME': fnize(line), + 'OUTPUT_DIR': 'reports', + 'OUTPUT_FILENAME': fnize(line), + }, + shortname='flame-%s' % fnize(line))) + cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)])) + cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)])) + # periodically flush out the list of jobs: temporary space required for this + # processing is large + if len(benchmarks) >= 20: + # run up to half the cpu count: each benchmark can use up to two cores + # (one for 
the microbenchmark, one for the data flush) + jobset.run(benchmarks, maxjobs=1) + jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) + jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + benchmarks = [] + profile_analysis = [] + cleanup = [] + # run the remaining benchmarks that weren't flushed + if len(benchmarks): + jobset.run(benchmarks, maxjobs=1) + jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count()) + jobset.run(cleanup, maxjobs=multiprocessing.cpu_count()) + def run_summary(bm_name, cfg, base_json_name): - subprocess.check_call( - ['make', bm_name, - 'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()]) - cmd = ['bins/%s/%s' % (cfg, bm_name), - '--benchmark_out=%s.%s.json' % (base_json_name, cfg), - '--benchmark_out_format=json'] - if args.summary_time is not None: - cmd += ['--benchmark_min_time=%d' % args.summary_time] - return subprocess.check_output(cmd) + subprocess.check_call([ + 'make', bm_name, 'CONFIG=%s' % cfg, '-j', + '%d' % multiprocessing.cpu_count() + ]) + cmd = [ + 'bins/%s/%s' % (cfg, bm_name), '--benchmark_out=%s.%s.json' % + (base_json_name, cfg), '--benchmark_out_format=json' + ] + if args.summary_time is not None: + cmd += ['--benchmark_min_time=%d' % args.summary_time] + return subprocess.check_output(cmd) + def collect_summary(bm_name, args): - heading('Summary: %s [no counters]' % bm_name) - text(run_summary(bm_name, 'opt', bm_name)) - heading('Summary: %s [with counters]' % bm_name) - text(run_summary(bm_name, 'counters', bm_name)) - if args.bigquery_upload: - with open('%s.csv' % bm_name, 'w') as f: - f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py', - '%s.counters.json' % bm_name, - '%s.opt.json' % bm_name])) - subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name]) + heading('Summary: %s [no counters]' % bm_name) + text(run_summary(bm_name, 'opt', bm_name)) + heading('Summary: %s [with counters]' % bm_name) + text(run_summary(bm_name, 'counters', bm_name)) + if args.bigquery_upload: + with open('%s.csv' % bm_name, 'w') as f: + f.write( + subprocess.check_output([ + 'tools/profiling/microbenchmarks/bm2bq.py', + '%s.counters.json' % bm_name, '%s.opt.json' % bm_name + ])) + subprocess.check_call([ + 'bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name + ]) + collectors = { - 'latency': collect_latency, - 'perf': collect_perf, - 'summary': collect_summary, + 'latency': collect_latency, + 'perf': collect_perf, + 'summary': collect_summary, } argp = argparse.ArgumentParser(description='Collect data from microbenchmarks') -argp.add_argument('-c', '--collect', - choices=sorted(collectors.keys()), - nargs='*', - default=sorted(collectors.keys()), - help='Which collectors should be run against each benchmark') -argp.add_argument('-b', '--benchmarks', - choices=bm_constants._AVAILABLE_BENCHMARK_TESTS, - default=bm_constants._AVAILABLE_BENCHMARK_TESTS, - nargs='+', - type=str, - help='Which microbenchmarks should be run') -argp.add_argument('--bigquery_upload', - default=False, - action='store_const', - const=True, - help='Upload results from summary collection to bigquery') -argp.add_argument('--summary_time', - default=None, - type=int, - help='Minimum time to run benchmarks for the summary collection') +argp.add_argument( + '-c', + '--collect', + choices=sorted(collectors.keys()), + nargs='*', + default=sorted(collectors.keys()), + help='Which collectors should be run against each benchmark') +argp.add_argument( + '-b', + '--benchmarks', + 
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    nargs='+',
+    type=str,
+    help='Which microbenchmarks should be run')
+argp.add_argument(
+    '--bigquery_upload',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Upload results from summary collection to BigQuery')
+argp.add_argument(
+    '--summary_time',
+    default=None,
+    type=int,
+    help='Minimum time to run benchmarks for the summary collection')
 args = argp.parse_args()

 try:
-  for collect in args.collect:
-    for bm_name in args.benchmarks:
-      collectors[collect](bm_name, args)
+    for collect in args.collect:
+        for bm_name in args.benchmarks:
+            collectors[collect](bm_name, args)
 finally:
-  if not os.path.exists('reports'):
-    os.makedirs('reports')
-  index_html += "</body>\n</html>\n"
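
For orientation, the pieces in run_microbenchmark.py compose as follows: fnize turns a benchmark name into a filesystem-safe slug, the HTML helpers accumulate fragments into the global index_html string, and each collector appends headings and links as it schedules jobs. Below is a minimal sketch of that flow, assuming the heading/link helpers shown earlier; the close-out and write to reports/index.html are assumptions, since the diff is truncated before the script's final lines, and the benchmark names are purely illustrative.

import cgi
import os

index_html = "<html><head><title>Microbenchmark Results</title></head><body>\n"


def fnize(s):
    # Collapse '<', '>', ',', ' ' and '/' into single underscores so a
    # benchmark name can double as a file name (runs of specials merge).
    out = ''
    for c in s:
        if c in '<>, /':
            if len(out) and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out


def heading(name):
    global index_html
    index_html += "<h1>%s</h1>\n" % name


def link(txt, tgt):
    global index_html
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
        tgt, quote=True), cgi.escape(txt))


bm = 'BM_PumpStreamClientToServer<TCP>/8'  # illustrative benchmark name
heading('Latency Profiles: bm_fullstack')  # one section per benchmark binary
link(bm, '%s.txt' % fnize(bm))  # -> BM_PumpStreamClientToServer_TCP_8.txt

# Assumed close-out, mirroring the truncated finally block above:
if not os.path.exists('reports'):
    os.makedirs('reports')
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
    f.write(index_html)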