Merge pull request #8461 from matt-kwong/pull-request-params

Enable pull request test filtering
matt-kwong 8 years ago committed by GitHub
commit 24e5f37567
6 changed files:
  tools/run_tests/filter_pull_request_tests.py (26 lines changed)
  tools/run_tests/jobset.py (13 lines changed)
  tools/run_tests/report_utils.py (2 lines changed)
  tools/run_tests/run_tests_matrix.py (205 lines changed)
  tools/run_tests/sanity/check_test_filtering.py (149 lines changed)
  tools/run_tests/sanity/sanity_tests.yaml (1 line changed)

tools/run_tests/filter_pull_request_tests.py
@@ -31,7 +31,7 @@
"""Filter out tests based on file differences compared to merge target branch"""
import re
from subprocess import call, check_output
from subprocess import check_output
class TestSuite:
@@ -56,7 +56,6 @@ class TestSuite:
# Create test suites
_SANITY_TEST_SUITE = TestSuite(['sanity'])
_CORE_TEST_SUITE = TestSuite(['c'])
_CPP_TEST_SUITE = TestSuite(['c++'])
_CSHARP_TEST_SUITE = TestSuite(['csharp'])
@@ -68,15 +67,16 @@ _RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos'])
_ALL_TEST_SUITES = [_SANITY_TEST_SUITE, _CORE_TEST_SUITE, _CPP_TEST_SUITE,
_CSHARP_TEST_SUITE, _NODE_TEST_SUITE, _OBJC_TEST_SUITE,
_PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
_LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
_NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
_PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
_WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
# Dictionary of whitelistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
# the changed files should not trigger any tests. Any changed file that does not
# match any of these regexes will trigger all tests.
# DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
_WHITELIST_DICT = {
'^doc/': [],
'^examples/': [],
@@ -89,7 +89,7 @@ _WHITELIST_DICT = {
'^src/php/': [_PHP_TEST_SUITE],
'^src/python/': [_PYTHON_TEST_SUITE],
'^src/ruby/': [_RUBY_TEST_SUITE],
'^templates/': [_SANITY_TEST_SUITE],
'^templates/': [],
'^test/core/': [_CORE_TEST_SUITE],
'^test/cpp/': [_CPP_TEST_SUITE],
'^test/distrib/cpp/': [_CPP_TEST_SUITE],
@@ -104,7 +104,7 @@ _WHITELIST_DICT = {
'config\.m4$': [_PHP_TEST_SUITE],
'CONTRIBUTING\.md$': [],
'Gemfile$': [_RUBY_TEST_SUITE],
'grpc.def$': [_WINDOWS_TEST_SUITE],
'grpc\.def$': [_WINDOWS_TEST_SUITE],
'grpc\.gemspec$': [_RUBY_TEST_SUITE],
'gRPC\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
@@ -171,11 +171,11 @@ def filter_tests(tests, base_branch):
:param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests
"""
print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch)
print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
changed_files = _get_changed_files(base_branch)
for changed_file in changed_files:
print(changed_file)
print
print(' %s' % changed_file)
print('')
# Regex that combines all keys in _WHITELIST_DICT
all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
@@ -188,8 +188,8 @@ def filter_tests(tests, base_branch):
for test_suite in _ALL_TEST_SUITES:
if _can_skip_tests(changed_files, test_suite.triggers):
for label in test_suite.labels:
print(" Filtering %s tests" % label)
print(' %s tests safe to skip' % label)
skippable_labels.append(label)
tests = _remove_irrelevant_tests(tests, skippable_labels)
return tests
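
A suite is skippable only when none of the changed files matches any regex that
triggers it. Below is a condensed sketch of the _can_skip_tests helper under the
semantics this diff implies (the helper's body is not shown in the hunks above,
and the PR file list here is hypothetical):

    import re

    def _can_skip_tests(file_names, triggers):
        # Safe to skip only when no changed file matches any trigger regex.
        for file_name in file_names:
            if any(re.match(trigger, file_name) for trigger in triggers):
                return False
        return True

    # Hypothetical PR touching only Ruby sources and docs:
    changed = ['src/ruby/foo.rb', 'doc/statuscodes.md']
    print(_can_skip_tests(changed, ['^src/ruby/', 'Gemfile$']))  # False: Ruby must run
    print(_can_skip_tests(changed, ['^src/python/']))            # True: Python is skippable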

tools/run_tests/jobset.py
@@ -96,6 +96,7 @@ _COLORS = {
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
'cyan': [ 36, 0 ]
}
@@ -114,6 +115,7 @@ _TAG_COLOR = {
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
@@ -450,7 +452,16 @@ def run(cmdlines,
travis=False,
infinite_runs=False,
stop_on_failure=False,
add_env={}):
add_env={},
skip_jobs=False):
if skip_jobs:
results = {}
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
for job in cmdlines:
message('SKIPPED', job.shortname, do_newline=True)
results[job.shortname] = [skipped_job_result]
return results
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env)
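
Because of the early return, a skip-only call never constructs a Jobset or
spawns a process; it simply returns a dict mapping each job's shortname to a
single SKIPPED JobResult. That is exactly how run_tests_matrix.py consumes it
(shown in full further down):

    # Render skipped jobs into the resultset without executing them.
    skipped_results = jobset.run(skipped_jobs, skip_jobs=True)
    resultset.update(skipped_results)  # merged before report.xml is rendered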

tools/run_tests/report_utils.py
@@ -74,6 +74,8 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
ET.SubElement(xml_test, 'failure', message='Failure')
elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout')
elif result.state == 'SKIPPED':
ET.SubElement(xml_test, 'skipped', message='Skipped')
tree = ET.ElementTree(root)
tree.write(xml_report, encoding='UTF-8')
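
A skipped job therefore lands in report.xml as a JUnit <skipped/> child rather
than a failure, so CI dashboards count it separately. A standalone sketch of the
element this new branch produces (the testcase name is made up):

    import xml.etree.ElementTree as ET

    xml_test = ET.Element('testcase', name='run_tests_c_linux')  # hypothetical name
    ET.SubElement(xml_test, 'skipped', message='Skipped')
    print(ET.tostring(xml_test).decode())
    # <testcase name="run_tests_c_linux"><skipped message="Skipped" /></testcase>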

tools/run_tests/run_tests_matrix.py
@@ -241,105 +241,110 @@ def _allowed_labels():
return sorted(all_labels)
argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
argp.add_argument('-j', '--jobs',
default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
type=int,
help='Number of concurrent run_tests.py instances.')
argp.add_argument('-f', '--filter',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Filter targets to run by label with AND semantics.')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Pass --build_only flag to run_tests.py instances.')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Pass --force_default_poller to run_tests.py instances.')
argp.add_argument('--dry_run',
default=False,
action='store_const',
const=True,
help='Only print what would be run.')
argp.add_argument('--filter_pr_tests',
default=False,
action='store_const',
const=True,
help='Filters out tests irrelevant to pull request changes.')
argp.add_argument('--base_branch',
default='origin/master',
type=str,
help='Branch that pull request is requesting to merge into')
argp.add_argument('--inner_jobs',
default=_DEFAULT_INNER_JOBS,
type=int,
help='Number of jobs in each run_tests.py instance')
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append('--build_only')
if args.force_default_poller:
extra_args.append('--force_default_poller')
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = []
for job in all_jobs:
if not args.filter or all(filter in job.labels for filter in args.filter):
jobs.append(job)
if not jobs:
jobset.message('FAILED', 'No test suites match given criteria.',
do_newline=True)
sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
print
print 'Will run these tests:'
for job in jobs:
if args.dry_run:
print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline))
else:
print ' %s' % job.shortname
print
if args.filter_pr_tests:
print 'IMPORTANT: Test filtering is not active; this is only for testing.'
relevant_jobs = filter_tests(jobs, args.base_branch)
# todo(mattkwong): add skipped tests to report.xml
print
if len(relevant_jobs) == len(jobs):
print '(TESTING) No tests will be skipped.'
else:
print '(TESTING) These tests will be skipped:'
for job in list(set(jobs) - set(relevant_jobs)):
print ' %s' % job.shortname
if __name__ == "__main__":
argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
argp.add_argument('-j', '--jobs',
default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
type=int,
help='Number of concurrent run_tests.py instances.')
argp.add_argument('-f', '--filter',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Filter targets to run by label with AND semantics.')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Pass --build_only flag to run_tests.py instances.')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Pass --force_default_poller to run_tests.py instances.')
argp.add_argument('--dry_run',
default=False,
action='store_const',
const=True,
help='Only print what would be run.')
argp.add_argument('--filter_pr_tests',
default=False,
action='store_const',
const=True,
help='Filters out tests irrelevant to pull request changes.')
argp.add_argument('--base_branch',
default='origin/master',
type=str,
help='Branch that pull request is requesting to merge into')
argp.add_argument('--inner_jobs',
default=_DEFAULT_INNER_JOBS,
type=int,
help='Number of jobs in each run_tests.py instance')
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append('--build_only')
if args.force_default_poller:
extra_args.append('--force_default_poller')
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = []
for job in all_jobs:
if not args.filter or all(filter in job.labels for filter in args.filter):
jobs.append(job)
if not jobs:
jobset.message('FAILED', 'No test suites match given criteria.',
do_newline=True)
sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
skipped_jobs = []
if args.filter_pr_tests:
print('Looking for irrelevant tests to skip...')
relevant_jobs = filter_tests(jobs, args.base_branch)
if len(relevant_jobs) == len(jobs):
print('No tests will be skipped.')
else:
print('These tests will be skipped:')
skipped_jobs = [job for job in jobs if job not in relevant_jobs]
for job in list(skipped_jobs):
print(' %s' % job.shortname)
jobs = relevant_jobs
print('Will run these tests:')
for job in jobs:
if args.dry_run:
print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
else:
print(' %s' % job.shortname)
print
if args.dry_run:
print '--dry_run was used, exiting'
sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
report_utils.render_junit_xml_report(resultset, 'report.xml',
suite_name='aggregate_tests')
if num_failures == 0:
jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Some run_tests.py instances have failed.',
do_newline=True)
sys.exit(1)
if args.dry_run:
print('--dry_run was used, exiting')
sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
skipped_results = jobset.run(skipped_jobs,
skip_jobs=True)
resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset, 'report.xml',
suite_name='aggregate_tests')
if num_failures == 0:
jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Some run_tests.py instances have failed.',
do_newline=True)
sys.exit(1)
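
End to end, the feature stays opt-in: without --filter_pr_tests, skipped_jobs
remains empty and jobset.run is invoked exactly as before. A PR-filtered matrix
run would look roughly like this (the branch value is an example):

    $ tools/run_tests/run_tests_matrix.py --filter_pr_tests --base_branch origin/master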

tools/run_tests/sanity/check_test_filtering.py (new file)
@@ -0,0 +1,149 @@
#!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
import re
# hack import paths to pick up extra code
sys.path.insert(0, os.path.abspath('tools/run_tests/'))
from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
import filter_pull_request_tests
_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby']
_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
class TestFilteringTest(unittest.TestCase):
def generate_all_tests(self):
all_jobs = _create_test_jobs() + _create_portability_test_jobs()
self.assertIsNotNone(all_jobs)
return all_jobs
def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
"""
With default args every suite is skippable (changed_files is empty, so nothing
matches any trigger), so all language-labeled jobs should be filtered out
:param changed_files: mock list of changed_files from pull request
:param labels: list of job labels that should be skipped
"""
all_jobs = self.generate_all_tests()
# Replace _get_changed_files() so the test can control which changed files filter_tests() sees
def _get_changed_files(foo):
return changed_files
filter_pull_request_tests._get_changed_files = _get_changed_files
print
filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
# Make sure sanity tests aren't being filtered out
sanity_tests_in_all_jobs = 0
sanity_tests_in_filtered_jobs = 0
for job in all_jobs:
if "sanity" in job.labels:
sanity_tests_in_all_jobs += 1
all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
for job in filtered_jobs:
if "sanity" in job.labels:
sanity_tests_in_filtered_jobs += 1
filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels]
self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs)
for label in labels:
for job in filtered_jobs:
self.assertNotIn(label, job.labels)
jobs_matching_labels = 0
for label in labels:
for job in all_jobs:
if (label in job.labels):
jobs_matching_labels += 1
self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
def test_individual_language_filters(self):
# Changing an unlisted file should trigger all languages
self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
# Changing core should trigger all tests
self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
# Testing individual languages
self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels])
self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels])
self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels])
self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_combined_language_filters(self):
self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels and label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_platform_filter(self):
self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
def test_whitelist(self):
whitelist = filter_pull_request_tests._WHITELIST_DICT
files_that_should_trigger_all_tests = ['src/core/foo.bar',
'some_file_not_on_the_white_list',
'BUILD',
'etc/roots.pem',
'Makefile',
'tools/foo']
for key in whitelist.keys():
for file_name in files_that_should_trigger_all_tests:
self.assertFalse(re.match(key, file_name))
if __name__ == '__main__':
unittest.main(verbosity=2)
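
The check needs no real git state: it stubs out _get_changed_files with a
closure over a mock file list, so it can run standalone as well as from the
sanity suite registered in sanity_tests.yaml below:

    $ tools/run_tests/sanity/check_test_filtering.py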

tools/run_tests/sanity/sanity_tests.yaml
@@ -2,6 +2,7 @@
- script: tools/run_tests/sanity/check_cache_mk.sh
- script: tools/run_tests/sanity/check_sources_and_headers.py
- script: tools/run_tests/sanity/check_submodules.sh
- script: tools/run_tests/sanity/check_test_filtering.py
- script: tools/buildgen/generate_projects.sh -j 3
cpu_cost: 3
- script: tools/distrib/check_copyright.py
