Add tests for pull request test filtering

pull/8461/head
Matt Kwong 8 years ago
parent 5c691c634d
commit 7e9bd6ca9a
Changed files:
  1. tools/run_tests/filter_pull_request_tests.py (18)
  2. tools/run_tests/run_tests_matrix.py (208)
  3. tools/run_tests/sanity/check_test_filtering.py (138)
  4. tools/run_tests/sanity/sanity_tests.yaml (1)

@@ -31,7 +31,7 @@
"""Filter out tests based on file differences compared to merge target branch"""
import re
from subprocess import call, check_output
from subprocess import check_output
class TestSuite:
@@ -105,7 +105,7 @@ _WHITELIST_DICT = {
'config\.m4$': [_PHP_TEST_SUITE],
'CONTRIBUTING\.md$': [],
'Gemfile$': [_RUBY_TEST_SUITE],
'grpc.def$': [_WINDOWS_TEST_SUITE],
'grpc\.def$': [_WINDOWS_TEST_SUITE],
'grpc\.gemspec$': [_RUBY_TEST_SUITE],
'gRPC\.podspec$': [_OBJC_TEST_SUITE],
'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
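The escaping matters because the whitelist keys are used as regular expressions against changed file paths, and an unescaped `.` matches any character. A minimal standalone sketch (not part of the commit) showing the difference:

```python
import re

# Unescaped '.' matches any character, so an unrelated path would
# also be treated as touching grpc.def.
assert re.search('grpc.def$', 'some/grpcXdef')
# The escaped pattern only matches a literal dot.
assert re.search(r'grpc\.def$', 'some/grpc.def')
assert not re.search(r'grpc\.def$', 'some/grpcXdef')
```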
@@ -172,15 +172,11 @@ def filter_tests(tests, base_branch):
:param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests
"""
print("Finding file differences between gRPC %s branch and pull request...\n" % base_branch)
print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
changed_files = _get_changed_files(base_branch)
for changed_file in changed_files:
print(" %s" % changed_file)
print
# todo(mattkwong): Remove this
# Faking changed files to test test filtering on Jenkins
changed_files = ['src/node/something', 'src/python/something']
print(' %s' % changed_file)
print('')
# Regex that combines all keys in _WHITELIST_DICT
all_triggers = "(" + ")|(".join(_WHITELIST_DICT.keys()) + ")"
@@ -193,8 +189,8 @@ def filter_tests(tests, base_branch):
for test_suite in _ALL_TEST_SUITES:
if _can_skip_tests(changed_files, test_suite.triggers):
for label in test_suite.labels:
print(" %s tests safe to skip" % label)
print(' %s tests safe to skip' % label)
skippable_labels.append(label)
tests = _remove_irrelevant_tests(tests, skippable_labels)
return tests
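For each suite, `_can_skip_tests(changed_files, test_suite.triggers)` decides whether any changed file matches one of the regexes that trigger that suite; only suites with no matching file get their labels added to `skippable_labels`. Roughly (a simplified sketch with hypothetical trigger patterns, not the helper's actual implementation):

```python
import re

def can_skip_suite(changed_files, suite_triggers):
    # The suite is skippable only if no changed file matches any of
    # the regexes that trigger it.
    return not any(re.search(trigger, changed_file)
                   for trigger in suite_triggers
                   for changed_file in changed_files)

# A PR touching only Python sources can skip a Ruby-only suite...
print(can_skip_suite(['src/python/something'], [r'^src/ruby/']))  # True
# ...but not a suite triggered by core changes.
print(can_skip_suite(['src/core/foo.bar'], [r'^src/core/']))      # False
```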

@@ -231,112 +231,110 @@ def _allowed_labels():
return sorted(all_labels)
argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
argp.add_argument('-j', '--jobs',
default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
type=int,
help='Number of concurrent run_tests.py instances.')
argp.add_argument('-f', '--filter',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Filter targets to run by label with AND semantics.')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Pass --build_only flag to run_tests.py instances.')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Pass --force_default_poller to run_tests.py instances.')
argp.add_argument('--dry_run',
default=False,
action='store_const',
const=True,
help='Only print what would be run.')
argp.add_argument('--filter_pr_tests',
default=False,
action='store_const',
const=True,
help='Filters out tests irrelevant to pull request changes.')
argp.add_argument('--base_branch',
default='origin/master',
type=str,
help='Branch that pull request is requesting to merge into')
argp.add_argument('--inner_jobs',
default=_DEFAULT_INNER_JOBS,
type=int,
help='Number of jobs in each run_tests.py instance')
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append('--build_only')
if args.force_default_poller:
extra_args.append('--force_default_poller')
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = []
for job in all_jobs:
if not args.filter or all(filter in job.labels for filter in args.filter):
jobs.append(job)
if not jobs:
jobset.message('FAILED', 'No test suites match given criteria.',
do_newline=True)
sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
skipped_jobs = []
if args.filter_pr_tests:
print 'Looking for irrelevant tests to skip...'
relevant_jobs = filter_tests(jobs, args.base_branch)
print
if len(relevant_jobs) == len(jobs):
print 'No tests will be skipped.'
else:
print 'These tests will be skipped:'
skipped_jobs = set(jobs) - set(relevant_jobs)
for job in list(skipped_jobs):
print ' %s' % job.shortname
jobs = relevant_jobs
if __name__ == "__main__":
argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
argp.add_argument('-j', '--jobs',
default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
type=int,
help='Number of concurrent run_tests.py instances.')
argp.add_argument('-f', '--filter',
choices=_allowed_labels(),
nargs='+',
default=[],
help='Filter targets to run by label with AND semantics.')
argp.add_argument('--build_only',
default=False,
action='store_const',
const=True,
help='Pass --build_only flag to run_tests.py instances.')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
help='Pass --force_default_poller to run_tests.py instances.')
argp.add_argument('--dry_run',
default=False,
action='store_const',
const=True,
help='Only print what would be run.')
argp.add_argument('--filter_pr_tests',
default=False,
action='store_const',
const=True,
help='Filters out tests irrelevant to pull request changes.')
argp.add_argument('--base_branch',
default='origin/master',
type=str,
help='Branch that pull request is requesting to merge into')
argp.add_argument('--inner_jobs',
default=_DEFAULT_INNER_JOBS,
type=int,
help='Number of jobs in each run_tests.py instance')
args = argp.parse_args()
extra_args = []
if args.build_only:
extra_args.append('--build_only')
if args.force_default_poller:
extra_args.append('--force_default_poller')
all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
_create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
jobs = []
for job in all_jobs:
if not args.filter or all(filter in job.labels for filter in args.filter):
jobs.append(job)
if not jobs:
jobset.message('FAILED', 'No test suites match given criteria.',
do_newline=True)
sys.exit(1)
print('IMPORTANT: The changes you are testing need to be locally committed')
print('because only the committed changes in the current branch will be')
print('copied to the docker environment or into subworkspaces.')
skipped_jobs = []
if args.filter_pr_tests:
print('Looking for irrelevant tests to skip...')
relevant_jobs = filter_tests(jobs, args.base_branch)
if len(relevant_jobs) == len(jobs):
print('No tests will be skipped.')
else:
print('These tests will be skipped:')
skipped_jobs = [job for job in jobs if job not in relevant_jobs]
for job in list(skipped_jobs):
print(' %s' % job.shortname)
jobs = relevant_jobs
print('Will run these tests:')
for job in jobs:
if args.dry_run:
print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
else:
print(' %s' % job.shortname)
print
print 'Will run these tests:'
for job in jobs:
if args.dry_run:
print ' %s: "%s"' % (job.shortname, ' '.join(job.cmdline))
print('--dry_run was used, exiting')
sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
skipped_results = jobset.run(skipped_jobs,
skip_jobs=True)
resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset, 'report.xml',
suite_name='aggregate_tests')
if num_failures == 0:
jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
do_newline=True)
else:
print ' %s' % job.shortname
print
if args.dry_run:
print '--dry_run was used, exiting'
sys.exit(1)
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
travis=True,
maxjobs=args.jobs)
# Merge skipped tests into results to show skipped tests on report.xml
if skipped_jobs:
skipped_results = jobset.run(skipped_jobs,
skip_jobs=True)
resultset.update(skipped_results)
report_utils.render_junit_xml_report(resultset, 'report.xml',
suite_name='aggregate_tests')
if num_failures == 0:
jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
do_newline=True)
else:
jobset.message('FAILED', 'Some run_tests.py instances have failed.',
do_newline=True)
sys.exit(1)
jobset.message('FAILED', 'Some run_tests.py instances have failed.',
do_newline=True)
sys.exit(1)
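Taken together, `--filter_pr_tests` and `--base_branch` let a CI job run only the suites relevant to a pull request. A sketch of how it might be invoked (the labels passed to `-f` are illustrative; the flags themselves are the ones defined above):

```python
# Illustrative driver, not part of the commit: asks the matrix for a dry
# run with PR-based filtering so it only prints the selected jobs.
import subprocess

returncode = subprocess.call([
    'python', 'tools/run_tests/run_tests_matrix.py',
    '--filter_pr_tests',               # skip suites irrelevant to the PR
    '--base_branch', 'origin/master',  # branch the PR merges into
    '-f', 'linux', 'c',                # AND-combined label filter (illustrative)
    '--dry_run',                       # only print what would be run
])
# Note: a dry run exits with status 1 by design (see sys.exit(1) above).
```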

@@ -0,0 +1,138 @@
#!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
import re
# hack import paths to pick up extra code
sys.path.insert(0, os.path.abspath('tools/run_tests/'))
from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
import filter_pull_request_tests
_LIST_OF_LANGUAGE_LABELS = ['sanity', 'c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby']
_LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
class TestFilteringTest(unittest.TestCase):
def generate_all_tests(self):
all_jobs = _create_test_jobs() + _create_portability_test_jobs()
self.assertIsNotNone(all_jobs)
return all_jobs
def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
"""
Default args should filter no tests because changed_files is empty and
default labels should be able to match all jobs
:param changed_files: mock list of changed_files from pull request
:param labels: list of job labels that should be skipped
"""
all_jobs = self.generate_all_tests()
# Replacing _get_changed_files function to allow specifying changed files in filter_tests function
def _get_changed_files(foo):
return changed_files
filter_pull_request_tests._get_changed_files = _get_changed_files
print
filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
for label in labels:
for job in filtered_jobs:
self.assertNotIn(label, job.labels)
jobs_matching_labels = 0
for label in labels:
for job in all_jobs:
if (label in job.labels):
jobs_matching_labels += 1
self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
def test_individual_language_filters(self):
# Changing unlisted file should trigger all languages
self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
# Changing core should trigger all tests
self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
# Testing individual languages
self.test_filtering(['templates/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._SANITY_TEST_SUITE.labels])
self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels])
self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels])
self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels])
self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_combined_language_filters(self):
self.test_filtering(['templates/foo.bar', 'test/core/foo.bar'],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._SANITY_TEST_SUITE.labels and label not in
filter_pull_request_tests._CORE_TEST_SUITE.labels])
self.test_filtering(['src/node/foo.bar', 'src/cpp/foo.bar', "src/csharp/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._NODE_TEST_SUITE.labels and label not in
filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
[label for label in _LIST_OF_LANGUAGE_LABELS if label not in
filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
filter_pull_request_tests._RUBY_TEST_SUITE.labels])
def test_platform_filter(self):
self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
def test_whitelist(self):
whitelist = filter_pull_request_tests._WHITELIST_DICT
files_that_should_trigger_all_tests = ['src/core/foo.bar',
'some_file_not_on_the_white_list',
'BUILD',
'etc/roots.pem',
'Makefile',
'tools/foo']
for key in whitelist.keys():
for file_name in files_that_should_trigger_all_tests:
self.assertFalse(re.match(key, file_name))
if __name__ == '__main__':
unittest.main(verbosity=2)
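Because the test swaps out `filter_pull_request_tests._get_changed_files`, the same setup can be reused interactively to see what a given change set would keep, without needing a real `git diff`. A small sketch, run from the repository root, mirroring the imports at the top of the test:

```python
import os
import sys

# Same path hack as the test so the run_tests modules are importable.
sys.path.insert(0, os.path.abspath('tools/run_tests/'))

from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
import filter_pull_request_tests

# Pretend only Ruby sources changed, instead of diffing against a branch.
filter_pull_request_tests._get_changed_files = lambda base_branch: ['src/ruby/foo.bar']

all_jobs = _create_test_jobs() + _create_portability_test_jobs()
kept = filter_pull_request_tests.filter_tests(all_jobs, 'origin/master')
print('%d of %d jobs kept' % (len(kept), len(all_jobs)))
```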

@@ -2,6 +2,7 @@
- script: tools/run_tests/sanity/check_cache_mk.sh
- script: tools/run_tests/sanity/check_sources_and_headers.py
- script: tools/run_tests/sanity/check_submodules.sh
- script: tools/run_tests/sanity/check_test_filtering.py
- script: tools/buildgen/generate_projects.sh -j 3
cpu_cost: 3
- script: tools/distrib/check_copyright.py
