Many improvements to detect flakes script

pull/13226/head
David Garcia Quintas 7 years ago
parent e1d91cfd58
commit 69f3767a3e
1 changed file: tools/flakes/detect_flakes.py (249 changes)

@@ -13,18 +13,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""Detect new flakes introduced in the last 24h hours with respect to the
-previous six days"""
+"""Detect new flakes and create issues for them"""

 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

 import datetime
+import json
+import logging
 import os
+import pprint
 import sys
-import logging
+import urllib2

-logging.basicConfig(format='%(asctime)s %(message)s')
+from collections import namedtuple

 gcp_utils_dir = os.path.abspath(
     os.path.join(os.path.dirname(__file__), '../gcp/utils'))
@@ -32,73 +34,230 @@ sys.path.append(gcp_utils_dir)

 import big_query_utils

-def print_table(table):
-    kokoro_base_url = 'https://kokoro.corp.google.com/job/'
-    for k, v in table.items():
-        job_name = v[0]
-        build_id = v[1]
-        ts = int(float(v[2]))
-        # TODO(dgq): timezone handling is wrong. We need to determine the timezone
-        # of the computer running this script.
-        human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S PDT')
-        job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
-        full_kokoro_url = kokoro_base_url + job_path
-        print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts, full_kokoro_url))
+GH_ISSUES_URL = 'https://api.github.com/repos/grpc/grpc/issues'
+KOKORO_BASE_URL = 'https://kokoro2.corp.google.com/job/'
+
+
+def gh(url, data=None):
+    request = urllib2.Request(url, data=data)
+    assert TOKEN
+    request.add_header('Authorization', 'token {}'.format(TOKEN))
+    if data:
+        request.add_header('Content-type', 'application/json')
+    response = urllib2.urlopen(request)
+    if 200 <= response.getcode() < 300:
+        return json.loads(response.read())
+    else:
+        raise ValueError('Error ({}) accessing {}'.format(
+            response.getcode(), response.geturl()))
+
+
+def create_gh_issue(title, body, labels):
+    data = json.dumps({'title': title,
+                       'body': body,
+                       'labels': labels})
+    response = gh(GH_ISSUES_URL, data)
+    issue_url = response['html_url']
+    print('Issue {} created for {}'.format(issue_url, title))
+
+
+def build_kokoro_url(job_name, build_id):
+    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
+    return KOKORO_BASE_URL + job_path
+
+
+def create_issues(new_flakes):
+    for test_name, results_row in new_flakes.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        url = build_kokoro_url(job_name, build_id)
+        title = 'New Flake: ' + test_name
+        body = '- Test: {}\n- Poll Strategy: {}\n- URL: {}'.format(
+            test_name, poll_strategy, url)
+        labels = ['infra/New Flakes']
+        create_gh_issue(title, body, labels)
+
+
+def print_table(table, format):
+    for test_name, results_row in table.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        ts = int(float(timestamp))
+        # TODO(dgq): timezone handling is wrong. We need to determine the timezone
+        # of the computer running this script.
+        human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S UTC')
+        full_kokoro_url = build_kokoro_url(job_name, build_id)
+        if format == 'human':
+            print("\t- Test: {}, Polling: {}, Timestamp: {}, url: {}".format(
+                test_name, poll_strategy, human_ts, full_kokoro_url))
+        else:
+            assert(format == 'csv')
+            print("{},{},{},{}".format(test_name, ts, human_ts, full_kokoro_url))
+
+
+Row = namedtuple('Row', ['poll_strategy', 'job_name', 'build_id', 'timestamp'])
+
+
-def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
-    """ period is one of "WEEK", "DAY", etc.
-    (see https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#date_add). """
+def get_flaky_tests(from_date, to_date, limit=None):
+    """Return flaky tests for the date range (from_date, to_date], where both
+    bounds are datetime.date objects."""
     bq = big_query_utils.create_big_query()
     query = """
+#standardSQL
 SELECT
-  REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
+  RTRIM(LTRIM(REGEXP_REPLACE(filtered_test_name, r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', ''))) AS test_binary,
+  REGEXP_EXTRACT(test_name, r'GRPC_POLL_STRATEGY=(\w+)') AS poll_strategy,
   job_name,
   build_id,
   timestamp
-FROM
-  [grpc-testing:jenkins_test_results.aggregate_results]
-WHERE
-  timestamp > DATE_ADD(CURRENT_DATE(), {days_lower_bound}, "DAY")
-  AND timestamp <= DATE_ADD(CURRENT_DATE(), {days_upper_bound}, "DAY")
-  AND NOT REGEXP_MATCH(job_name, '.*portability.*')
-  AND result != 'PASSED' AND result != 'SKIPPED'
-ORDER BY timestamp desc
-""".format(days_lower_bound=days_lower_bound, days_upper_bound=days_upper_bound)
+FROM (
+  SELECT
+    REGEXP_REPLACE(test_name, r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '') AS filtered_test_name,
+    test_name,
+    job_name,
+    build_id,
+    timestamp
+  FROM `grpc-testing.jenkins_test_results.aggregate_results`
+  WHERE
+    timestamp > TIMESTAMP("{from_date}")
+    AND timestamp <= TIMESTAMP("{to_date}")
+    AND NOT REGEXP_CONTAINS(job_name, 'portability')
+    AND result != 'PASSED' AND result != 'SKIPPED'
+)
+ORDER BY timestamp desc""".format(
+        from_date=from_date.isoformat(), to_date=to_date.isoformat())
     if limit:
         query += '\n LIMIT {}'.format(limit)
+    logging.debug("Query:\n%s", query)
     query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
     page = bq.jobs().getQueryResults(
         pageToken=None, **query_job['jobReference']).execute(num_retries=3)
     rows = page.get('rows')
     if rows:
         return {row['f'][0]['v']:
-                (row['f'][1]['v'], row['f'][2]['v'], row['f'][3]['v'])
+                Row(poll_strategy=row['f'][1]['v'],
+                    job_name=row['f'][2]['v'],
+                    build_id=row['f'][3]['v'],
+                    timestamp=row['f'][4]['v'])
                 for row in rows}
     else:
         return {}


-def get_new_flakes():
-    last_week_sans_yesterday = get_flaky_tests(-14, -1)
-    last_24 = get_flaky_tests(0, +1)
-    last_week_sans_yesterday_names = set(last_week_sans_yesterday.keys())
-    last_24_names = set(last_24.keys())
-    logging.debug('|last_week_sans_yesterday| =', len(last_week_sans_yesterday_names))
-    logging.debug('|last_24_names| =', len(last_24_names))
-    new_flakes = last_24_names - last_week_sans_yesterday_names
-    logging.debug('|new_flakes| = ', len(new_flakes))
-    return {k: last_24[k] for k in new_flakes}
+def parse_isodate(date_str):
+    return datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
+
+
+def get_new_flakes(args):
+    """Detect new flakes.
+
+    The calibration period, of length args.calibration_days, establishes the
+    set of pre-existing flakes. The reporting period that follows, of length
+    args.reporting_days, is the span during which new flakes are reported.
+
+    from date
+        |---------------------|----------------|
+        ^_____________________^________________^
+             calibration          reporting
+                days                 days
+    """
+    dates = process_date_args(args)
+    calibration_results = get_flaky_tests(dates['calibration']['begin'],
+                                          dates['calibration']['end'])
+    reporting_results = get_flaky_tests(dates['reporting']['begin'],
+                                        dates['reporting']['end'])
+    logging.debug('Calibration results: %s', pprint.pformat(calibration_results))
+    logging.debug('Reporting results: %s', pprint.pformat(reporting_results))
+    calibration_names = set(calibration_results.keys())
+    logging.info('|calibration_results (%s, %s]| = %d',
+                 dates['calibration']['begin'].isoformat(),
+                 dates['calibration']['end'].isoformat(),
+                 len(calibration_names))
+    reporting_names = set(reporting_results.keys())
+    logging.info('|reporting_results (%s, %s]| = %d',
+                 dates['reporting']['begin'].isoformat(),
+                 dates['reporting']['end'].isoformat(),
+                 len(reporting_names))
+    new_flakes = reporting_names - calibration_names
+    logging.info('|new_flakes| = %d', len(new_flakes))
+    return {k: reporting_results[k] for k in new_flakes}
+
+
+def build_args_parser():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--calibration_days', type=int, default=7,
+                        help='How many days to consider for pre-existing flakes.')
+    parser.add_argument('--reporting_days', type=int, default=1,
+                        help='How many days to consider for the detection of new flakes.')
+    parser.add_argument('--count_only', dest='count_only', action='store_true',
+                        help='Display only the number of new flakes.')
+    parser.set_defaults(count_only=False)
+    parser.add_argument('--create_issues', dest='create_issues', action='store_true',
+                        help='Create GitHub issues for all new flakes.')
+    parser.set_defaults(create_issues=False)
+    parser.add_argument('--token', type=str, default='',
+                        help='GitHub token to use its API with a higher rate limit')
+    parser.add_argument('--format', type=str, choices=['human', 'csv'],
+                        default='human', help='Output format: are you a human or a machine?')
+    parser.add_argument('--loglevel', type=str,
+                        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'],
+                        default='WARNING', help='Logging level.')
+    return parser
+
+
+def process_date_args(args):
+    calibration_begin = (datetime.date.today() -
+                         datetime.timedelta(days=args.calibration_days) -
+                         datetime.timedelta(days=args.reporting_days))
+    calibration_end = calibration_begin + datetime.timedelta(days=args.calibration_days)
+    reporting_begin = calibration_end
+    reporting_end = reporting_begin + datetime.timedelta(days=args.reporting_days)
+    return {'calibration': {'begin': calibration_begin, 'end': calibration_end},
+            'reporting': {'begin': reporting_begin, 'end': reporting_end}}
+

 def main():
-    new_flakes = get_new_flakes()
-    if new_flakes:
-        print("Found {} new flakes:".format(len(new_flakes)))
-        print_table(new_flakes)
-    else:
-        print("No new flakes found!")
+    global TOKEN
+    args_parser = build_args_parser()
+    args = args_parser.parse_args()
+    if args.create_issues and not args.token:
+        raise ValueError('Missing --token argument, needed to create GitHub issues')
+    TOKEN = args.token
+
+    logging_level = getattr(logging, args.loglevel)
+    logging.basicConfig(format='%(asctime)s %(message)s', level=logging_level)
+    new_flakes = get_new_flakes(args)
+
+    dates = process_date_args(args)
+    dates_info_string = 'from {} until {} (calibrated from {} until {})'.format(
+        dates['reporting']['begin'].isoformat(),
+        dates['reporting']['end'].isoformat(),
+        dates['calibration']['begin'].isoformat(),
+        dates['calibration']['end'].isoformat())
+
+    if args.format == 'human':
+        if args.count_only:
+            print(len(new_flakes), dates_info_string)
+        elif new_flakes:
+            found_msg = 'Found {} new flakes {}'.format(len(new_flakes), dates_info_string)
+            print(found_msg)
+            print('*' * len(found_msg))
+            print_table(new_flakes, 'human')
+            if args.create_issues:
+                create_issues(new_flakes)
+        else:
+            print('No new flakes found', dates_info_string)
+    elif args.format == 'csv':
+        if args.count_only:
+            print('from_date,to_date,count')
+            print('{},{},{}'.format(
+                dates['reporting']['begin'].isoformat(),
+                dates['reporting']['end'].isoformat(),
+                len(new_flakes)))
+        else:
+            print('test,timestamp,readable_timestamp,url')
+            print_table(new_flakes, 'csv')
+    else:
+        raise ValueError('Invalid argument for --format: {}'.format(args.format))


 if __name__ == '__main__':
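
For reference, the window arithmetic in process_date_args can be checked in isolation. A minimal sketch, assuming a hypothetical fixed "today" of 2017-11-01 and the argparse defaults (calibration_days=7, reporting_days=1); the script itself uses datetime.date.today():

    from __future__ import print_function
    import datetime

    today = datetime.date(2017, 11, 1)  # hypothetical; the script uses datetime.date.today()
    calibration_days, reporting_days = 7, 1  # argparse defaults

    # Same arithmetic as process_date_args(): the calibration window ends where
    # the reporting window begins, and both are half-open intervals (begin, end].
    calibration_begin = (today - datetime.timedelta(days=calibration_days)
                         - datetime.timedelta(days=reporting_days))
    calibration_end = calibration_begin + datetime.timedelta(days=calibration_days)
    reporting_begin = calibration_end
    reporting_end = reporting_begin + datetime.timedelta(days=reporting_days)

    # calibration: (2017-10-24, 2017-10-31], reporting: (2017-10-31, 2017-11-01]
    print('calibration: ({}, {}], reporting: ({}, {}]'.format(
        calibration_begin, calibration_end, reporting_begin, reporting_end))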

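The dict comprehension in get_flaky_tests relies on the row layout of the BigQuery getQueryResults REST response, where each row arrives as {'f': [{'v': ...}, ...]} with one cell per selected column. A small sketch of that mapping; the test name, poll strategy, job name, build id and timestamp below are made-up sample values:

    from collections import namedtuple

    Row = namedtuple('Row', ['poll_strategy', 'job_name', 'build_id', 'timestamp'])

    # Hypothetical payload; cells follow the SELECT order:
    # test_binary, poll_strategy, job_name, build_id, timestamp.
    rows = [{'f': [{'v': 'h2_full_test'},
                   {'v': 'epoll1'},
                   {'v': 'grpc/core/master_linux'},
                   {'v': '1234'},
                   {'v': '1509500000.0'}]}]

    results = {row['f'][0]['v']:
               Row(poll_strategy=row['f'][1]['v'],
                   job_name=row['f'][2]['v'],
                   build_id=row['f'][3]['v'],
                   timestamp=row['f'][4]['v'])
               for row in rows}

    print(results['h2_full_test'].job_name)  # grpc/core/master_linux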
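The test-name normalization that the query performs in SQL can be mirrored with Python's re module to sanity-check the patterns locally. The raw test name below is hypothetical; the regular expressions are copied from the inner and outer REGEXP_REPLACE calls and from the REGEXP_EXTRACT in the query:

    import re

    raw = 'bins/opt/h2_full_test/1 GRPC_POLL_STRATEGY=epoll1'  # hypothetical raw name

    # Inner REGEXP_REPLACE: strip shard suffixes and the poll-strategy marker.
    filtered = re.sub(r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '', raw)

    # Outer REGEXP_REPLACE plus LTRIM/RTRIM: strip build-directory prefixes and whitespace.
    test_binary = re.sub(r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', '', filtered).strip()

    # REGEXP_EXTRACT equivalent for the poll strategy.
    match = re.search(r'GRPC_POLL_STRATEGY=(\w+)', raw)
    poll_strategy = match.group(1) if match else None

    print('{} {}'.format(test_binary, poll_strategy))  # h2_full_test epoll1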