@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Detect new flakes and create issues for them"""
 
 from __future__ import absolute_import
@@ -39,6 +38,7 @@ GH_ISSUE_CREATION_URL = 'https://api.github.com/repos/grpc/grpc/issues'
 GH_ISSUE_SEARCH_URL = 'https://api.github.com/search/issues'
 KOKORO_BASE_URL = 'https://kokoro2.corp.google.com/job/'
 
+
 def gh(url, data=None):
     request = urllib2.Request(url, data=data)
     assert TOKEN
@@ -49,8 +49,8 @@ def gh(url, data=None):
     if 200 <= response.getcode() < 300:
         return json.loads(response.read())
     else:
-        raise ValueError('Error ({}) accessing {}'.format(
-            response.getcode(), response.geturl()))
+        raise ValueError('Error ({}) accessing {}'.format(response.getcode(),
+                                                          response.geturl()))
 
 
 def search_gh_issues(search_term, status='open'):
@@ -62,9 +62,7 @@ def search_gh_issues(search_term, status='open'):
 
 
 def create_gh_issue(title, body, labels, assignees=[]):
-    params = {'title': title,
-              'body': body,
-              'labels': labels}
+    params = {'title': title, 'body': body, 'labels': labels}
     if assignees:
         params['assignees'] = assignees
     data = json.dumps(params)
@@ -94,17 +92,23 @@ def create_issues(new_flakes, always_create):
         else:
             preexisting_issues = search_gh_issues(test_name)
             if preexisting_issues['total_count'] > 0:
-                print('\nFound {} issues for "{}":'.format(
-                    preexisting_issues['total_count'], test_name))
+                print('\nFound {} issues for "{}":'.format(preexisting_issues[
+                    'total_count'], test_name))
                 for issue in preexisting_issues['items']:
-                    print('\t"{}" ; URL: {}'.format(issue['title'], issue['html_url']))
+                    print('\t"{}" ; URL: {}'.format(issue['title'], issue[
+                        'html_url']))
             else:
-                print('\nNo preexisting issues found for "{}"'.format(test_name))
-            proceed = raw_input('Create issue for:\nTitle: {}\nBody: {}\n[Y/n] '.format(
-                title, body)) in ('y', 'Y', '')
+                print(
+                    '\nNo preexisting issues found for "{}"'.format(test_name))
+            proceed = raw_input(
+                'Create issue for:\nTitle: {}\nBody: {}\n[Y/n] '.format(
+                    title, body)) in ('y', 'Y', '')
             if proceed:
-                assignees_str = raw_input('Assignees? (comma-separated, leave blank for unassigned): ')
-                assignees = [assignee.strip() for assignee in assignees_str.split(',')]
+                assignees_str = raw_input(
+                    'Assignees? (comma-separated, leave blank for unassigned): ')
+                assignees = [
+                    assignee.strip() for assignee in assignees_str.split(',')
+                ]
                 create_gh_issue(title, body, labels, assignees)
 
 
@@ -123,7 +127,10 @@ def print_table(table, format):
             first_time = False
         print("{},{},{}".format(test_name, timestamp, full_kokoro_url))
 
+
 Row = namedtuple('Row', ['poll_strategy', 'job_name', 'build_id', 'timestamp'])
+
+
 def get_new_failures(dates):
     bq = big_query_utils.create_big_query()
     this_script_path = os.path.join(os.path.dirname(__file__))
@@ -140,12 +147,13 @@ def get_new_failures(dates):
         pageToken=None, **query_job['jobReference']).execute(num_retries=3)
     rows = page.get('rows')
     if rows:
-        return {row['f'][0]['v']:
-                Row(poll_strategy=row['f'][1]['v'],
-                    job_name=row['f'][2]['v'],
-                    build_id=row['f'][3]['v'],
-                    timestamp=row['f'][4]['v'])
-                for row in rows}
+        return {
+            row['f'][0]['v']: Row(poll_strategy=row['f'][1]['v'],
+                                  job_name=row['f'][2]['v'],
+                                  build_id=row['f'][3]['v'],
+                                  timestamp=row['f'][4]['v'])
+            for row in rows
+        }
     else:
         return {}
 
@@ -178,39 +186,74 @@ def build_args_parser():
     parser = argparse.ArgumentParser()
     today = datetime.date.today()
     a_week_ago = today - datetime.timedelta(days=7)
-    parser.add_argument('--calibration_days', type=int, default=7,
-                        help='How many days to consider for pre-existing flakes.')
+    parser.add_argument(
+        '--calibration_days',
+        type=int,
+        default=7,
+        help='How many days to consider for pre-existing flakes.')
-    parser.add_argument('--reporting_days', type=int, default=1,
-                        help='How many days to consider for the detection of new flakes.')
+    parser.add_argument(
+        '--reporting_days',
+        type=int,
+        default=1,
+        help='How many days to consider for the detection of new flakes.')
-    parser.add_argument('--count_only', dest='count_only', action='store_true',
-                        help='Display only number of new flakes.')
+    parser.add_argument(
+        '--count_only',
+        dest='count_only',
+        action='store_true',
+        help='Display only number of new flakes.')
     parser.set_defaults(count_only=False)
-    parser.add_argument('--create_issues', dest='create_issues', action='store_true',
-                        help='Create issues for all new flakes.')
+    parser.add_argument(
+        '--create_issues',
+        dest='create_issues',
+        action='store_true',
+        help='Create issues for all new flakes.')
     parser.set_defaults(create_issues=False)
-    parser.add_argument('--always_create_issues', dest='always_create_issues', action='store_true',
-                        help='Always create issues for all new flakes. Otherwise,'
-                        ' interactively prompt for every issue.')
+    parser.add_argument(
+        '--always_create_issues',
+        dest='always_create_issues',
+        action='store_true',
+        help='Always create issues for all new flakes. Otherwise,'
+        ' interactively prompt for every issue.')
     parser.set_defaults(always_create_issues=False)
-    parser.add_argument('--token', type=str, default='',
-                        help='GitHub token to use its API with a higher rate limit')
+    parser.add_argument(
+        '--token',
+        type=str,
+        default='',
+        help='GitHub token to use its API with a higher rate limit')
-    parser.add_argument('--format', type=str, choices=['human', 'csv'],
-                        default='human', help='Output format: are you a human or a machine?')
+    parser.add_argument(
+        '--format',
+        type=str,
+        choices=['human', 'csv'],
+        default='human',
+        help='Output format: are you a human or a machine?')
-    parser.add_argument('--loglevel', type=str,
-                        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'],
-                        default='WARNING', help='Logging level.')
+    parser.add_argument(
+        '--loglevel',
+        type=str,
+        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'],
+        default='WARNING',
+        help='Logging level.')
     return parser
 
 
 def process_date_args(args):
-    calibration_begin = (datetime.date.today() -
-                         datetime.timedelta(days=args.calibration_days) -
-                         datetime.timedelta(days=args.reporting_days))
+    calibration_begin = (
+        datetime.date.today() - datetime.timedelta(days=args.calibration_days) -
+        datetime.timedelta(days=args.reporting_days))
-    calibration_end = calibration_begin + datetime.timedelta(days=args.calibration_days)
+    calibration_end = calibration_begin + datetime.timedelta(
+        days=args.calibration_days)
     reporting_begin = calibration_end
-    reporting_end = reporting_begin + datetime.timedelta(days=args.reporting_days)
-    return {'calibration': {'begin': calibration_begin, 'end': calibration_end},
-            'reporting': {'begin': reporting_begin, 'end': reporting_end }}
+    reporting_end = reporting_begin + datetime.timedelta(
+        days=args.reporting_days)
+    return {
+        'calibration': {
+            'begin': calibration_begin,
+            'end': calibration_end
+        },
+        'reporting': {
+            'begin': reporting_begin,
+            'end': reporting_end
+        }
+    }
 
 
 def main():
@@ -218,7 +261,8 @@ def main():
     args_parser = build_args_parser()
     args = args_parser.parse_args()
     if args.create_issues and not args.token:
-        raise ValueError('Missing --token argument, needed to create GitHub issues')
+        raise ValueError(
+            'Missing --token argument, needed to create GitHub issues')
     TOKEN = args.token
 
     logging_level = getattr(logging, args.loglevel)
@@ -237,25 +281,26 @@ def main():
         if args.count_only:
             print(len(new_flakes), dates_info_string)
         elif new_flakes:
-            found_msg = 'Found {} new flakes {}'.format(len(new_flakes), dates_info_string)
+            found_msg = 'Found {} new flakes {}'.format(
+                len(new_flakes), dates_info_string)
             print(found_msg)
             print('*' * len(found_msg))
             print_table(new_flakes, 'human')
             if args.create_issues:
                 create_issues(new_flakes, args.always_create_issues)
         else:
-            print('No new flakes found '.format(len(new_flakes)), dates_info_string)
+            print('No new flakes found '.format(len(new_flakes)),
+                  dates_info_string)
     elif args.format == 'csv':
         if args.count_only:
             print('from_date,to_date,count')
-            print('{},{},{}'.format(
-                dates['reporting']['begin'].isoformat(),
-                dates['reporting']['end'].isoformat(),
-                len(new_flakes)))
+            print('{},{},{}'.format(dates['reporting']['begin'].isoformat(
+            ), dates['reporting']['end'].isoformat(), len(new_flakes)))
         else:
             print_table(new_flakes, 'csv')
     else:
-        raise ValueError('Invalid argument for --format: {}'.format(args.format))
+        raise ValueError(
+            'Invalid argument for --format: {}'.format(args.format))
 
 
 if __name__ == '__main__':