Merge pull request #15705 from jtattermusch/bq_batch_upload

Upload test results to BQ in batches
pull/15723/head
Jan Tattermusch 7 years ago committed by GitHub
commit aa1aa4329f
2 changed files:

  tools/run_tests/python_utils/upload_rbe_results.py   (2 lines changed)
  tools/run_tests/python_utils/upload_test_results.py  (11 lines changed)

tools/run_tests/python_utils/upload_rbe_results.py

@@ -125,7 +125,7 @@ def _get_resultstore_data(api_key, invocation_id):
 if __name__ == "__main__":
-    # Arguments are necessary if running in a non-Kokoro envrionment.
+    # Arguments are necessary if running in a non-Kokoro environment.
     argp = argparse.ArgumentParser(description='Upload RBE results.')
     argp.add_argument('--api_key', default='', type=str)
     argp.add_argument('--invocation_id', default='', type=str)
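The comment fixed above points out that --api_key and --invocation_id only have to be passed explicitly when the script runs outside of Kokoro; on Kokoro the values can be derived from the CI environment. A minimal sketch of such a fallback, assuming hypothetical environment variable names (the real upload_rbe_results.py may obtain the key and invocation id differently, e.g. from files in the Kokoro artifacts directory):

```python
import argparse
import os

argp = argparse.ArgumentParser(description='Upload RBE results.')
argp.add_argument('--api_key', default='', type=str)
argp.add_argument('--invocation_id', default='', type=str)
args = argp.parse_args()

# Hypothetical Kokoro fallback: the variable names below are illustrative,
# not the ones the actual script reads.
api_key = args.api_key or os.getenv('RESULTSTORE_API_KEY', '')
invocation_id = args.invocation_id or os.getenv('KOKORO_BUILD_ID', '')

if not api_key or not invocation_id:
    raise SystemExit('--api_key and --invocation_id are required outside of Kokoro.')
```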

tools/run_tests/python_utils/upload_test_results.py

@@ -163,6 +163,7 @@ def upload_interop_results_to_bq(resultset, bq_table, args):
         expiration_ms=_EXPIRATION_MS)

     for shortname, results in six.iteritems(resultset):
+        bq_rows = []
         for result in results:
             test_results = {}
             _get_build_metadata(test_results)
@@ -175,11 +176,15 @@ def upload_interop_results_to_bq(resultset, bq_table, args):
             test_results['test_case'] = shortname.split(':')[3]
             test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
             row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
-            # TODO(jtattermusch): rows are inserted one by one, very inefficient
+            bq_rows.append(row)
+
+        # BigQuery sometimes fails with large uploads, so batch 1,000 rows at a time.
+        for i in range((len(bq_rows) / 1000) + 1):
             max_retries = 3
             for attempt in range(max_retries):
-                if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
-                                               bq_table, [row]):
+                if big_query_utils.insert_rows(
+                        bq, _PROJECT_ID, _DATASET_ID, bq_table,
+                        bq_rows[i * 1000:(i + 1) * 1000]):
                     break
                 else:
                     if attempt < max_retries - 1:
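The new logic appends every row to bq_rows and then uploads in chunks of 1,000, retrying each chunk up to three times. One detail worth noting: `(len(bq_rows) / 1000) + 1` relies on Python 2 integer division; under Python 3 the division yields a float and `range()` would raise a TypeError, and when the row count is an exact multiple of 1,000 the loop makes one extra pass over an empty slice. A rough, Python-3-safe sketch of the same batching-with-retries idea, assuming only a callable that inserts a list of rows and returns True on success (the helper name and retry delay are illustrative, not taken from big_query_utils):

```python
import time

_BATCH_SIZE = 1000        # rows per insert request, mirroring the 1,000-row batches above
_MAX_RETRIES = 3          # attempts per batch, as in the diff
_RETRY_WAIT_SECONDS = 60  # illustrative back-off between attempts

def insert_rows_in_batches(insert_rows, rows):
    """Insert `rows` in fixed-size batches, retrying each batch a few times.

    `insert_rows` is any callable that takes a list of rows and returns True
    on success, e.g. a partial application of big_query_utils.insert_rows.
    """
    for start in range(0, len(rows), _BATCH_SIZE):
        batch = rows[start:start + _BATCH_SIZE]
        for attempt in range(_MAX_RETRIES):
            if insert_rows(batch):
                break  # this batch was accepted; move on to the next one
            if attempt < _MAX_RETRIES - 1:
                time.sleep(_RETRY_WAIT_SECONDS)
            else:
                print('FAILED to upload batch starting at row %d' % start)
```

Stepping by _BATCH_SIZE also skips the empty trailing slice that the `range((len(bq_rows) / 1000) + 1)` form produces when the row count divides evenly into 1,000.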
