yapf tools/run_tests/python_utils

Branch: pull/13719/head
Author: ncteisen (7 years ago)
Parent: 173c477bd0
Commit: 05687c3da9
Files changed (11), with changed-line counts:
1. tools/distrib/yapf_code.sh (1)
2. tools/run_tests/python_utils/antagonist.py (1)
3. tools/run_tests/python_utils/comment_on_pr.py (9)
4. tools/run_tests/python_utils/dockerjob.py (26)
5. tools/run_tests/python_utils/filter_pull_request_tests.py (26)
6. tools/run_tests/python_utils/jobset.py (156)
7. tools/run_tests/python_utils/port_server.py (35)
8. tools/run_tests/python_utils/report_utils.py (41)
9. tools/run_tests/python_utils/start_port_server.py (9)
10. tools/run_tests/python_utils/upload_test_results.py (47)
11. tools/run_tests/python_utils/watch_dirs.py (5)

@@ -25,6 +25,7 @@ DIRS=(
'tools/distrib'
'tools/interop_matrix'
'tools/profiling'
'tools/run_tests/python_utils'
)
EXCLUSIONS=(
'grpcio/grpc_*.py'
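Adding 'tools/run_tests/python_utils' to DIRS brings that directory under the yapf pass that produced the rest of this diff. A minimal sketch of the equivalent invocation, assuming yapf's standard in-place and recursive flags (the authoritative flags live in tools/distrib/yapf_code.sh):

    # reformat all Python files under the newly whitelisted directory
    import subprocess
    subprocess.check_call(
        ['yapf', '--in-place', '--recursive', 'tools/run_tests/python_utils'])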

@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is used by run_tests.py to create cpu load on a machine"""
while True:

@@ -16,6 +16,7 @@ import os
import json
import urllib2
def comment_on_pr(text):
if 'JENKINS_OAUTH_TOKEN' not in os.environ:
print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
@@ -24,10 +25,12 @@ def comment_on_pr(text):
print 'Missing ghprbPullId env var: not commenting'
return
req = urllib2.Request(
url = 'https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
url='https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
os.environ['ghprbPullId'],
data = json.dumps({'body': text}),
headers = {
data=json.dumps({
'body': text
}),
headers={
'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
'Content-Type': 'application/json',
})
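For context, a hypothetical driver for this helper (Python 2, like the module itself); in real runs Jenkins supplies both environment variables:

    import os
    import comment_on_pr
    os.environ['JENKINS_OAUTH_TOKEN'] = '<oauth token>'  # normally set by CI
    os.environ['ghprbPullId'] = '13719'                  # PR number under test
    comment_on_pr.comment_on_pr('Automated test summary goes here.')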

@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to run docker instances as jobs."""
from __future__ import print_function
@@ -34,7 +33,8 @@ def random_name(base_name):
def docker_kill(cid):
"""Kills a docker container. Returns True if successful."""
return subprocess.call(['docker','kill', str(cid)],
return subprocess.call(
['docker', 'kill', str(cid)],
stdin=subprocess.PIPE,
stdout=_DEVNULL,
stderr=subprocess.STDOUT) == 0
@@ -45,9 +45,8 @@ def docker_mapped_port(cid, port, timeout_seconds=15):
started = time.time()
while time.time() - started < timeout_seconds:
try:
output = subprocess.check_output('docker port %s %s' % (cid, port),
stderr=_DEVNULL,
shell=True)
output = subprocess.check_output(
'docker port %s %s' % (cid, port), stderr=_DEVNULL, shell=True)
return int(output.split(':', 2)[1])
except subprocess.CalledProcessError as e:
pass
@@ -61,7 +60,10 @@ def wait_for_healthy(cid, shortname, timeout_seconds):
while time.time() - started < timeout_seconds:
try:
output = subprocess.check_output(
['docker', 'inspect', '--format="{{.State.Health.Status}}"', cid],
[
'docker', 'inspect', '--format="{{.State.Health.Status}}"',
cid
],
stderr=_DEVNULL)
if output.strip('\n') == 'healthy':
return
@@ -83,7 +85,8 @@ def finish_jobs(jobs):
def image_exists(image):
"""Returns True if given docker image exists."""
return subprocess.call(['docker','inspect', image],
return subprocess.call(
['docker', 'inspect', image],
stdin=subprocess.PIPE,
stdout=_DEVNULL,
stderr=subprocess.STDOUT) == 0
@@ -94,7 +97,8 @@ def remove_image(image, skip_nonexistent=False, max_retries=10):
if skip_nonexistent and not image_exists(image):
return True
for attempt in range(0, max_retries):
if subprocess.call(['docker','rmi', '-f', image],
if subprocess.call(
['docker', 'rmi', '-f', image],
stdin=subprocess.PIPE,
stdout=_DEVNULL,
stderr=subprocess.STDOUT) == 0:
@@ -109,14 +113,16 @@ class DockerJob:
def __init__(self, spec):
self._spec = spec
self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
self._job = jobset.Job(
spec, newline_on_success=True, travis=True, add_env={})
self._container_name = spec.container_name
def mapped_port(self, port):
return docker_mapped_port(self._container_name, port)
def wait_for_healthy(self, timeout_seconds):
wait_for_healthy(self._container_name, self._spec.shortname, timeout_seconds)
wait_for_healthy(self._container_name, self._spec.shortname,
timeout_seconds)
def kill(self, suppress_failure=False):
"""Sends kill signal to the container."""

@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filter out tests based on file differences compared to merge target branch"""
from __future__ import print_function
@@ -27,6 +26,7 @@ class TestSuite:
Contains label to identify job as belonging to this test suite and
triggers to identify if changed files are relevant
"""
def __init__(self, labels):
"""
Build TestSuite to group tests based on labeling
@@ -55,10 +55,11 @@ _RUBY_TEST_SUITE = TestSuite(['ruby'])
_LINUX_TEST_SUITE = TestSuite(['linux'])
_WINDOWS_TEST_SUITE = TestSuite(['windows'])
_MACOS_TEST_SUITE = TestSuite(['macos'])
_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
_NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
_PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
_WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
_ALL_TEST_SUITES = [
_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
_OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
_LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
]
# Dictionary of whitelistable files where the key is a regex matching changed files
# and the value is a list of tests that should be run. An empty list means that
@@ -123,8 +124,10 @@ def _get_changed_files(base_branch):
"""
# Get file changes between branch and merge-base of specified branch
# Not combined to be Windows friendly
base_commit = check_output(["git", "merge-base", base_branch, "HEAD"]).rstrip()
return check_output(["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
base_commit = check_output(
["git", "merge-base", base_branch, "HEAD"]).rstrip()
return check_output(
["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
def _can_skip_tests(file_names, triggers):
@@ -165,7 +168,8 @@ def affects_c_cpp(base_branch):
for changed_file in changed_files:
if not re.match(_ALL_TRIGGERS, changed_file):
return True
return not _can_skip_tests(changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
return not _can_skip_tests(
changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
def filter_tests(tests, base_branch):
@@ -174,7 +178,9 @@ def filter_tests(tests, base_branch):
:param tests: list of all tests generated by run_tests_matrix.py
:return: list of relevant tests
"""
print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
print(
'Finding file differences between gRPC %s branch and pull request...\n'
% base_branch)
changed_files = _get_changed_files(base_branch)
for changed_file in changed_files:
print(' %s' % changed_file)
@@ -183,7 +189,7 @@ def filter_tests(tests, base_branch):
# Run all tests if any changed file is not in the whitelist dictionary
for changed_file in changed_files:
if not re.match(_ALL_TRIGGERS, changed_file):
return(tests)
return (tests)
# Figure out which language and platform tests to run
skippable_labels = []
for test_suite in _ALL_TEST_SUITES:
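A minimal sketch of how a run_tests_matrix.py-style caller would use the filter (the branch name is illustrative):

    import filter_pull_request_tests
    tests = []  # in real runs, the full job list built by run_tests_matrix.py
    relevant = filter_pull_request_tests.filter_tests(tests, 'origin/master')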

@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a group of subprocesses and then finish."""
from __future__ import print_function
@@ -28,11 +27,9 @@ import tempfile
import time
import errno
# cpu cost measurement
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
_MAX_RESULT_SIZE = 8192
@@ -71,34 +68,31 @@ def platform_string():
if platform_string() == 'windows':
pass
else:
def alarm_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [ 31, 0 ],
'green': [ 32, 0 ],
'yellow': [ 33, 0 ],
'lightgray': [ 37, 0],
'gray': [ 30, 1 ],
'purple': [ 35, 0 ],
'cyan': [ 36, 0 ]
}
'red': [31, 0],
'green': [32, 0],
'yellow': [33, 0],
'lightgray': [37, 0],
'gray': [30, 1],
'purple': [35, 0],
'cyan': [36, 0]
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
@@ -111,7 +105,7 @@ _TAG_COLOR = {
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
@@ -127,7 +121,6 @@ def eintr_be_gone(fn):
raise
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
@@ -141,23 +134,22 @@ def message(tag, msg, explanatory_text=None, do_newline=False):
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
_BEGINNING_OF_LINE,
_CLEAR_LINE,
'\n%s' % explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1],
_COLORS[_TAG_COLOR[tag]][0],
tag,
msg,
'\n' if do_newline or explanatory_text is not None else ''))
_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
tag, msg, '\n'
if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
return
except IOError, e:
if e.errno != errno.EINTR:
raise
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
@@ -170,9 +162,17 @@ def which(filename):
class JobSpec(object):
"""Specifies what to run for a job."""
def __init__(self, cmdline, shortname=None, environ=None,
cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
timeout_retries=0, kill_handler=None, cpu_cost=1.0,
def __init__(self,
cmdline,
shortname=None,
environ=None,
cwd=None,
shell=False,
timeout_seconds=5 * 60,
flake_retries=0,
timeout_retries=0,
kill_handler=None,
cpu_cost=1.0,
verbose_success=False):
"""
Arguments:
@@ -205,15 +205,18 @@ class JobSpec(object):
return self.identity() == other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
self.cmdline)
def __str__(self):
return '%s: %s %s' % (self.shortname,
' '.join('%s=%s' % kv for kv in self.environ.items()),
' '.join('%s=%s' % kv
for kv in self.environ.items()),
' '.join(self.cmdline))
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
@@ -233,7 +236,11 @@ def read_from_start(f):
class Job(object):
"""Manages one job."""
def __init__(self, spec, newline_on_success, travis, add_env,
def __init__(self,
spec,
newline_on_success,
travis,
add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
@@ -278,7 +285,8 @@ class Job(object):
self._process = try_start()
break
except OSError:
message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
message('WARNING', 'Failed to start %s, retrying in %f seconds'
% (self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
@@ -287,18 +295,24 @@ class Job(object):
def state(self):
"""Poll current state of the job. Prints messages at completion."""
def stdout(self=self):
stdout = read_from_start(self._tempfile)
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message('FLAKE', '%s [ret=%d, pid=%d]' % (
self._spec.shortname, self._process.returncode, self._process.pid),
stdout(), do_newline=True)
message(
'FLAKE',
'%s [ret=%d, pid=%d]' %
(self._spec.shortname, self._process.returncode,
self._process.pid),
stdout(),
do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
@@ -307,9 +321,13 @@ class Job(object):
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message('FAILED', '%s [ret=%d, pid=%d, time=%.1fsec]' % (
self._spec.shortname, self._process.returncode, self._process.pid, elapsed),
stdout(), do_newline=True)
message(
'FAILED',
'%s [ret=%d, pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.returncode,
self._process.pid, elapsed),
stdout(),
do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
@@ -317,18 +335,25 @@ class Job(object):
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)', stdout())
m = re.search(
r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
stdout())
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
self.result.cpu_measured = float('%.01f' % cores)
self.result.cpu_estimated = float('%.01f' % self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (self.result.cpu_measured, self.result.cpu_estimated)
self.result.cpu_estimated = float('%.01f' %
self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
self.result.cpu_measured, self.result.cpu_estimated)
if not self._quiet_success:
message('PASSED', '%s [time=%.1fsec, retries=%d:%d%s]' % (
self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
message(
'PASSED',
'%s [time=%.1fsec, retries=%d:%d%s]' %
(self._spec.shortname, elapsed, self._retries,
self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
@@ -338,7 +363,11 @@ class Job(object):
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._timeout_retries < self._spec.timeout_retries:
message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
message(
'TIMEOUT_FLAKE',
'%s [pid=%d]' % (self._spec.shortname, self._process.pid),
stdout(),
do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
@@ -348,7 +377,12 @@ class Job(object):
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
message('TIMEOUT', '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname, self._process.pid, elapsed), stdout(), do_newline=True)
message(
'TIMEOUT',
'%s [pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.pid, elapsed),
stdout(),
do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
@@ -368,8 +402,9 @@ class Job(object):
class Jobset(object):
"""Manages one run of jobs."""
def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic, newline_on_success, travis,
stop_on_failure, add_env, quiet_success, max_time):
def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
@@ -402,7 +437,8 @@ class Jobset(object):
def start(self, spec):
"""Start a job. Return True on success, False on failure."""
while True:
if self._max_time > 0 and time.time() - self._start_time > self._max_time:
if self._max_time > 0 and time.time(
) - self._start_time > self._max_time:
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
message('SKIPPED', spec.shortname, do_newline=True)
@@ -416,10 +452,7 @@ class Jobset(object):
break
self.reap(spec.shortname, spec.cpu_cost)
if self.cancelled(): return False
job = Job(spec,
self._newline_on_success,
self._travis,
self._add_env,
job = Job(spec, self._newline_on_success, self._travis, self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
@@ -452,14 +485,19 @@ class Jobset(object):
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (self._remaining + len(self._running))
remaining = sofar / self._completed * (
self._remaining + len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
if waiting_for is not None:
wstr = ' next: %s @ %.2f cpu' % (waiting_for, waiting_for_cost)
wstr = ' next: %s @ %.2f cpu' % (waiting_for,
waiting_for_cost)
else:
wstr = ''
message('WAITING', '%s%d jobs running, %d complete, %d failed (load %.2f)%s' % (
rstr, len(self._running), self._completed, self._failures, self.cpu_cost(), wstr))
message(
'WAITING',
'%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
(rstr, len(self._running), self._completed, self._failures,
self.cpu_cost(), wstr))
if platform_string() == 'windows':
time.sleep(0.1)
else:
@@ -519,9 +557,9 @@ def run(cmdlines,
message('SKIPPED', job.shortname, do_newline=True)
resultset[job.shortname] = [skipped_job_result]
return 0, resultset
js = Jobset(check_cancelled,
maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
_DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time)
for cmdline, remaining in tag_remaining(cmdlines):
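Tying the pieces together, a sketch of the caller-side API as it appears above (the JobSpec keywords, the maxjobs parameter, and the (failures, resultset) return shape are visible in this diff; everything else is assumption):

    import jobset
    spec = jobset.JobSpec(
        cmdline=['python', 'some_test.py'],
        shortname='some_test',
        timeout_seconds=5 * 60,
        flake_retries=1)
    num_failures, resultset = jobset.run([spec], maxjobs=4)
    # resultset maps shortname -> list of JobResult, one entry per attempt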

@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage TCP ports for unit tests; started by run_tests.py"""
import argparse
@@ -27,18 +26,15 @@ from SocketServer import ThreadingMixIn
import threading
import platform
# increment this number whenever making a change to ensure that
# the changes are picked up by running CI servers
# note that all changes must be backwards compatible
_MY_VERSION = 20
if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
print _MY_VERSION
sys.exit(0)
argp = argparse.ArgumentParser(description='Server for httpcli_test')
argp.add_argument('-p', '--port', default=12345, type=int)
argp.add_argument('-l', '--logfile', default=None, type=str)
@@ -61,12 +57,14 @@ mu = threading.Lock()
# https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
# ports is used in a Cronet test, the test would fail (see issue #12149). These
# ports must be excluded from pool.
cronet_restricted_ports = [1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37,
42, 43, 53, 77, 79, 87, 95, 101, 102, 103, 104, 109,
110, 111, 113, 115, 117, 119, 123, 135, 139, 143,
179, 389, 465, 512, 513, 514, 515, 526, 530, 531,
532, 540, 556, 563, 587, 601, 636, 993, 995, 2049,
3659, 4045, 6000, 6665, 6666, 6667, 6668, 6669, 6697]
cronet_restricted_ports = [
1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87,
95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139,
143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563,
587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668,
6669, 6697
]
def can_connect(port):
# this test is only really useful on unices where SO_REUSE_PORT is available
@@ -81,6 +79,7 @@ def can_connect(port):
finally:
s.close()
def can_bind(port, proto):
s = socket.socket(proto, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -95,7 +94,10 @@ def can_bind(port, proto):
def refill_pool(max_timeout, req):
"""Scan for ports not marked for being in use"""
chk = [port for port in list(range(1025, 32766)) if port not in cronet_restricted_ports]
chk = [
port for port in list(range(1025, 32766))
if port not in cronet_restricted_ports
]
random.shuffle(chk)
for i in chk:
if len(pool) > 100: break
@@ -105,7 +107,8 @@ def refill_pool(max_timeout, req):
continue
req.log_message("kill old request %d" % i)
del in_use[i]
if can_bind(i, socket.AF_INET) and can_bind(i, socket.AF_INET6) and not can_connect(i):
if can_bind(i, socket.AF_INET) and can_bind(
i, socket.AF_INET6) and not can_connect(i):
req.log_message("found available port %d" % i)
pool.append(i)
@@ -182,7 +185,11 @@ class Handler(BaseHTTPRequestHandler):
self.end_headers()
mu.acquire()
now = time.time()
out = yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())})
out = yaml.dump(
{
'pool': pool,
'in_use': dict((k, now - v) for k, v in in_use.items())
})
mu.release()
self.wfile.write(out)
elif self.path == '/quitquitquit':
@@ -190,7 +197,9 @@ class Handler(BaseHTTPRequestHandler):
self.end_headers()
self.server.shutdown()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread"""
ThreadedHTTPServer(('', args.port), Handler).serve_forever()
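A sketch of poking a running server by hand (Python 2, matching the codebase; the /quitquitquit path is visible above, /version_number in start_port_server.py below):

    import urllib
    base = 'http://localhost:32766'  # _PORT_SERVER_PORT in start_port_server.py
    print urllib.urlopen(base + '/version_number').read()  # compare to _MY_VERSION
    urllib.urlopen(base + '/quitquitquit').read()          # ask the server to exit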

@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate XML and HTML test reports."""
from __future__ import print_function
@@ -47,13 +46,17 @@ def _filter_msg(msg, output_format):
def new_junit_xml_tree():
return ET.ElementTree(ET.Element('testsuites'))
def render_junit_xml_report(resultset, report_file, suite_package='grpc',
def render_junit_xml_report(resultset,
report_file,
suite_package='grpc',
suite_name='tests'):
"""Generate JUnit-like XML report."""
tree = new_junit_xml_tree()
append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
create_xml_report_file(tree, report_file)
def create_xml_report_file(tree, report_file):
"""Generate JUnit-like report file from xml tree ."""
# ensure the report directory exists
@@ -62,10 +65,15 @@ def create_xml_report_file(tree, report_file):
os.makedirs(report_dir)
tree.write(report_file, encoding='UTF-8')
def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
"""Append a JUnit-like XML report tree with test results as a new suite."""
testsuite = ET.SubElement(tree.getroot(), 'testsuite',
id=id, package=suite_package, name=suite_name,
testsuite = ET.SubElement(
tree.getroot(),
'testsuite',
id=id,
package=suite_package,
name=suite_name,
timestamp=datetime.datetime.now().isoformat())
failure_count = 0
error_count = 0
@@ -76,26 +84,30 @@ def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
xml_test.set('time', str(result.elapsed_time))
filtered_msg = _filter_msg(result.message, 'XML')
if result.state == 'FAILED':
ET.SubElement(xml_test, 'failure', message='Failure').text = filtered_msg
ET.SubElement(
xml_test, 'failure', message='Failure').text = filtered_msg
failure_count += 1
elif result.state == 'TIMEOUT':
ET.SubElement(xml_test, 'error', message='Timeout').text = filtered_msg
ET.SubElement(
xml_test, 'error', message='Timeout').text = filtered_msg
error_count += 1
elif result.state == 'SKIPPED':
ET.SubElement(xml_test, 'skipped', message='Skipped')
testsuite.set('failures', str(failure_count))
testsuite.set('errors', str(error_count))
def render_interop_html_report(
client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
http2_server_cases, resultset,
num_failures, cloud_to_prod, prod_servers, http2_interop):
def render_interop_html_report(client_langs, server_langs, test_cases,
auth_test_cases, http2_cases, http2_server_cases,
resultset, num_failures, cloud_to_prod,
prod_servers, http2_interop):
"""Generate HTML report for interop tests."""
template_file = 'tools/run_tests/interop/interop_html_report.template'
try:
mytemplate = Template(filename=template_file, format_exceptions=True)
except NameError:
print('Mako template is not installed. Skipping HTML report generation.')
print(
'Mako template is not installed. Skipping HTML report generation.')
return
except IOError as e:
print('Failed to find the template %s: %s' % (template_file, e))
@@ -109,7 +121,8 @@ def render_interop_html_report(
sorted_server_langs = sorted(server_langs)
sorted_prod_servers = sorted(prod_servers)
args = {'client_langs': sorted_client_langs,
args = {
'client_langs': sorted_client_langs,
'server_langs': sorted_server_langs,
'test_cases': sorted_test_cases,
'auth_test_cases': sorted_auth_test_cases,
@@ -119,7 +132,8 @@ def render_interop_html_report(
'num_failures': num_failures,
'cloud_to_prod': cloud_to_prod,
'prod_servers': sorted_prod_servers,
'http2_interop': http2_interop}
'http2_interop': http2_interop
}
html_report_out_dir = 'reports'
if not os.path.exists(html_report_out_dir):
@@ -132,6 +146,7 @@ def render_interop_html_report(
print(exceptions.text_error_template().render())
raise
def render_perf_profiling_results(output_filepath, profile_names):
with open(output_filepath, 'w') as output_file:
output_file.write('<ul>\n')
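A hypothetical call into the XML reporter using the signature reformatted above; resultset has the jobset shape (shortname -> list of JobResult):

    import report_utils
    resultset = {}  # in real runs, the second return value of jobset.run
    report_utils.render_junit_xml_report(
        resultset, 'reports/demo/sponge_log.xml', suite_name='demo_suite')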

@@ -22,10 +22,10 @@ import sys
import tempfile
import time
# must be synchronized with test/core/utils/port_server_client.h
_PORT_SERVER_PORT = 32766
def start_port_server():
# check if a compatible port server is running
# if incompatible (version mismatch) ==> start a new one
@@ -33,8 +33,7 @@ def start_port_server():
# otherwise, leave it up
try:
version = int(
urllib.urlopen(
'http://localhost:%d/version_number' %
urllib.urlopen('http://localhost:%d/version_number' %
_PORT_SERVER_PORT).read())
logging.info('detected port server running version %d', version)
running = True
@@ -92,8 +91,8 @@ def start_port_server():
# try one final time: maybe another build managed to start one
time.sleep(1)
try:
urllib.urlopen(
'http://localhost:%d/get' % _PORT_SERVER_PORT).read()
urllib.urlopen('http://localhost:%d/get' %
_PORT_SERVER_PORT).read()
logging.info(
'last ditch attempt to contact port server succeeded')
break
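Downstream code reserves a port with a plain GET against /get, as the last-ditch probe above does; a client-side sketch, assuming the response body is the port number as text:

    import urllib
    import start_port_server
    start_port_server.start_port_server()  # starts or reuses a compatible server
    port = int(urllib.urlopen('http://localhost:32766/get').read())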

@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to upload Jenkins test results to BQ"""
from __future__ import print_function
@@ -23,8 +22,8 @@ import sys
import time
import uuid
gcp_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/utils'))
gcp_utils_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(gcp_utils_dir)
import big_query_utils
@@ -55,8 +54,10 @@ _INTEROP_RESULTS_SCHEMA = [
('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
('test_name', 'STRING', 'Unique test name combining client, server, and test_name'),
('suite', 'STRING', 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
('test_name', 'STRING',
'Unique test name combining client, server, and test_name'),
('suite', 'STRING',
'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
('client', 'STRING', 'Client language'),
('server', 'STRING', 'Server host name'),
('test_case', 'STRING', 'Name of test case'),
@@ -92,8 +93,15 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
platform: string name of platform tests were run on
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
big_query_utils.create_partitioned_table(
bq,
_PROJECT_ID,
_DATASET_ID,
bq_table,
_RESULTS_SCHEMA,
_DESCRIPTION,
partition_type=_PARTITION_TYPE,
expiration_ms=_EXPIRATION_MS)
for shortname, results in six.iteritems(resultset):
for result in results:
@@ -119,13 +127,16 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
# TODO(jtattermusch): rows are inserted one by one, very inefficient
max_retries = 3
for attempt in range(max_retries):
if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
bq_table, [row]):
break
else:
if attempt < max_retries - 1:
print('Error uploading result to bigquery, will retry.')
else:
print('Error uploading result to bigquery, all attempts failed.')
print(
'Error uploading result to bigquery, all attempts failed.'
)
sys.exit(1)
@@ -138,8 +149,15 @@ def upload_interop_results_to_bq(resultset, bq_table, args):
args: args in run_interop_tests.py, generated by argparse
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _INTEROP_RESULTS_SCHEMA, _DESCRIPTION,
partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
big_query_utils.create_partitioned_table(
bq,
_PROJECT_ID,
_DATASET_ID,
bq_table,
_INTEROP_RESULTS_SCHEMA,
_DESCRIPTION,
partition_type=_PARTITION_TYPE,
expiration_ms=_EXPIRATION_MS)
for shortname, results in six.iteritems(resultset):
for result in results:
@@ -157,11 +175,14 @@ def upload_interop_results_to_bq(resultset, bq_table, args):
# TODO(jtattermusch): rows are inserted one by one, very inefficient
max_retries = 3
for attempt in range(max_retries):
if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
bq_table, [row]):
break
else:
if attempt < max_retries - 1:
print('Error uploading result to bigquery, will retry.')
else:
print('Error uploading result to bigquery, all attempts failed.')
print(
'Error uploading result to bigquery, all attempts failed.'
)
sys.exit(1)
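A hedged sketch of the upload entry point documented above; the table name is illustrative, and a real call needs GCP credentials plus the argparse namespace from run_tests.py:

    import upload_test_results
    resultset = {}  # second return value of jobset.run in real use
    args = None     # run_tests.py passes its parsed argparse namespace here
    upload_test_results.upload_results_to_bq(
        resultset, 'demo_test_results', args, 'linux')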

@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to watch a (set) of directories for modifications."""
import os
@@ -49,7 +48,8 @@ class DirWatcher(object):
if most_recent_change is None:
most_recent_change = st.st_mtime
else:
most_recent_change = max(most_recent_change, st.st_mtime)
most_recent_change = max(most_recent_change,
st.st_mtime)
return most_recent_change
def most_recent_change(self):
@@ -57,4 +57,3 @@ class DirWatcher(object):
self._cache = self._calculate()
self.lastrun = time.time()
return self._cache
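A small usage sketch, assuming the constructor takes the iterable of directories to watch (only most_recent_change() is visible in this diff):

    import time
    from watch_dirs import DirWatcher
    watcher = DirWatcher(['tools/run_tests/python_utils'])  # ctor args assumed
    baseline = watcher.most_recent_change()
    time.sleep(1)
    if watcher.most_recent_change() > baseline:
        print 'a watched file changed'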
