Fix python3 performance tests - incomplete migration from python2 (#25734)

This fixes a crash in the grpc_performance_profile_* and grpc_e2e_performance_* tests.

Example failure: https://source.cloud.google.com/results/invocations/2112f2d5-db91-4901-87cb-cc9865f351f1/targets

The daily performance test passes with this fixit branch:
http://sponge2/23d4df20-f4dd-48ee-8789-4cd96b078ed5.

Additional fixes:

- Reorder imports according to the Google style guide.
- Replace the deprecated cgi.escape function with html.escape.
- In case it comes up with respect to the py2-to-py3 upgrade: the dict iter{items,keys,values} methods are intentionally replaced with the non-iter versions, which are less efficient under Python 2. This is a recommended practice even when supporting both Python versions, and this is not performance-critical code. A minimal sketch of the change follows this list.
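
A small illustration of that dict-method change, using a hypothetical times dict shaped like the per-line timing lists in profile_analyzer.py:

    times = {'self_time': [3.4, 1.2]}

    # Python 2 only:            works on both Python 2 and 3:
    # times.iteritems()   ->    times.items()
    # times.iterkeys()    ->    times.keys()
    # times.itervalues()  ->    times.values()
    for key, samples in times.items():
        samples.sort()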

Additional note: there is no meaningful performance difference between py2 and py3 (each run takes ~80 minutes to finish).
Commit 4ea47d6ae3 (parent 633b695af4), authored by AJ Heller, committed via GitHub.
Files changed:
  tools/profiling/latency_profile/profile_analyzer.py (33 changed lines)
  tools/run_tests/run_microbenchmark.py (20 changed lines)

--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -16,13 +16,14 @@
 import argparse
 import collections
 import hashlib
 import itertools
 import json
 import math
 import sys
-import tabulate
 import time
+from six.moves import zip
+import tabulate
 SELF_TIME = object()
 TIME_FROM_SCOPE_START = object()
 TIME_TO_SCOPE_END = object()
@@ -107,8 +108,8 @@ class CallStackBuilder(object):
     def add(self, line):
         line_type = line['type']
-        self.signature.update(line_type)
-        self.signature.update(line['tag'])
+        self.signature.update(line_type.encode('UTF-8'))
+        self.signature.update(line['tag'].encode('UTF-8'))
         if line_type == '{':
             self.stk.append(ScopeBuilder(self, line))
             return False
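
For context on the encode() calls above: the signature object is a hashlib digest (as the .update() calls suggest), and on Python 3 hashlib only accepts bytes, so text must be encoded first. A small standalone illustration, not taken from the patch:

    import hashlib

    sig = hashlib.md5()
    # Python 2 accepted str here; Python 3 raises TypeError for un-encoded text,
    # hence the explicit .encode('UTF-8') in the patch.
    sig.update('{'.encode('UTF-8'))
    print(sig.hexdigest())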
@@ -143,15 +144,15 @@ class CallStack(object):
         assert self.signature == call_stack_builder.signature
         self.count += 1
         assert len(self.lines) == len(call_stack_builder.lines)
-        for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
+        for lsum, line in zip(self.lines, call_stack_builder.lines):
             assert lsum.tag == line.tag
             assert lsum.times.keys() == line.times.keys()
-            for k, lst in lsum.times.iteritems():
+            for k, lst in lsum.times.items():
                 lst.append(line.times[k])
     def finish(self):
         for line in self.lines:
-            for lst in line.times.itervalues():
+            for lst in line.times.values():
                 lst.sort()
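
Regarding the zip change above: itertools.izip no longer exists on Python 3, where the builtin zip is already lazy, and from six.moves import zip resolves to izip on Python 2 and to the builtin on Python 3. A tiny sketch with made-up lists:

    from six.moves import zip  # itertools.izip on py2, the builtin on py3

    summary = ['a', 'b']
    fresh = ['a', 'b']
    for lsum, line in zip(summary, fresh):  # lazy pairing on both versions
        assert lsum == line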
@@ -247,18 +248,18 @@ if args.out != '-':
     out = open(args.out, 'w')
 if args.fmt == 'html':
-    print >> out, '<html>'
-    print >> out, '<head>'
-    print >> out, '<title>Profile Report</title>'
-    print >> out, '</head>'
+    out.write('<html>')
+    out.write('<head>')
+    out.write('<title>Profile Report</title>')
+    out.write('</head>')
 accounted_for = 0
 for cs in call_stacks:
-    print >> out, '\n'
+    out.write('\n')
     if args.fmt in BANNER:
-        print >> out, BANNER[args.fmt] % {
+        out.write(BANNER[args.fmt] % {
             'count': cs.count,
-        }
+        })
     header, _ = zip(*FORMAT)
     table = []
     for line in cs.lines:
@@ -266,10 +267,10 @@ for cs in call_stacks:
         for _, fn in FORMAT:
             fields.append(fn(line))
         table.append(fields)
-    print >> out, tabulate.tabulate(table, header, tablefmt=args.fmt)
+    out.write(tabulate.tabulate(table, header, tablefmt=args.fmt))
     accounted_for += cs.count
     if accounted_for > .99 * total_stacks:
         break
 if args.fmt == 'html':
-    print '</html>'
+    print('</html>')
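
The print >> out form replaced above is Python 2's extended print statement and is a syntax error under Python 3; the patch switches to out.write(), and print(..., file=out) would be the other py3-compatible spelling. A minimal sketch with a hypothetical file name; note that write() does not append a newline the way print does:

    with open('report.html', 'w') as out:            # hypothetical output file
        print('<html>', file=out)                    # print() with file= appends a newline
        out.write('<head>')                          # write() adds no trailing newline
        out.write('<title>Profile Report</title>\n')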

--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import cgi
+import argparse
+import html
 import multiprocessing
 import os
 import subprocess
 import sys
-import argparse
 import python_utils.jobset as jobset
 import python_utils.start_port_server as start_port_server
@@ -66,13 +66,13 @@ def heading(name):
 def link(txt, tgt):
     global index_html
-    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
-        tgt, quote=True), cgi.escape(txt))
+    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
+        tgt, quote=True), html.escape(txt))
 def text(txt):
     global index_html
-    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+    index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)
 def _bazel_build_benchmark(bm_name, cfg):
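
One behavioral detail worth noting about the cgi.escape -> html.escape swap above: cgi.escape defaulted to quote=False, while html.escape defaults to quote=True and additionally escapes single quotes, so the escaping here becomes slightly stricter but remains valid HTML. A quick illustration:

    import html

    html.escape('a & "b"')               # 'a &amp; &quot;b&quot;'
    html.escape('a & "b"', quote=False)  # 'a &amp; "b"'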
@@ -95,8 +95,7 @@ def collect_latency(bm_name, args):
     for line in subprocess.check_output([
             'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
             '--benchmark_list_tests'
-    ]).splitlines():
-        line = line.decode('UTF-8')
+    ]).decode('UTF-8').splitlines():
         link(line, '%s.txt' % fnize(line))
         benchmarks.append(
             jobset.JobSpec([
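
The change above reflects that subprocess.check_output() returns bytes on Python 3, so the output is decoded once up front rather than line by line inside the loop. A self-contained sketch (the echo command is just an example):

    import subprocess

    raw = subprocess.check_output(['echo', 'hello'])   # bytes on Python 3
    for line in raw.decode('UTF-8').splitlines():
        print(line)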
@@ -150,8 +149,7 @@ def collect_perf(bm_name, args):
     for line in subprocess.check_output([
             'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
             '--benchmark_list_tests'
-    ]).splitlines():
-        line = line.decode('UTF-8')
+    ]).decode('UTF-8').splitlines():
         link(line, '%s.svg' % fnize(line))
         benchmarks.append(
             jobset.JobSpec([
@@ -201,7 +199,7 @@ def run_summary(bm_name, cfg, base_json_name):
     ]
     if args.summary_time is not None:
         cmd += ['--benchmark_min_time=%d' % args.summary_time]
-    return subprocess.check_output(cmd)
+    return subprocess.check_output(cmd).decode('UTF-8')
 def collect_summary(bm_name, args):
@@ -216,7 +214,7 @@ def collect_summary(bm_name, args):
                 'tools/profiling/microbenchmarks/bm2bq.py',
                 '%s.counters.json' % bm_name,
                 '%s.opt.json' % bm_name
-            ]))
+            ]).decode('UTF-8'))
         subprocess.check_call([
             'bq', 'load', 'microbenchmarks.microbenchmarks',
             '%s.csv' % bm_name
