Fix python3 performance tests - incomplete migration from python2 (#25734)

This fixes a crash in the grpc_performance_profile_* and grpc_e2e_performance_* tests.

Example failure: https://source.cloud.google.com/results/invocations/2112f2d5-db91-4901-87cb-cc9865f351f1/targets

The daily performance test passes with this fixit branch:
http://sponge2/23d4df20-f4dd-48ee-8789-4cd96b078ed5.

Additional fixes:

- Reorder imports according to the Google style guide.
- Replace the deprecated cgi.escape library method with html.escape.
- In case it comes up with respect to the py2-to-py3 upgrade: the iter{items,keys,values} dict methods are intentionally replaced with the non-iter versions, which are less efficient in Python 2. This is the recommended practice even when supporting both Python versions, and this is not performance-critical code; a minimal sketch of the pattern follows below.
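
For reference, a minimal sketch of that dict-iteration change. The `times` dict of lists here is a made-up stand-in for the profile data, not the actual gRPC structures:

```python
# Python 2's dict.iteritems()/itervalues() were removed in Python 3.
# The non-iter versions return view objects in py3 (cheap) and plain
# lists in py2, which is fine for non-performance-critical code.
times = {'self': [1.0, 2.0], 'total': [3.0]}

for key, samples in times.items():   # was: times.iteritems()
    samples.append(0.5)

for samples in times.values():       # was: times.itervalues()
    samples.sort()
```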

Additional note: there is no performance difference between py2 and py3 (~80 minutes to finish in both cases).
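
For context, much of the diff below just makes the bytes/str boundaries explicit: under Python 3, subprocess.check_output() returns bytes and hashlib digests accept only bytes. A minimal, self-contained sketch of that pattern (the command, the tag strings, and the choice of md5 are placeholders, not taken from the gRPC code):

```python
import hashlib
import subprocess

# In Python 3, check_output() returns bytes; decode once at the boundary
# and work with str from then on (placeholder command, for illustration).
output = subprocess.check_output(['echo', 'benchmark_list']).decode('UTF-8')
for line in output.splitlines():
    print(line)

# hashlib update() accepts only bytes in Python 3; encode str before hashing
# (md5 chosen arbitrarily here).
signature = hashlib.md5()
signature.update('{'.encode('UTF-8'))
signature.update('some_tag'.encode('UTF-8'))
print(signature.hexdigest())
```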
Branch: pull/25153/head
Author: AJ Heller, 4 years ago (committed by GitHub)
Commit: 4ea47d6ae3 (parent: 633b695af4)
Changed files:
  1. tools/profiling/latency_profile/profile_analyzer.py (33 changed lines)
  2. tools/run_tests/run_microbenchmark.py (20 changed lines)

--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -16,13 +16,14 @@
 import argparse
 import collections
 import hashlib
-import itertools
 import json
 import math
 import sys
-import tabulate
 import time
+
+from six.moves import zip
+import tabulate
 
 SELF_TIME = object()
 TIME_FROM_SCOPE_START = object()
 TIME_TO_SCOPE_END = object()
@@ -107,8 +108,8 @@ class CallStackBuilder(object):
     def add(self, line):
         line_type = line['type']
-        self.signature.update(line_type)
-        self.signature.update(line['tag'])
+        self.signature.update(line_type.encode('UTF-8'))
+        self.signature.update(line['tag'].encode('UTF-8'))
         if line_type == '{':
             self.stk.append(ScopeBuilder(self, line))
             return False
@@ -143,15 +144,15 @@ class CallStack(object):
         assert self.signature == call_stack_builder.signature
         self.count += 1
         assert len(self.lines) == len(call_stack_builder.lines)
-        for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
+        for lsum, line in zip(self.lines, call_stack_builder.lines):
             assert lsum.tag == line.tag
             assert lsum.times.keys() == line.times.keys()
-            for k, lst in lsum.times.iteritems():
+            for k, lst in lsum.times.items():
                 lst.append(line.times[k])
 
     def finish(self):
         for line in self.lines:
-            for lst in line.times.itervalues():
+            for lst in line.times.values():
                 lst.sort()
@@ -247,18 +248,18 @@ if args.out != '-':
     out = open(args.out, 'w')
 
 if args.fmt == 'html':
-    print >> out, '<html>'
-    print >> out, '<head>'
-    print >> out, '<title>Profile Report</title>'
-    print >> out, '</head>'
+    out.write('<html>')
+    out.write('<head>')
+    out.write('<title>Profile Report</title>')
+    out.write('</head>')
 
 accounted_for = 0
 for cs in call_stacks:
-    print >> out, '\n'
+    out.write('\n')
     if args.fmt in BANNER:
-        print >> out, BANNER[args.fmt] % {
-            'count': cs.count,
-        }
+        out.write(BANNER[args.fmt] % {
+            'count': cs.count,
+        })
     header, _ = zip(*FORMAT)
     table = []
     for line in cs.lines:
@@ -266,10 +267,10 @@ for cs in call_stacks:
         for _, fn in FORMAT:
             fields.append(fn(line))
         table.append(fields)
-    print >> out, tabulate.tabulate(table, header, tablefmt=args.fmt)
+    out.write(tabulate.tabulate(table, header, tablefmt=args.fmt))
     accounted_for += cs.count
     if accounted_for > .99 * total_stacks:
         break
 
 if args.fmt == 'html':
-    print '</html>'
+    print('</html>')

--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -13,12 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import cgi
+import argparse
+import html
 import multiprocessing
 import os
 import subprocess
 import sys
-import argparse
 
 import python_utils.jobset as jobset
 import python_utils.start_port_server as start_port_server
@@ -66,13 +66,13 @@ def heading(name):
 def link(txt, tgt):
     global index_html
-    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
-        tgt, quote=True), cgi.escape(txt))
+    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
+        tgt, quote=True), html.escape(txt))
 
 
 def text(txt):
     global index_html
-    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+    index_html += "<p><pre>%s</pre></p>\n" % html.escape(txt)
 
 
 def _bazel_build_benchmark(bm_name, cfg):
@@ -95,8 +95,7 @@ def collect_latency(bm_name, args):
     for line in subprocess.check_output([
             'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
             '--benchmark_list_tests'
-    ]).splitlines():
-        line = line.decode('UTF-8')
+    ]).decode('UTF-8').splitlines():
         link(line, '%s.txt' % fnize(line))
         benchmarks.append(
             jobset.JobSpec([
@@ -150,8 +149,7 @@ def collect_perf(bm_name, args):
     for line in subprocess.check_output([
             'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
             '--benchmark_list_tests'
-    ]).splitlines():
-        line = line.decode('UTF-8')
+    ]).decode('UTF-8').splitlines():
         link(line, '%s.svg' % fnize(line))
         benchmarks.append(
             jobset.JobSpec([
@@ -201,7 +199,7 @@ def run_summary(bm_name, cfg, base_json_name):
     ]
     if args.summary_time is not None:
         cmd += ['--benchmark_min_time=%d' % args.summary_time]
-    return subprocess.check_output(cmd)
+    return subprocess.check_output(cmd).decode('UTF-8')
 
 
 def collect_summary(bm_name, args):
@@ -216,7 +214,7 @@ def collect_summary(bm_name, args):
                 'tools/profiling/microbenchmarks/bm2bq.py',
                 '%s.counters.json' % bm_name,
                 '%s.opt.json' % bm_name
-            ]))
+            ]).decode('UTF-8'))
         subprocess.check_call([
             'bq', 'load', 'microbenchmarks.microbenchmarks',
             '%s.csv' % bm_name
