Added support for important mark (!) analysis.

For example, an input of
GRPC_LAT_PROF 1316908107613.015869 0x7fd35f7fe700 { 205 (nil) src/core/iomgr/tcp_posix.c 1
GRPC_LAT_PROF 1316908107614.015869 0x7fd35f7fe700 { 205 (nil) src/core/iomgr/tcp_posix.c 2
GRPC_LAT_PROF 1316908107615.015869 0x7fd35f7fe700 ! 999999 (nil) src/core/iomgr/tcp_posix.c 3
GRPC_LAT_PROF 1316908107616.015869 0x7fd35f7fe700 } 205 (nil) src/core/iomgr/tcp_posix.c 4
GRPC_LAT_PROF 1316908107617.015869 0x7fd35f7fe700 ! 999999 (nil) src/core/iomgr/tcp_posix.c 5
GRPC_LAT_PROF 1316908107618.015869 0x7fd35f7fe700 } 205 (nil) src/core/iomgr/tcp_posix.c 6

results in

tag 50%/90%/95%/99% us
205 5.00/5.00/5.00/5.00

Important marks:
================
999999 @ src/core/iomgr/tcp_posix.c:3
205 { (src/core/iomgr/tcp_posix.c:1):        2.000 us
205 { (src/core/iomgr/tcp_posix.c:2):        1.000 us
205 } (src/core/iomgr/tcp_posix.c:4):       -1.000 us
205 } (src/core/iomgr/tcp_posix.c:6):       -3.000 us

999999 @ src/core/iomgr/tcp_posix.c:5
205 { (src/core/iomgr/tcp_posix.c:1):        4.000 us
205 } (src/core/iomgr/tcp_posix.c:6):       -1.000 us
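For reference, the analyzer reads these lines from stdin and splits each one into (time, thread, type, tag, id, file, line) with the regex in the patch below; the regex change simply adds '!' to the set of accepted mark types. A quick standalone check against the third line of the example input (the regex is copied from the patch; the snippet itself is only illustrative and not part of the commit):

import re

_RE_LINE = re.compile(r'GRPC_LAT_PROF ' +
                      r'([0-9]+\.[0-9]+) 0x([0-9a-f]+) ([{}.!]) ([0-9]+) ' +
                      r'([^ ]+) ([^ ]+) ([0-9]+)')

line = ('GRPC_LAT_PROF 1316908107615.015869 0x7fd35f7fe700 ! 999999 (nil) '
        'src/core/iomgr/tcp_posix.c 3')
print _RE_LINE.match(line).groups()
# -> ('1316908107615.015869', '7fd35f7fe700', '!', '999999', '(nil)',
#     'src/core/iomgr/tcp_posix.c', '3')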
commit 5b2ea2979b (pull/1491/head), parent 234e62ce97
David Garcia Quintas, 10 years ago
1 changed file: tools/profile_analyzer/profile_analyzer.py (54 additions)

@@ -38,19 +38,43 @@ Usage:
 import collections
+import itertools
 import re
 import sys
 
 # Create a regex to parse output of the C core basic profiler,
 # as defined in src/core/profiling/basic_timers.c.
 _RE_LINE = re.compile(r'GRPC_LAT_PROF ' +
-                      r'([0-9]+\.[0-9]+) 0x([0-9a-f]+) ([{}.]) ([0-9]+) ' +
+                      r'([0-9]+\.[0-9]+) 0x([0-9a-f]+) ([{}.!]) ([0-9]+) ' +
                       r'([^ ]+) ([^ ]+) ([0-9]+)')
 
 Entry = collections.namedtuple(
     'Entry',
     ['time', 'thread', 'type', 'tag', 'id', 'file', 'line'])
 
+
+class ImportantMark(object):
+  def __init__(self, entry, stack):
+    self._entry = entry
+    self._pre_stack = stack
+    self._post_stack = list()
+    self._n = len(stack)  # we'll also compute times to that many closing }s
+
+  @property
+  def entry(self):
+    return self._entry
+
+  def append_post_entry(self, entry):
+    if self._n > 0:
+      self._post_stack.append(entry)
+      self._n -= 1
+
+  def get_deltas(self):
+    pre_and_post_stacks = itertools.chain(self._pre_stack, self._post_stack)
+    return collections.OrderedDict((stack_entry,
+                                    (self._entry.time - stack_entry.time))
+                                   for stack_entry in pre_and_post_stacks)
+
+
 def entries():
   for line in sys.stdin:
     m = _RE_LINE.match(line)
@@ -66,13 +90,27 @@ def entries():
 threads = collections.defaultdict(lambda: collections.defaultdict(list))
 times = collections.defaultdict(list)
 
+# Indexed by the mark's tag. Items in the value list correspond to the mark in
+# different stack situations.
+important_marks = collections.defaultdict(list)
+
 for entry in entries():
   thread = threads[entry.thread]
   if entry.type == '{':
     thread[entry.tag].append(entry)
+  if entry.type == '!':
+    # Save a snapshot of the current stack inside a new ImportantMark instance.
+    # Get all entries with type '{' from "thread".
+    stack = [e for entries_for_tag in thread.values()
+               for e in entries_for_tag if e.type == '{']
+    important_marks[entry.tag].append(ImportantMark(entry, stack))
   elif entry.type == '}':
     last = thread[entry.tag].pop()
     times[entry.tag].append(entry.time - last.time)
+    # Update accounting for important marks.
+    for imarks_for_tag in important_marks.itervalues():
+      for imark in imarks_for_tag:
+        imark.append_post_entry(entry)
 
 def percentile(vals, pct):
   return sorted(vals)[int(len(vals) * pct / 100.0)]
@@ -85,3 +123,17 @@ for tag in sorted(times.keys()):
           percentile(vals, 90),
           percentile(vals, 95),
           percentile(vals, 99))
+
+print
+print 'Important marks:'
+print '================'
+for tag, imarks_for_tag in important_marks.iteritems():
+  for imark in imarks_for_tag:
+    deltas = imark.get_deltas()
+    print '{tag} @ {file}:{line}'.format(**imark.entry._asdict())
+    for entry, time_delta_us in deltas.iteritems():
+      format_dict = entry._asdict()
+      format_dict['time_delta_us'] = time_delta_us
+      print '{tag} {type} ({file}:{line}): {time_delta_us:12.3f} us'.format(
+          **format_dict)
+    print
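As a sanity check, the example trace from the commit message can be replayed against the new class by hand. This is a standalone sketch, not part of the commit: it assumes Entry and ImportantMark are available (e.g. copied from profile_analyzer.py, or the file refactored so they can be imported without consuming stdin), and it builds the Entry tuples directly instead of parsing stdin.

# Entries for the first important mark (tag 999999 at tcp_posix.c:3) and the
# '{'/'}' entries that surround it in the example trace.
e1 = Entry(1316908107613.015869, '7fd35f7fe700', '{', '205', '(nil)',
           'src/core/iomgr/tcp_posix.c', '1')
e2 = Entry(1316908107614.015869, '7fd35f7fe700', '{', '205', '(nil)',
           'src/core/iomgr/tcp_posix.c', '2')
mark = Entry(1316908107615.015869, '7fd35f7fe700', '!', '999999', '(nil)',
             'src/core/iomgr/tcp_posix.c', '3')
e4 = Entry(1316908107616.015869, '7fd35f7fe700', '}', '205', '(nil)',
           'src/core/iomgr/tcp_posix.c', '4')
e6 = Entry(1316908107618.015869, '7fd35f7fe700', '}', '205', '(nil)',
           'src/core/iomgr/tcp_posix.c', '6')

imark = ImportantMark(mark, [e1, e2])  # snapshot of the open '{' stack at the '!'
imark.append_post_entry(e4)            # the next len(stack) closing '}'s
imark.append_post_entry(e6)            # complete the picture
for entry, delta in imark.get_deltas().iteritems():
  print '%s %s (%s:%s): %12.3f us' % (entry.tag, entry.type,
                                      entry.file, entry.line, delta)
# 205 { (src/core/iomgr/tcp_posix.c:1):        2.000 us
# 205 { (src/core/iomgr/tcp_posix.c:2):        1.000 us
# 205 } (src/core/iomgr/tcp_posix.c:4):       -1.000 us
# 205 } (src/core/iomgr/tcp_posix.c:6):       -3.000 us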
