Merge branch 'epex4' of github.com:ctiller/grpc into epex4

pull/10712/head
Craig Tiller 8 years ago
commit 3fb0f94650
  1. src/cpp/thread_manager/thread_manager.cc (6)
  2. src/objective-c/BoringSSL.podspec (8)
  3. test/cpp/microbenchmarks/bm_cq_multiple_threads.cc (4)
  4. test/cpp/qps/client_sync.cc (2)
  5. tools/profiling/microbenchmarks/bm_diff.py (91)

@@ -159,15 +159,15 @@ void ThreadManager::MainWorkLoop() {
       // Take the lock again to check post conditions
       lock.lock();
       // If we're shutdown, we should finish at this point.
-      // If not, there's a chance that we'll exceed the max poller count: that
-      // is explicitly ok - we'll decrease after one poll timeout, and prevent
-      // some thrashing starting up and shutting down threads
       if (shutdown_) done = true;
       break;
     }
     // If we decided to finish the thread, break out of the while loop
     if (done) break;
     // ... otherwise increase poller count and continue
+    // There's a chance that we'll exceed the max poller count: that is
+    // explicitly ok - we'll decrease after one poll timeout, and prevent
+    // some thrashing starting up and shutting down threads
     num_pollers_++;
   };
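
The relocated comment explains why num_pollers_ is incremented without re-checking the configured maximum: briefly overshooting is tolerated and corrects itself after one poll timeout. A minimal sketch of that pattern, using illustrative names rather than gRPC's actual ThreadManager:

// Sketch only: the names below are hypothetical, not gRPC's implementation.
#include <mutex>

class PollerPool {
 public:
  explicit PollerPool(int max_pollers) : max_pollers_(max_pollers) {}

  // A worker that just finished a work item rejoins the poller pool.
  // Deliberately no max check: briefly exceeding max_pollers_ is fine and
  // avoids thrashing threads up and down under bursty load.
  void RejoinAsPoller() {
    std::lock_guard<std::mutex> lock(mu_);
    ++num_pollers_;
  }

  // Called when a poll times out without finding work: shed one poller so
  // the pool drifts back under the configured maximum.
  bool ShouldStopPolling() {
    std::lock_guard<std::mutex> lock(mu_);
    if (num_pollers_ > max_pollers_) {
      --num_pollers_;
      return true;  // this thread stops polling
    }
    return false;
  }

 private:
  std::mutex mu_;
  int num_pollers_ = 0;
  const int max_pollers_;
};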

@@ -31,7 +31,7 @@
 Pod::Spec.new do |s|
   s.name = 'BoringSSL'
-  version = '8.1'
+  version = '8.2'
   s.version = version
   s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
   # Adapted from the homepage:
@@ -113,7 +113,6 @@ Pod::Spec.new do |s|
   s.subspec 'Interface' do |ss|
     ss.header_mappings_dir = 'include/openssl'
     ss.source_files = 'include/openssl/*.h'
-    ss.exclude_files = 'include/openssl/arm_arch.h'
   end
   s.subspec 'Implementation' do |ss|
     ss.header_mappings_dir = '.'
@@ -150,6 +149,11 @@ Pod::Spec.new do |s|
   #include "ssl.h"
   #include "crypto.h"
   #include "aes.h"
+  /* The following macros are defined by base.h. The latter is the first file included by the
+     other headers. */
+  #if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
+  # include "arm_arch.h"
+  #endif
   #include "asn1.h"
   #include "asn1_mac.h"
   #include "asn1t.h"

@@ -67,7 +67,9 @@ static void pollset_init(grpc_pollset* ps, gpr_mu** mu) {
   *mu = &ps->mu;
 }
-static void pollset_destroy(grpc_pollset* ps) { gpr_mu_destroy(&ps->mu); }
+static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* ps) {
+  gpr_mu_destroy(&ps->mu);
+}
 static grpc_error* pollset_kick(grpc_pollset* p, grpc_pollset_worker* worker) {
   return GRPC_ERROR_NONE;
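
This stub now matches the pollset interface that threads a grpc_exec_ctx through destruction. A hedged sketch of what a caller looks like under that interface; the exec_ctx helpers below (GRPC_EXEC_CTX_INIT, grpc_exec_ctx_finish) reflect the internal API of that era and are assumptions here, not part of this diff:

/* Sketch, assuming the exec_ctx-threading pollset API this change targets. */
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset* ps = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
gpr_mu* mu;
grpc_pollset_init(ps, &mu);
/* ... poll / kick ... */
grpc_pollset_destroy(&exec_ctx, ps); /* destroy now takes the exec_ctx */
grpc_exec_ctx_finish(&exec_ctx);     /* flush work queued on the exec_ctx */
gpr_free(ps);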

@@ -167,7 +167,7 @@ class SynchronousStreamingClient final : public SynchronousClient {
         }
       });
     }
-    for (size_t i=0; i<num_threads_; i++) {
+    for (size_t i = 0; i < num_threads_; i++) {
       cleanup_threads[i].join();
     }
   }

@@ -192,52 +192,59 @@ class Benchmark:
     return [self.final[f] if f in self.final else '' for f in flds]
-def read_file(filename):
+def eintr_be_gone(fn):
+  """Run fn until it doesn't stop because of EINTR"""
   while True:
     try:
-      with open(filename) as f:
-        return f.read()
+      return fn()
     except IOError, e:
       if e.errno != errno.EINTR:
         raise
 def read_json(filename):
-  return json.loads(read_file(filename))
-benchmarks = collections.defaultdict(Benchmark)
-for bm in args.benchmarks:
-  for loop in range(0, args.loops):
-    js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
-    js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
-    js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
-    js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
-    for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, True)
-    for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-      print row
-      name = row['cpp_name']
-      if name.endswith('_mean') or name.endswith('_stddev'): continue
-      benchmarks[name].add_sample(row, False)
-really_interesting = set()
-for name, bm in benchmarks.items():
-  print name
-  really_interesting.update(bm.process())
-fields = [f for f in args.track if f in really_interesting]
-headers = ['Benchmark'] + fields
-rows = []
-for name in sorted(benchmarks.keys()):
-  if benchmarks[name].skip(): continue
-  rows.append([name] + benchmarks[name].row(fields))
-if rows:
-  text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-else:
-  text = 'No significant performance differences'
-comment_on_pr.comment_on_pr('```\n%s\n```' % text)
-print text
+  with open(filename) as f: return json.loads(f.read())
+def finalize():
+  benchmarks = collections.defaultdict(Benchmark)
+  for bm in args.benchmarks:
+    for loop in range(0, args.loops):
+      js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
+      js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
+      js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
+      js_old_opt = read_json('%s.opt.old.%d.json' % (bm, loop))
+      for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, True)
+      for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+        print row
+        name = row['cpp_name']
+        if name.endswith('_mean') or name.endswith('_stddev'): continue
+        benchmarks[name].add_sample(row, False)
+  really_interesting = set()
+  for name, bm in benchmarks.items():
+    print name
+    really_interesting.update(bm.process())
+  fields = [f for f in args.track if f in really_interesting]
+  headers = ['Benchmark'] + fields
+  rows = []
+  for name in sorted(benchmarks.keys()):
+    if benchmarks[name].skip(): continue
+    rows.append([name] + benchmarks[name].row(fields))
+  if rows:
+    text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+  else:
+    text = 'No significant performance differences'
+  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+  print text
+eintr_be_gone(finalize)
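
eintr_be_gone wraps the whole report-generation step so that an open() or read() interrupted by a signal is retried instead of killing the script; Python 3.5+ retries interrupted system calls automatically (PEP 475), but this script runs under Python 2, so the wrapper is explicit. The same idiom at the C level, as a sketch rather than anything in this PR:

// Retry a read() that fails with EINTR (interrupted by a signal).
#include <cerrno>
#include <unistd.h>

ssize_t read_retrying_on_eintr(int fd, void* buf, size_t count) {
  ssize_t n;
  do {
    n = read(fd, buf, count);
  } while (n < 0 && errno == EINTR);  // retry only on signal interruption
  return n;
}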
