/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <cinttypes>
#include <deque>
#include <list>
#include <thread>
#include <unordered_map>
#include <vector>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/worker_service.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/qps/client.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/histogram.h"
#include "test/cpp/qps/qps_worker.h"
#include "test/cpp/qps/stats.h"
#include "test/cpp/util/test_credentials_provider.h"
using std::deque;
using std::list;
using std::unique_ptr;
using std::vector;
namespace grpc {
namespace testing {
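// Returns the host portion of a "host:port" worker address.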
static std::string get_host(const std::string& worker) {
absl::string_view host;
absl::string_view port;
grpc_core::SplitHostPort(worker.c_str(), &host, &port);
return std::string(host.data(), host.size());
}
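// Reads the comma-separated list of worker addresses from the environment
// variable named by env_name. Logs an error (and returns an empty deque) if
// the variable is unset or empty.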
static deque<string> get_workers(const string& env_name) {
deque<string> out;
char* env = gpr_getenv(env_name.c_str());
if (!env) {
env = gpr_strdup("");
}
char* p = env;
if (strlen(env) != 0) {
for (;;) {
char* comma = strchr(p, ',');
if (comma) {
out.emplace_back(p, comma);
p = comma + 1;
} else {
out.emplace_back(p);
break;
}
}
}
if (out.empty()) {
gpr_log(GPR_ERROR,
"Environment variable \"%s\" does not contain a list of QPS "
"workers to use. Set it to a comma-separated list of "
"hostname:port pairs, starting with hosts that should act as "
"servers. E.g. export "
"%s=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"",
env_name.c_str(), env_name.c_str());
}
gpr_free(env);
return out;
}
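// Returns the per-worker credential type configured for worker_addr, or the
// scenario-wide default credential_type if no override exists.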
std::string GetCredType(
const std::string& worker_addr,
const std::map<std::string, std::string>& per_worker_credential_types,
const std::string& credential_type) {
auto it = per_worker_credential_types.find(worker_addr);
if (it != per_worker_credential_types.end()) {
return it->second;
}
return credential_type;
}
// helpers for postprocess_scenario_result
static double WallTime(const ClientStats& s) { return s.time_elapsed(); }
static double SystemTime(const ClientStats& s) { return s.time_system(); }
static double UserTime(const ClientStats& s) { return s.time_user(); }
static double CliPollCount(const ClientStats& s) { return s.cq_poll_count(); }
static double SvrPollCount(const ServerStats& s) { return s.cq_poll_count(); }
static double ServerWallTime(const ServerStats& s) { return s.time_elapsed(); }
static double ServerSystemTime(const ServerStats& s) { return s.time_system(); }
static double ServerUserTime(const ServerStats& s) { return s.time_user(); }
static double ServerTotalCpuTime(const ServerStats& s) {
return s.total_cpu_time();
}
static double ServerIdleCpuTime(const ServerStats& s) {
return s.idle_cpu_time();
}
static int Cores(int n) { return n; }
static bool IsSuccess(const Status& s) {
if (s.ok()) return true;
  // Since we shut down servers and clients at the same time, both can
  // observe cancellation. Thus, we consider CANCELLED a good status.
if (static_cast<StatusCode>(s.error_code()) == StatusCode::CANCELLED) {
return true;
}
  // Since we shut down servers and clients at the same time, the server can
  // close the socket before the client attempts to do so, and vice versa.
  // Thus, receiving a "Socket closed" error is fine.
if (s.error_message() == "Socket closed") return true;
return false;
}
// Postprocess ScenarioResult and populate result summary.
static void postprocess_scenario_result(ScenarioResult* result) {
// Get latencies from ScenarioResult latencies histogram and populate to
// result summary.
Histogram histogram;
histogram.MergeProto(result->latencies());
result->mutable_summary()->set_latency_50(histogram.Percentile(50));
result->mutable_summary()->set_latency_90(histogram.Percentile(90));
result->mutable_summary()->set_latency_95(histogram.Percentile(95));
result->mutable_summary()->set_latency_99(histogram.Percentile(99));
result->mutable_summary()->set_latency_999(histogram.Percentile(99.9));
// Calculate qps and cpu load for each client and then aggregate results for
// all clients
double qps = 0;
double client_system_cpu_load = 0, client_user_cpu_load = 0;
for (int i = 0; i < result->client_stats_size(); i++) {
auto client_stat = result->client_stats(i);
qps += client_stat.latencies().count() / client_stat.time_elapsed();
client_system_cpu_load +=
client_stat.time_system() / client_stat.time_elapsed();
client_user_cpu_load +=
client_stat.time_user() / client_stat.time_elapsed();
}
// Calculate cpu load for each server and then aggregate results for all
// servers
double server_system_cpu_load = 0, server_user_cpu_load = 0;
for (int i = 0; i < result->server_stats_size(); i++) {
auto server_stat = result->server_stats(i);
server_system_cpu_load +=
server_stat.time_system() / server_stat.time_elapsed();
server_user_cpu_load +=
server_stat.time_user() / server_stat.time_elapsed();
}
result->mutable_summary()->set_qps(qps);
// Populate the percentage of cpu load to result summary.
result->mutable_summary()->set_server_system_time(100 *
server_system_cpu_load);
result->mutable_summary()->set_server_user_time(100 * server_user_cpu_load);
result->mutable_summary()->set_client_system_time(100 *
client_system_cpu_load);
result->mutable_summary()->set_client_user_time(100 * client_user_cpu_load);
  // On non-Linux platforms, get_cpu_usage() is not implemented, so
  // ServerTotalCpuTime and ServerIdleCpuTime are both 0.
if (average(result->server_stats(), ServerTotalCpuTime) == 0) {
result->mutable_summary()->set_server_cpu_usage(0);
} else {
auto server_cpu_usage =
100 - 100 * average(result->server_stats(), ServerIdleCpuTime) /
average(result->server_stats(), ServerTotalCpuTime);
result->mutable_summary()->set_server_cpu_usage(server_cpu_usage);
}
  // Calculate successful and failed requests per second and populate them in
  // the result summary.
auto time_estimate = average(result->client_stats(), WallTime);
if (result->request_results_size() > 0) {
int64_t successes = 0;
int64_t failures = 0;
for (int i = 0; i < result->request_results_size(); i++) {
const RequestResultCount& rrc = result->request_results(i);
if (rrc.status_code() == 0) {
successes += rrc.count();
} else {
failures += rrc.count();
}
}
result->mutable_summary()->set_successful_requests_per_second(
successes / time_estimate);
result->mutable_summary()->set_failed_requests_per_second(failures /
time_estimate);
}
// Fill in data for other metrics required in result summary
auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
result->mutable_summary()->set_client_polls_per_request(
sum(result->client_stats(), CliPollCount) / histogram.Count());
result->mutable_summary()->set_server_polls_per_request(
sum(result->server_stats(), SvrPollCount) / histogram.Count());
auto server_queries_per_cpu_sec =
histogram.Count() / (sum(result->server_stats(), ServerSystemTime) +
sum(result->server_stats(), ServerUserTime));
auto client_queries_per_cpu_sec =
histogram.Count() / (sum(result->client_stats(), SystemTime) +
sum(result->client_stats(), UserTime));
result->mutable_summary()->set_server_queries_per_cpu_sec(
server_queries_per_cpu_sec);
result->mutable_summary()->set_client_queries_per_cpu_sec(
client_queries_per_cpu_sec);
}
struct ClientData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
};
struct ServerData {
unique_ptr<WorkerService::Stub> stub;
unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
};
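// Writes a final mark to every client and half-closes each stream, asking
// the clients to report their final stats.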
static void FinishClients(const std::vector<ClientData>& clients,
const ClientArgs& client_mark) {
gpr_log(GPR_INFO, "Finishing clients");
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
if (!client->stream->Write(client_mark)) {
gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
GPR_ASSERT(false);
}
if (!client->stream->WritesDone()) {
gpr_log(GPR_ERROR, "Failed WritesDone for client %zu", i);
GPR_ASSERT(false);
}
}
}
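// Reads the final status from every client, merging each client's latency
// histogram and per-status-code request counts into the scenario result.
// That final status must be the last message on the client stream.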
static void ReceiveFinalStatusFromClients(
const std::vector<ClientData>& clients, Histogram& merged_latencies,
std::unordered_map<int, int64_t>& merged_statuses, ScenarioResult& result) {
gpr_log(GPR_INFO, "Receiving final status from clients");
ClientStatus client_status;
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
// Read the client final status
if (client->stream->Read(&client_status)) {
gpr_log(GPR_INFO, "Received final status from client %zu", i);
const auto& stats = client_status.stats();
merged_latencies.MergeProto(stats.latencies());
      for (int j = 0; j < stats.request_results_size(); j++) {
        merged_statuses[stats.request_results(j).status_code()] +=
            stats.request_results(j).count();
      }
result.add_client_stats()->CopyFrom(stats);
// That final status should be the last message on the client stream
GPR_ASSERT(!client->stream->Read(&client_status));
} else {
gpr_log(GPR_ERROR, "Couldn't get final status from client %zu", i);
GPR_ASSERT(false);
}
}
}
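// Finishes every client stream and records in the result whether each client
// shut down with an acceptable status (see IsSuccess).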
static void ShutdownClients(const std::vector<ClientData>& clients,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Shutdown clients");
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
Status s = client->stream->Finish();
    // Since we shut down servers and clients at the same time, clients can
    // observe cancellation. Thus, we consider both OK and CANCELLED good
    // statuses.
const bool success = IsSuccess(s);
result.add_client_success(success);
if (!success) {
gpr_log(GPR_ERROR, "Client %zu had an error %s", i,
s.error_message().c_str());
GPR_ASSERT(false);
}
}
}
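// Writes a final mark to every server and half-closes each stream, asking
// the servers to report their final stats.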
static void FinishServers(const std::vector<ServerData>& servers,
const ServerArgs& server_mark) {
gpr_log(GPR_INFO, "Finishing servers");
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
if (!server->stream->Write(server_mark)) {
gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
GPR_ASSERT(false);
}
if (!server->stream->WritesDone()) {
gpr_log(GPR_ERROR, "Failed WritesDone for server %zu", i);
GPR_ASSERT(false);
}
}
}
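// Reads the final status from every server, recording its stats and core
// count in the scenario result. That final status must be the last message
// on the server stream.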
static void ReceiveFinalStatusFromServer(const std::vector<ServerData>& servers,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Receiving final status from servers");
ServerStatus server_status;
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
// Read the server final status
if (server->stream->Read(&server_status)) {
gpr_log(GPR_INFO, "Received final status from server %zu", i);
result.add_server_stats()->CopyFrom(server_status.stats());
result.add_server_cores(server_status.cores());
// That final status should be the last message on the server stream
GPR_ASSERT(!server->stream->Read(&server_status));
} else {
gpr_log(GPR_ERROR, "Couldn't get final status from server %zu", i);
GPR_ASSERT(false);
}
}
}
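// Finishes every server stream and records in the result whether each server
// shut down with an acceptable status (see IsSuccess).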
static void ShutdownServers(const std::vector<ServerData>& servers,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Shutdown servers");
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
Status s = server->stream->Finish();
    // Since we shut down servers and clients at the same time, servers can
    // observe cancellation. Thus, we consider both OK and CANCELLED good
    // statuses.
const bool success = IsSuccess(s);
result.add_server_success(success);
if (!success) {
gpr_log(GPR_ERROR, "Server %zu had an error %s", i,
s.error_message().c_str());
GPR_ASSERT(false);
}
}
}
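// Non-null only while an in-process scenario is running; allocated and freed
// by RunScenario.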
std::vector<grpc::testing::Server*>* g_inproc_servers = nullptr;
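// Runs a single benchmark scenario end to end: connects to (or spawns) the
// worker processes, starts num_servers servers and num_clients clients,
// warms up for warmup_seconds, runs for benchmark_seconds, then collects and
// post-processes the merged results.
//
// A sketch of a hypothetical invocation from a driver binary (the argument
// values are illustrative only):
//
//   ClientConfig client_config;  // populated from a Scenario proto
//   ServerConfig server_config;
//   std::unique_ptr<ScenarioResult> result = RunScenario(
//       client_config, /*num_clients=*/1, server_config, /*num_servers=*/1,
//       /*warmup_seconds=*/5, /*benchmark_seconds=*/30,
//       /*spawn_local_worker_count=*/-2,
//       /*qps_server_target_override=*/"", credential_type,
//       /*per_worker_credential_types=*/{}, /*run_inproc=*/false,
//       /*median_latency_collection_interval_millis=*/0);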
std::unique_ptr<ScenarioResult> RunScenario(
const ClientConfig& initial_client_config, size_t num_clients,
const ServerConfig& initial_server_config, size_t num_servers,
int warmup_seconds, int benchmark_seconds, int spawn_local_worker_count,
const std::string& qps_server_target_override,
const std::string& credential_type,
const std::map<std::string, std::string>& per_worker_credential_types,
bool run_inproc, int32_t median_latency_collection_interval_millis) {
if (run_inproc) {
g_inproc_servers = new std::vector<grpc::testing::Server*>;
}
// Log everything from the driver
gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
// ClientContext allocations (all are destroyed at scope exit)
list<ClientContext> contexts;
auto alloc_context = [](list<ClientContext>* contexts) {
contexts->emplace_back();
auto context = &contexts->back();
context->set_wait_for_ready(true);
return context;
};
  // To be added to the result: the final client configuration used
  // (including resolved server targets, etc.)
ClientConfig result_client_config;
// Get client, server lists; ignore if inproc test
auto workers = (!run_inproc) ? get_workers("QPS_WORKERS") : deque<string>();
ClientConfig client_config = initial_client_config;
// Spawn some local workers if desired
vector<unique_ptr<QpsWorker>> local_workers;
for (int i = 0; i < abs(spawn_local_worker_count); i++) {
    // Act as if we're a new test binary so we get a good RNG seed.
static bool called_init = false;
if (!called_init) {
char args_buf[100];
strcpy(args_buf, "some-benchmark");
char* args[] = {args_buf};
grpc_test_init(1, args);
called_init = true;
}
char addr[256];
    // A port of -1 indicates an in-process worker.
int driver_port = (!run_inproc) ? grpc_pick_unused_port_or_die() : -1;
local_workers.emplace_back(new QpsWorker(driver_port, 0, credential_type));
sprintf(addr, "localhost:%d", driver_port);
if (spawn_local_worker_count < 0) {
workers.push_front(addr);
} else {
workers.push_back(addr);
}
}
GPR_ASSERT(!workers.empty());
  // If num_clients is 0, do dynamic sizing: all workers except the servers
  // act as clients.
  if (num_clients == 0) {
num_clients = workers.size() - num_servers;
}
  // TODO(ctiller): support running multiple configurations, and binpack
  // client/server pairs to available workers.
GPR_ASSERT(workers.size() >= num_clients + num_servers);
// Trim to just what we need
workers.resize(num_clients + num_servers);
// Start servers
std::vector<ServerData> servers(num_servers);
std::unordered_map<string, std::deque<int>> hosts_cores;
ChannelArguments channel_args;
for (size_t i = 0; i < num_servers; i++) {
gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
workers[i].c_str(), i);
if (!run_inproc) {
servers[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
workers[i],
GetCredType(workers[i], per_worker_credential_types, credential_type),
nullptr /* call creds */, {} /* interceptor creators */));
} else {
servers[i].stub = WorkerService::NewStub(
local_workers[i]->InProcessChannel(channel_args));
}
const ServerConfig& server_config = initial_server_config;
if (server_config.core_limit() != 0) {
gpr_log(GPR_ERROR,
"server config core limit is set but ignored by driver");
GPR_ASSERT(false);
}
ServerArgs args;
*args.mutable_setup() = server_config;
servers[i].stream = servers[i].stub->RunServer(alloc_context(&contexts));
if (!servers[i].stream->Write(args)) {
gpr_log(GPR_ERROR, "Could not write args to server %zu", i);
GPR_ASSERT(false);
}
ServerStatus init_status;
if (!servers[i].stream->Read(&init_status)) {
gpr_log(GPR_ERROR, "Server %zu did not yield initial status", i);
GPR_ASSERT(false);
}
if (run_inproc) {
std::string cli_target(INPROC_NAME_PREFIX);
cli_target += std::to_string(i);
client_config.add_server_targets(cli_target);
} else {
std::string host = get_host(workers[i]);
std::string cli_target =
grpc_core::JoinHostPort(host.c_str(), init_status.port());
client_config.add_server_targets(cli_target.c_str());
}
}
if (qps_server_target_override.length() > 0) {
    // Overriding the QPS server target only makes sense if there is at most
    // one server.
GPR_ASSERT(num_servers <= 1);
client_config.add_server_targets(qps_server_target_override);
}
client_config.set_median_latency_collection_interval_millis(
median_latency_collection_interval_millis);
// Targets are all set by now
result_client_config = client_config;
// Start clients
std::vector<ClientData> clients(num_clients);
size_t channels_allocated = 0;
for (size_t i = 0; i < num_clients; i++) {
const auto& worker = workers[i + num_servers];
gpr_log(GPR_INFO, "Starting client on %s (worker #%" PRIuPTR ")",
worker.c_str(), i + num_servers);
if (!run_inproc) {
clients[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
worker,
GetCredType(worker, per_worker_credential_types, credential_type),
nullptr /* call creds */, {} /* interceptor creators */));
} else {
clients[i].stub = WorkerService::NewStub(
local_workers[i + num_servers]->InProcessChannel(channel_args));
}
ClientConfig per_client_config = client_config;
if (initial_client_config.core_limit() != 0) {
gpr_log(GPR_ERROR, "client config core limit set but ignored");
GPR_ASSERT(false);
}
    // Divide the remaining channels among the remaining clients so that the
    // total number of channels specified is preserved regardless of the
    // number of clients.
size_t num_channels =
(client_config.client_channels() - channels_allocated) /
(num_clients - i);
channels_allocated += num_channels;
gpr_log(GPR_DEBUG, "Client %" PRIdPTR " gets %" PRIdPTR " channels", i,
num_channels);
per_client_config.set_client_channels(num_channels);
ClientArgs args;
*args.mutable_setup() = per_client_config;
clients[i].stream = clients[i].stub->RunClient(alloc_context(&contexts));
if (!clients[i].stream->Write(args)) {
gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
GPR_ASSERT(false);
}
}
for (size_t i = 0; i < num_clients; i++) {
ClientStatus init_status;
if (!clients[i].stream->Read(&init_status)) {
gpr_log(GPR_ERROR, "Client %zu did not yield initial status", i);
GPR_ASSERT(false);
}
}
// Send an initial mark: clients can use this to know that everything is ready
// to start
gpr_log(GPR_INFO, "Initiating");
ServerArgs server_mark;
server_mark.mutable_mark()->set_reset(true);
ClientArgs client_mark;
client_mark.mutable_mark()->set_reset(true);
ServerStatus server_status;
ClientStatus client_status;
for (size_t i = 0; i < num_clients; i++) {
auto client = &clients[i];
if (!client->stream->Write(client_mark)) {
gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
GPR_ASSERT(false);
}
}
for (size_t i = 0; i < num_clients; i++) {
auto client = &clients[i];
if (!client->stream->Read(&client_status)) {
gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i);
GPR_ASSERT(false);
}
}
  // Let everything warm up.
gpr_log(GPR_INFO, "Warming up");
gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME);
gpr_sleep_until(
gpr_time_add(start, gpr_time_from_seconds(warmup_seconds, GPR_TIMESPAN)));
// Start a run
gpr_log(GPR_INFO, "Starting");
for (size_t i = 0; i < num_servers; i++) {
auto server = &servers[i];
if (!server->stream->Write(server_mark)) {
gpr_log(GPR_ERROR, "Couldn't write mark to server %zu", i);
GPR_ASSERT(false);
}
}
for (size_t i = 0; i < num_clients; i++) {
auto client = &clients[i];
if (!client->stream->Write(client_mark)) {
gpr_log(GPR_ERROR, "Couldn't write mark to client %zu", i);
GPR_ASSERT(false);
}
}
for (size_t i = 0; i < num_servers; i++) {
auto server = &servers[i];
if (!server->stream->Read(&server_status)) {
gpr_log(GPR_ERROR, "Couldn't get status from server %zu", i);
GPR_ASSERT(false);
}
}
for (size_t i = 0; i < num_clients; i++) {
auto client = &clients[i];
if (!client->stream->Read(&client_status)) {
gpr_log(GPR_ERROR, "Couldn't get status from client %zu", i);
GPR_ASSERT(false);
}
}
// Wait some time
gpr_log(GPR_INFO, "Running");
// Use gpr_sleep_until rather than this_thread::sleep_until to support
// compilers that don't work with this_thread
gpr_sleep_until(gpr_time_add(
start,
gpr_time_from_seconds(warmup_seconds + benchmark_seconds, GPR_TIMESPAN)));
gpr_timer_set_enabled(0);
// Finish a run
std::unique_ptr<ScenarioResult> result(new ScenarioResult);
Histogram merged_latencies;
std::unordered_map<int, int64_t> merged_statuses;
  // For tests that the clients drive, such as UNARY and
  // STREAMING_FROM_CLIENT, the clients must finish completely while a server
  // is still running, to keep the clients from getting stuck waiting for
  // results.
bool client_finish_first =
(client_config.rpc_type() != STREAMING_FROM_SERVER);
FinishClients(clients, client_mark);
if (!client_finish_first) {
FinishServers(servers, server_mark);
}
ReceiveFinalStatusFromClients(clients, merged_latencies, merged_statuses,
*result);
ShutdownClients(clients, *result);
if (client_finish_first) {
FinishServers(servers, server_mark);
}
ReceiveFinalStatusFromServer(servers, *result);
ShutdownServers(servers, *result);
if (g_inproc_servers != nullptr) {
delete g_inproc_servers;
}
merged_latencies.FillProto(result->mutable_latencies());
  for (const auto& status : merged_statuses) {
    RequestResultCount* rrc = result->add_request_results();
    rrc->set_status_code(status.first);
    rrc->set_count(status.second);
  }
postprocess_scenario_result(result.get());
return result;
}
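// Sends a QuitWorker RPC to every worker listed in QPS_WORKERS. Returns
// false if the worker list is empty or if any worker fails to quit cleanly.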
bool RunQuit(
const std::string& credential_type,
const std::map<std::string, std::string>& per_worker_credential_types) {
// Get client, server lists
bool result = true;
auto workers = get_workers("QPS_WORKERS");
if (workers.empty()) {
return false;
}
for (size_t i = 0; i < workers.size(); i++) {
auto stub = WorkerService::NewStub(grpc::CreateTestChannel(
workers[i],
GetCredType(workers[i], per_worker_credential_types, credential_type),
nullptr /* call creds */, {} /* interceptor creators */));
Void phony;
grpc::ClientContext ctx;
ctx.set_wait_for_ready(true);
Status s = stub->QuitWorker(&ctx, phony, &phony);
if (!s.ok()) {
gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
s.error_message().c_str());
result = false;
}
}
return result;
}
} // namespace testing
} // namespace grpc