Merge pull request #4918 from vjpai/dynamic_sizing2

Allow dynamic sizing of async test thread pools (to core count) and number of client workers
Yang Gao 9 years ago
commit d991735cb1
3 changed files:
  1. test/cpp/qps/client_async.cc (28 changed lines)
  2. test/cpp/qps/driver.cc (16 changed lines)
  3. test/cpp/qps/server_async.cc (18 changed lines)
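All three files follow the same convention: a thread or client count configured as zero or negative now means "size dynamically from the environment" instead of being used literally, while explicit positive settings keep their old behavior. Below is a minimal standalone sketch of that convention, assuming only gpr_cpu_num_cores() and gpr_log() from gRPC's support library; the ResolveThreadCount helper and main() are illustrative and not part of the patch:

#include <grpc/support/cpu.h>
#include <grpc/support/log.h>

// Illustrative helper (not in the patch): a non-positive configured value
// means "size to the machine's core count", matching the intent of
// NumThreads() in client_async.cc and the num_threads logic in
// server_async.cc.
static int ResolveThreadCount(int configured_threads) {
  if (configured_threads > 0) {
    return configured_threads;  // an explicit positive setting wins
  }
  int cores = static_cast<int>(gpr_cpu_num_cores());
  gpr_log(GPR_INFO, "Dynamically sizing to %d threads", cores);
  return cores;
}

int main() {
  // 0 (or any value <= 0) requests dynamic sizing, as in the QPS tests.
  int num_threads = ResolveThreadCount(0);
  gpr_log(GPR_INFO, "Would start %d worker threads", num_threads);
  return 0;
}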

test/cpp/qps/client_async.cc
@@ -46,13 +46,14 @@
 #include <grpc++/client_context.h>
 #include <grpc++/generic/generic_stub.h>
 #include <grpc/grpc.h>
+#include <grpc/support/cpu.h>
 #include <grpc/support/histogram.h>
 #include <grpc/support/log.h>
+#include "src/proto/grpc/testing/services.grpc.pb.h"
 #include "test/cpp/qps/client.h"
 #include "test/cpp/qps/timer.h"
 #include "test/cpp/util/create_test_channel.h"
-#include "src/proto/grpc/testing/services.grpc.pb.h"
 namespace grpc {
 namespace testing {
@@ -164,14 +165,15 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
       std::function<std::unique_ptr<StubType>(std::shared_ptr<Channel>)>
           create_stub)
       : ClientImpl<StubType, RequestType>(config, create_stub),
+        num_async_threads_(NumThreads(config)),
         channel_lock_(new std::mutex[config.client_channels()]),
         contexts_(config.client_channels()),
         max_outstanding_per_channel_(config.outstanding_rpcs_per_channel()),
         channel_count_(config.client_channels()),
-        pref_channel_inc_(config.async_client_threads()) {
-    SetupLoadTest(config, config.async_client_threads());
+        pref_channel_inc_(num_async_threads_) {
+    SetupLoadTest(config, num_async_threads_);
 
-    for (int i = 0; i < config.async_client_threads(); i++) {
+    for (int i = 0; i < num_async_threads_; i++) {
       cli_cqs_.emplace_back(new CompletionQueue);
       if (!closed_loop_) {
         rpc_deadlines_.emplace_back();
@@ -324,6 +326,9 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
     return true;
   }
 
+ protected:
+  int num_async_threads_;
+
  private:
   class boolean {  // exists only to avoid data-race on vector<bool>
    public:
@@ -338,6 +343,15 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
    private:
     bool val_;
   };
+  static int NumThreads(const ClientConfig& config) {
+    int num_threads = config.async_client_threads();
+    if (num_threads <= 0) {  // Use dynamic sizing
+      num_threads = gpr_cpu_num_cores();
+      gpr_log(GPR_INFO, "Sizing client server to %d threads", num_threads);
+    }
+    return num_threads;
+  }
+
   std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
   std::vector<deadline_list> rpc_deadlines_;  // per thread deadlines
@@ -363,7 +377,7 @@ class AsyncUnaryClient GRPC_FINAL
  public:
   explicit AsyncUnaryClient(const ClientConfig& config)
       : AsyncClient(config, SetupCtx, BenchmarkStubCreator) {
-    StartThreads(config.async_client_threads());
+    StartThreads(num_async_threads_);
   }
   ~AsyncUnaryClient() GRPC_OVERRIDE { EndThreads(); }
@@ -461,7 +475,7 @@ class AsyncStreamingClient GRPC_FINAL
     // async streaming currently only supports closed loop
     GPR_ASSERT(closed_loop_);
-    StartThreads(config.async_client_threads());
+    StartThreads(num_async_threads_);
   }
   ~AsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }
@@ -566,7 +580,7 @@ class GenericAsyncStreamingClient GRPC_FINAL
     // async streaming currently only supports closed loop
     GPR_ASSERT(closed_loop_);
-    StartThreads(config.async_client_threads());
+    StartThreads(num_async_threads_);
   }
   ~GenericAsyncStreamingClient() GRPC_OVERRIDE { EndThreads(); }

test/cpp/qps/driver.cc
@@ -31,24 +31,24 @@
  *
  */
+#include <deque>
 #include <list>
 #include <thread>
-#include <deque>
 #include <vector>
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-#include <grpc/support/host_port.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/host_port.h>
+#include <grpc/support/log.h>
 #include "src/core/support/env.h"
+#include "src/proto/grpc/testing/services.grpc.pb.h"
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/histogram.h"
 #include "test/cpp/qps/qps_worker.h"
-#include "src/proto/grpc/testing/services.grpc.pb.h"
 using std::list;
 using std::thread;
@@ -142,6 +142,12 @@ std::unique_ptr<ScenarioResult> RunScenario(
     }
   }
+  // if num_clients is set to <=0, do dynamic sizing: all workers
+  // except for servers are clients
+  if (num_clients <= 0) {
+    num_clients = workers.size() - num_servers;
+  }
+
   // TODO(ctiller): support running multiple configurations, and binpack
   // client/server pairs
   // to available workers

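On the driver side the same convention applies to worker allocation: leaving num_clients at or below zero turns every spawned worker that is not acting as a server into a client. A hedged sketch of that bookkeeping follows; ResolveClientCount and the sample counts are illustrative, and only the workers.size() - num_servers rule comes from the change above:

#include <cstddef>
#include <cstdio>

// Illustrative only (not in the patch): a non-positive requested client count
// is replaced by "all remaining workers", mirroring the dynamic sizing added
// to RunScenario() in driver.cc.
static std::size_t ResolveClientCount(int requested_clients,
                                      std::size_t total_workers,
                                      std::size_t num_servers) {
  if (requested_clients > 0) {
    return static_cast<std::size_t>(requested_clients);
  }
  return total_workers - num_servers;  // every non-server worker is a client
}

int main() {
  // e.g. 5 spawned workers, 2 reserved as servers, client count left at 0.
  std::printf("clients = %zu\n", ResolveClientCount(0, 5, 2));  // prints 3
  return 0;
}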
test/cpp/qps/server_async.cc
@@ -50,8 +50,8 @@
 #include <grpc/support/log.h>
 #include <gtest/gtest.h>
-#include "test/cpp/qps/server.h"
 #include "src/proto/grpc/testing/services.grpc.pb.h"
+#include "test/cpp/qps/server.h"
 namespace grpc {
 namespace testing {
@@ -85,7 +85,13 @@ class AsyncQpsServerTest : public Server {
     register_service(&builder, &async_service_);
-    for (int i = 0; i < config.async_server_threads(); i++) {
+    int num_threads = config.async_server_threads();
+    if (num_threads <= 0) {  // dynamic sizing
+      num_threads = cores();
+      gpr_log(GPR_INFO, "Sizing async server to %d threads", num_threads);
+    }
+
+    for (int i = 0; i < num_threads; i++) {
       srv_cqs_.emplace_back(builder.AddCompletionQueue());
     }
@@ -96,8 +102,8 @@ class AsyncQpsServerTest : public Server {
     auto process_rpc_bound =
         std::bind(process_rpc, config.payload_config(), _1, _2);
-    for (int i = 0; i < 10000 / config.async_server_threads(); i++) {
-      for (int j = 0; j < config.async_server_threads(); j++) {
+    for (int i = 0; i < 10000 / num_threads; i++) {
+      for (int j = 0; j < num_threads; j++) {
         if (request_unary_function) {
           auto request_unary =
               std::bind(request_unary_function, &async_service_, _1, _2, _3,
@@ -115,10 +121,10 @@ class AsyncQpsServerTest : public Server {
       }
     }
-    for (int i = 0; i < config.async_server_threads(); i++) {
+    for (int i = 0; i < num_threads; i++) {
       shutdown_state_.emplace_back(new PerThreadShutdownState());
     }
-    for (int i = 0; i < config.async_server_threads(); i++) {
+    for (int i = 0; i < num_threads; i++) {
       threads_.emplace_back(&AsyncQpsServerTest::ThreadFunc, this, i);
     }
   }

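One subtlety in the server change above: the pre-registration loop divides a fixed budget of 10000 outstanding requests by the thread count, so the dynamic value has to be resolved before that division; with the old code a configured value of 0 would have divided by zero. A small sketch of that ordering, where PlanRequestSlots and the printed summary are illustrative and only the 10000 budget and the core-count fallback mirror the diff:

#include <grpc/support/cpu.h>
#include <cassert>
#include <cstdio>

// Illustrative (not in the patch): resolve the thread count first, then split
// a fixed budget of outstanding requests evenly across threads, as the server
// setup loop does with its 10000 / num_threads pre-registration.
static void PlanRequestSlots(int configured_threads) {
  int num_threads = configured_threads;
  if (num_threads <= 0) {
    num_threads = static_cast<int>(gpr_cpu_num_cores());  // dynamic sizing
  }
  assert(num_threads > 0);  // guards the division below
  int per_thread = 10000 / num_threads;
  std::printf("%d threads x %d pre-registered requests each\n", num_threads,
              per_thread);
}

int main() {
  PlanRequestSlots(0);  // dynamic: sized to the core count
  PlanRequestSlots(8);  // explicit setting kept as-is
  return 0;
}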