@@ -32,6 +32,7 @@
  */

 #include <cassert>
+#include <chrono>
 #include <memory>
 #include <mutex>
 #include <string>
@@ -57,6 +58,7 @@
 #include "test/cpp/qps/client.h"
 #include "test/cpp/qps/qpstest.grpc.pb.h"
 #include "test/cpp/qps/histogram.h"
+#include "test/cpp/qps/interarrival.h"
 #include "test/cpp/qps/timer.h"

 namespace grpc {
@@ -64,17 +66,66 @@ namespace testing {

 class SynchronousClient : public Client {
  public:
-  SynchronousClient(const ClientConfig& config) : Client(config) {
+  SynchronousClient(const ClientConfig& config) : Client(config),
+                                                  interarrival_timer_() {
     num_threads_ =
         config.outstanding_rpcs_per_channel() * config.client_channels();
     responses_.resize(num_threads_);
+
+    // Now sort out the load test type
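+    // CLOSED_LOOP issues each new request as soon as the previous one
+    // finishes; every other load type paces requests using randomly drawn
+    // interarrival times (open loop).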
+    if (config.load_type() == CLOSED_LOOP) {
+      closed_loop_ = true;
+    } else {
+      closed_loop_ = false;
+
+      std::unique_ptr<RandomDist> random_dist;
+      auto& load = config.load_parameters();
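+      // The configured parameters describe the aggregate offered load, but
+      // each thread draws its own interarrival times, so per-thread rates are
+      // divided by num_threads_ (equivalently, per-thread interarrival times
+      // are multiplied by num_threads_). For POISSON, ExpDist is expected to
+      // produce exponentially distributed gaps, yielding a Poisson arrival
+      // process at the requested offered_load.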
+      switch (config.load_type()) {
+        case POISSON:
+          random_dist.reset(
+              new ExpDist(load.poisson().offered_load() / num_threads_));
+          break;
+        case UNIFORM:
+          random_dist.reset(
+              new UniformDist(load.uniform().interarrival_lo() * num_threads_,
+                              load.uniform().interarrival_hi() * num_threads_));
+          break;
+        case DETERMINISTIC:
+          random_dist.reset(
+              new DetDist(num_threads_ / load.determ().offered_load()));
+          break;
+        case PARETO:
+          random_dist.reset(
+              new ParetoDist(load.pareto().interarrival_base() * num_threads_,
+                             load.pareto().alpha()));
+          break;
+        default:
+          GPR_ASSERT(false);
+          break;
+      }
+
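+      // Seed each thread's first issue time one interarrival interval from
+      // now; WaitToIssue() advances it after every request.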
+      interarrival_timer_.init(*random_dist, num_threads_);
+      for (size_t i = 0; i < num_threads_; i++) {
+        next_time_.push_back(std::chrono::high_resolution_clock::now() +
+                             interarrival_timer_(i));
+      }
+    }
   }

   virtual ~SynchronousClient() { EndThreads(); }

  protected:
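+  // In open-loop mode, block until this thread's next scheduled issue time
+  // and then schedule the one after it; in closed-loop mode this is a no-op.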
+  void WaitToIssue(int thread_idx) {
+    if (!closed_loop_) {
+      std::this_thread::sleep_until(next_time_[thread_idx]);
+      next_time_[thread_idx] += interarrival_timer_(thread_idx);
+    }
+  }
+
   size_t num_threads_;
   std::vector<SimpleResponse> responses_;
+ private:
+  bool closed_loop_;
+  InterarrivalTimer interarrival_timer_;
+  std::vector<std::chrono::time_point<std::chrono::high_resolution_clock>>
+      next_time_;
 };

 class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
@@ -82,8 +133,9 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
   SynchronousUnaryClient(const ClientConfig& config):
     SynchronousClient(config) {StartThreads(num_threads_);}
   ~SynchronousUnaryClient() {}

   bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
+    WaitToIssue(thread_idx);
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
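+    // Latency is measured from this point, after any open-loop pacing wait,
+    // so the histogram records only the RPC itself.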
     double start = Timer::Now();
     grpc::ClientContext context;
@@ -113,6 +165,7 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient {
   }

   bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
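+    // Pace the next request on the stream when running open-loop.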
+    WaitToIssue(thread_idx);
     double start = Timer::Now();
     if (stream_->Write(request_) && stream_->Read(&responses_[thread_idx])) {
       histogram->Add((Timer::Now() - start) * 1e9);