@@ -43,9 +43,9 @@
 #include <vector>
 
 #include <gflags/gflags.h>
+#include <grpc++/alarm.h>
 #include <grpc++/channel.h>
 #include <grpc++/client_context.h>
-#include <grpc++/client_context.h>
 #include <grpc++/generic/generic_stub.h>
 #include <grpc/grpc.h>
 #include <grpc/support/cpu.h>
@@ -60,11 +60,9 @@
 namespace grpc {
 namespace testing {
 
-typedef std::list<grpc_time> deadline_list;
-
 class ClientRpcContext {
  public:
-  explicit ClientRpcContext(int ch) : channel_id_(ch) {}
+  ClientRpcContext() {}
   virtual ~ClientRpcContext() {}
   // next state, return false if done. Collect stats when appropriate
   virtual bool RunNextState(bool, Histogram* hist) = 0;
@@ -74,72 +72,73 @@ class ClientRpcContext {
     return reinterpret_cast<ClientRpcContext*>(t);
   }
 
-  deadline_list::iterator deadline_posn() const { return deadline_posn_; }
-  void set_deadline_posn(const deadline_list::iterator& it) {
-    deadline_posn_ = it;
-  }
   virtual void Start(CompletionQueue* cq) = 0;
-  int channel_id() const { return channel_id_; }
-
- protected:
-  int channel_id_;
-
- private:
-  deadline_list::iterator deadline_posn_;
 };
 
 template <class RequestType, class ResponseType>
 class ClientRpcContextUnaryImpl : public ClientRpcContext {
  public:
   ClientRpcContextUnaryImpl(
-      int channel_id, BenchmarkService::Stub* stub, const RequestType& req,
+      BenchmarkService::Stub* stub, const RequestType& req,
+      std::function<gpr_timespec()> next_issue,
       std::function<
          std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
              BenchmarkService::Stub*, grpc::ClientContext*, const RequestType&,
              CompletionQueue*)> start_req,
      std::function<void(grpc::Status, ResponseType*)> on_done)
-      : ClientRpcContext(channel_id),
-        context_(),
+      : context_(),
         stub_(stub),
+        cq_(nullptr),
         req_(req),
         response_(),
-        next_state_(&ClientRpcContextUnaryImpl::RespDone),
+        next_state_(State::READY),
         callback_(on_done),
+        next_issue_(next_issue),
         start_req_(start_req) {}
+  ~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {}
   void Start(CompletionQueue* cq) GRPC_OVERRIDE {
-    start_ = Timer::Now();
-    response_reader_ = start_req_(stub_, &context_, req_, cq);
-    response_reader_->Finish(&response_, &status_, ClientRpcContext::tag(this));
+    cq_ = cq;
+    if (!next_issue_) {  // ready to issue
+      RunNextState(true, nullptr);
+    } else {  // wait for the issue time
+      alarm_.reset(new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
+    }
   }
-  ~ClientRpcContextUnaryImpl() GRPC_OVERRIDE {}
   bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
-    bool ret = (this->*next_state_)(ok);
-    if (!ret) {
+    switch (next_state_) {
+      case State::READY:
+        start_ = Timer::Now();
+        response_reader_ = start_req_(stub_, &context_, req_, cq_);
+        response_reader_->Finish(&response_, &status_,
+                                 ClientRpcContext::tag(this));
+        next_state_ = State::RESP_DONE;
+        return true;
+      case State::RESP_DONE:
         hist->Add((Timer::Now() - start_) * 1e9);
+        callback_(status_, &response_);
+        next_state_ = State::INVALID;
+        return false;
+      default:
+        GPR_ASSERT(false);
+        return false;
     }
-    return ret;
   }
 
   ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
-    return new ClientRpcContextUnaryImpl(channel_id_, stub_, req_, start_req_,
+    return new ClientRpcContextUnaryImpl(stub_, req_, next_issue_, start_req_,
                                          callback_);
   }
 
  private:
-  bool RespDone(bool) {
-    next_state_ = &ClientRpcContextUnaryImpl::DoCallBack;
-    return false;
-  }
-  bool DoCallBack(bool) {
-    callback_(status_, &response_);
-    return true;  // we're done, this'll be ignored
-  }
   grpc::ClientContext context_;
   BenchmarkService::Stub* stub_;
+  CompletionQueue* cq_;
+  std::unique_ptr<Alarm> alarm_;
   RequestType req_;
   ResponseType response_;
-  bool (ClientRpcContextUnaryImpl::*next_state_)(bool);
+  enum State { INVALID, READY, RESP_DONE };
+  State next_state_;
   std::function<void(grpc::Status, ResponseType*)> callback_;
+  std::function<gpr_timespec()> next_issue_;
   std::function<std::unique_ptr<grpc::ClientAsyncResponseReader<ResponseType>>(
       BenchmarkService::Stub*, grpc::ClientContext*, const RequestType&,
       CompletionQueue*)> start_req_;
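
The unary context above swaps the old pointer-to-member-function states (RespDone/DoCallBack) for a plain enum advanced by a switch, with the context pointer itself doubling as the completion-queue tag. A minimal standalone sketch of that pattern follows; the Ctx class and its states are illustrative only, not part of the patch:

    // Sketch: a context packs its own "what next" state; the event loop
    // just detags the void* it got back from the queue and advances it.
    #include <cassert>
    #include <cstdio>

    class Ctx {
     public:
      enum State { INVALID, READY, RESP_DONE };
      Ctx() : next_state_(READY) {}
      static void* tag(Ctx* c) { return reinterpret_cast<void*>(c); }
      static Ctx* detag(void* t) { return reinterpret_cast<Ctx*>(t); }
      // Returns false once the RPC is finished, mirroring RunNextState above.
      bool RunNextState() {
        switch (next_state_) {
          case READY:      // issue the call, then wait for its completion
            next_state_ = RESP_DONE;
            return true;
          case RESP_DONE:  // record latency, run the callback, retire
            next_state_ = INVALID;
            return false;
          default:
            assert(false);
            return false;
        }
      }
     private:
      State next_state_;
    };

    int main() {
      Ctx ctx;
      void* got_tag = Ctx::tag(&ctx);  // what the queue would hand back
      while (Ctx::detag(got_tag)->RunNextState()) {
      }
      std::printf("context retired\n");
    }
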
@@ -157,49 +156,35 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
   // member name resolution until the template types are fully resolved
  public:
   using Client::SetupLoadTest;
-  using Client::NextIssueTime;
   using Client::closed_loop_;
+  using Client::NextIssuer;
   using ClientImpl<StubType, RequestType>::cores_;
   using ClientImpl<StubType, RequestType>::channels_;
   using ClientImpl<StubType, RequestType>::request_;
   AsyncClient(const ClientConfig& config,
-              std::function<ClientRpcContext*(int, StubType*,
+              std::function<ClientRpcContext*(
+                  StubType*, std::function<gpr_timespec()> next_issue,
                   const RequestType&)> setup_ctx,
               std::function<std::unique_ptr<StubType>(std::shared_ptr<Channel>)>
                   create_stub)
       : ClientImpl<StubType, RequestType>(config, create_stub),
-        num_async_threads_(NumThreads(config)),
-        channel_lock_(new std::mutex[config.client_channels()]),
-        contexts_(config.client_channels()),
-        max_outstanding_per_channel_(config.outstanding_rpcs_per_channel()),
-        channel_count_(config.client_channels()),
-        pref_channel_inc_(num_async_threads_) {
+        num_async_threads_(NumThreads(config)) {
     SetupLoadTest(config, num_async_threads_);
 
     for (int i = 0; i < num_async_threads_; i++) {
       cli_cqs_.emplace_back(new CompletionQueue);
-      if (!closed_loop_) {
-        rpc_deadlines_.emplace_back();
-        next_channel_.push_back(i % channel_count_);
-        issue_allowed_.emplace_back(true);
-
-        grpc_time next_issue;
-        NextIssueTime(i, &next_issue);
-        next_issue_.push_back(next_issue);
-      }
+      next_issuers_.emplace_back(NextIssuer(i));
     }
 
-    using namespace std::placeholders;
     int t = 0;
    for (int i = 0; i < config.outstanding_rpcs_per_channel(); i++) {
-      for (int ch = 0; ch < channel_count_; ch++) {
+      for (int ch = 0; ch < config.client_channels(); ch++) {
         auto* cq = cli_cqs_[t].get();
-        t = (t + 1) % cli_cqs_.size();
-        auto ctx = setup_ctx(ch, channels_[ch].get_stub(), request_);
-        if (closed_loop_) {
+        auto ctx =
+            setup_ctx(channels_[ch].get_stub(), next_issuers_[t], request_);
         ctx->Start(cq);
-        } else {
-          contexts_[ch].push_front(ctx);
-        }
+        t = (t + 1) % cli_cqs_.size();
       }
     }
   }
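
Each context now receives a next-issue function instead of a channel id: an empty std::function means closed loop (issue again as soon as the previous RPC finishes), while a non-empty one yields the absolute time at which the next RPC may be issued. A self-contained sketch of such an issuer follows, using std::chrono in place of gpr_timespec and assuming Poisson (exponential inter-arrival) pacing; both are illustrative assumptions, not the patch's code:

    #include <chrono>
    #include <functional>
    #include <memory>
    #include <random>

    using Clock = std::chrono::steady_clock;
    using NextIssuer = std::function<Clock::time_point()>;

    NextIssuer MakeIssuer(bool closed_loop, double rps) {
      if (closed_loop) {
        return NextIssuer();  // empty; "if (!next_issue_)" fires immediately
      }
      // Open loop: exponential inter-arrival times model a Poisson arrival
      // process, so issue times do not depend on how long each RPC takes.
      auto gen = std::make_shared<std::mt19937>(std::random_device{}());
      auto dist = std::exponential_distribution<double>(rps);
      auto next = std::make_shared<Clock::time_point>(Clock::now());
      return [gen, dist, next]() mutable {
        *next += std::chrono::duration_cast<Clock::duration>(
            std::chrono::duration<double>(dist(*gen)));
        return *next;
      };
    }
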
@@ -212,140 +197,34 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
         delete ClientRpcContext::detag(got_tag);
       }
     }
-    // Now clear out all the pre-allocated idle contexts
-    for (int ch = 0; ch < channel_count_; ch++) {
-      while (!contexts_[ch].empty()) {
-        // Get an idle context from the front of the list
-        auto* ctx = *(contexts_[ch].begin());
-        contexts_[ch].pop_front();
-        delete ctx;
-      }
-    }
-    delete[] channel_lock_;
   }
 
   bool ThreadFunc(Histogram* histogram,
                   size_t thread_idx) GRPC_OVERRIDE GRPC_FINAL {
     void* got_tag;
     bool ok;
-    grpc_time deadline, short_deadline;
-    if (closed_loop_) {
-      deadline = grpc_time_source::now() + std::chrono::seconds(1);
-      short_deadline = deadline;
-    } else {
-      if (rpc_deadlines_[thread_idx].empty()) {
-        deadline = grpc_time_source::now() + std::chrono::seconds(1);
-      } else {
-        deadline = *(rpc_deadlines_[thread_idx].begin());
-      }
-      short_deadline =
-          issue_allowed_[thread_idx] ? next_issue_[thread_idx] : deadline;
-    }
-
-    bool got_event;
-
-    switch (cli_cqs_[thread_idx]->AsyncNext(&got_tag, &ok, short_deadline)) {
-      case CompletionQueue::SHUTDOWN:
-        return false;
-      case CompletionQueue::TIMEOUT:
-        got_event = false;
-        break;
-      case CompletionQueue::GOT_EVENT:
-        got_event = true;
-        break;
-      default:
-        GPR_ASSERT(false);
-        break;
-    }
-    if (got_event) {
+    if (cli_cqs_[thread_idx]->Next(&got_tag, &ok)) {
+      // Got a regular event, so process it
       ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
-      if (ctx->RunNextState(ok, histogram) == false) {
-        // call the callback and then clone the ctx
-        ctx->RunNextState(ok, histogram);
-        ClientRpcContext* clone_ctx = ctx->StartNewClone();
-        if (closed_loop_) {
-          clone_ctx->Start(cli_cqs_[thread_idx].get());
-        } else {
-          // Remove the entry from the rpc deadlines list
-          rpc_deadlines_[thread_idx].erase(ctx->deadline_posn());
-          // Put the clone_ctx in the list of idle contexts for this channel
-          // Under lock
-          int ch = clone_ctx->channel_id();
-          std::lock_guard<std::mutex> g(channel_lock_[ch]);
-          contexts_[ch].push_front(clone_ctx);
-        }
+      if (!ctx->RunNextState(ok, histogram)) {
+        // The RPC and callback are done, so clone the ctx
+        // and kickstart the new one
+        auto clone = ctx->StartNewClone();
+        clone->Start(cli_cqs_[thread_idx].get());
         // delete the old version
         delete ctx;
       }
-      if (!closed_loop_)
-        issue_allowed_[thread_idx] =
-            true;  // may be ok now even if it hadn't been
-    }
-    if (!closed_loop_ && issue_allowed_[thread_idx] &&
-        grpc_time_source::now() >= next_issue_[thread_idx]) {
-      // Attempt to issue
-      bool issued = false;
-      for (int num_attempts = 0, channel_attempt = next_channel_[thread_idx];
-           num_attempts < channel_count_ && !issued; num_attempts++) {
-        bool can_issue = false;
-        ClientRpcContext* ctx = nullptr;
-        {
-          std::lock_guard<std::mutex> g(channel_lock_[channel_attempt]);
-          if (!contexts_[channel_attempt].empty()) {
-            // Get an idle context from the front of the list
-            ctx = *(contexts_[channel_attempt].begin());
-            contexts_[channel_attempt].pop_front();
-            can_issue = true;
-          }
-        }
-        if (can_issue) {
-          // do the work to issue
-          rpc_deadlines_[thread_idx].emplace_back(grpc_time_source::now() +
-                                                  std::chrono::seconds(1));
-          auto it = rpc_deadlines_[thread_idx].end();
-          --it;
-          ctx->set_deadline_posn(it);
-          ctx->Start(cli_cqs_[thread_idx].get());
-          issued = true;
-          // If we did issue, then next time, try our thread's next
-          // preferred channel
-          next_channel_[thread_idx] += pref_channel_inc_;
-          if (next_channel_[thread_idx] >= channel_count_)
-            next_channel_[thread_idx] = (thread_idx % channel_count_);
-        } else {
-          // Do a modular increment of channel attempt if we couldn't issue
-          channel_attempt = (channel_attempt + 1) % channel_count_;
-        }
-      }
-      if (issued) {
-        // We issued one; see when we can issue the next
-        grpc_time next_issue;
-        NextIssueTime(thread_idx, &next_issue);
-        next_issue_[thread_idx] = next_issue;
-      } else {
-        issue_allowed_[thread_idx] = false;
-      }
-    }
     return true;
+    } else {  // queue is shutting down
+      return false;
+    }
   }
 
  protected:
-  int num_async_threads_;
+  const int num_async_threads_;
 
  private:
-  class boolean {  // exists only to avoid data-race on vector<bool>
-   public:
-    boolean() : val_(false) {}
-    boolean(bool b) : val_(b) {}
-    operator bool() const { return val_; }
-    boolean& operator=(bool b) {
-      val_ = b;
-      return *this;
-    }
-
-   private:
-    bool val_;
-  };
   int NumThreads(const ClientConfig& config) {
     int num_threads = config.async_client_threads();
     if (num_threads <= 0) {  // Use dynamic sizing
@@ -356,18 +235,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
   }
 
   std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
-
-  std::vector<deadline_list> rpc_deadlines_;  // per thread deadlines
-  std::vector<int> next_channel_;       // per thread round-robin channel ctr
-  std::vector<boolean> issue_allowed_;  // may this thread attempt to issue
-  std::vector<grpc_time> next_issue_;   // when should it issue?
-
-  std::mutex*
-      channel_lock_;  // a vector, but avoid std::vector for old compilers
-  std::vector<context_list> contexts_;  // per-channel list of idle contexts
-  int max_outstanding_per_channel_;
-  int channel_count_;
-  int pref_channel_inc_;
+  std::vector<std::function<gpr_timespec()>> next_issuers_;
 };
 
 static std::unique_ptr<BenchmarkService::Stub> BenchmarkStubCreator(
@@ -391,11 +259,11 @@ class AsyncUnaryClient GRPC_FINAL
                               const SimpleRequest& request, CompletionQueue* cq) {
     return stub->AsyncUnaryCall(ctx, request, cq);
   };
-  static ClientRpcContext* SetupCtx(int channel_id,
-                                    BenchmarkService::Stub* stub,
+  static ClientRpcContext* SetupCtx(BenchmarkService::Stub* stub,
+                                    std::function<gpr_timespec()> next_issue,
                                     const SimpleRequest& req) {
     return new ClientRpcContextUnaryImpl<SimpleRequest, SimpleResponse>(
-        channel_id, stub, req, AsyncUnaryClient::StartReq,
+        stub, req, next_issue, AsyncUnaryClient::StartReq,
         AsyncUnaryClient::CheckDone);
   }
 };
@@ -404,62 +272,94 @@ template <class RequestType, class ResponseType>
 class ClientRpcContextStreamingImpl : public ClientRpcContext {
  public:
   ClientRpcContextStreamingImpl(
-      int channel_id, BenchmarkService::Stub* stub, const RequestType& req,
+      BenchmarkService::Stub* stub, const RequestType& req,
+      std::function<gpr_timespec()> next_issue,
       std::function<std::unique_ptr<
           grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>(
           BenchmarkService::Stub*, grpc::ClientContext*, CompletionQueue*,
           void*)> start_req,
       std::function<void(grpc::Status, ResponseType*)> on_done)
-      : ClientRpcContext(channel_id),
-        context_(),
+      : context_(),
         stub_(stub),
+        cq_(nullptr),
         req_(req),
         response_(),
-        next_state_(&ClientRpcContextStreamingImpl::ReqSent),
+        next_state_(State::INVALID),
         callback_(on_done),
+        next_issue_(next_issue),
         start_req_(start_req),
         start_(Timer::Now()) {}
   ~ClientRpcContextStreamingImpl() GRPC_OVERRIDE {}
-  bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
-    return (this->*next_state_)(ok, hist);
-  }
-  ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
-    return new ClientRpcContextStreamingImpl(channel_id_, stub_, req_,
-                                             start_req_, callback_);
-  }
   void Start(CompletionQueue* cq) GRPC_OVERRIDE {
+    cq_ = cq;
     stream_ = start_req_(stub_, &context_, cq, ClientRpcContext::tag(this));
+    next_state_ = State::STREAM_IDLE;
   }
-
- private:
-  bool ReqSent(bool ok, Histogram*) { return StartWrite(ok); }
-  bool StartWrite(bool ok) {
+  bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+    while (true) {
+      switch (next_state_) {
+        case State::STREAM_IDLE:
+          if (!next_issue_) {  // ready to issue
+            next_state_ = State::READY_TO_WRITE;
+          } else {
+            next_state_ = State::WAIT;
+          }
+          break;  // loop around, don't return
+        case State::WAIT:
+          alarm_.reset(
+              new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
+          next_state_ = State::READY_TO_WRITE;
+          return true;
+        case State::READY_TO_WRITE:
           if (!ok) {
-      return (false);
+            return false;
           }
           start_ = Timer::Now();
-    next_state_ = &ClientRpcContextStreamingImpl::WriteDone;
+          next_state_ = State::WRITE_DONE;
           stream_->Write(req_, ClientRpcContext::tag(this));
           return true;
-  }
-  bool WriteDone(bool ok, Histogram*) {
+        case State::WRITE_DONE:
           if (!ok) {
-      return (false);
+            return false;
           }
-    next_state_ = &ClientRpcContextStreamingImpl::ReadDone;
+          next_state_ = State::READ_DONE;
           stream_->Read(&response_, ClientRpcContext::tag(this));
           return true;
-  }
-  bool ReadDone(bool ok, Histogram* hist) {
+          break;
+        case State::READ_DONE:
           hist->Add((Timer::Now() - start_) * 1e9);
-    return StartWrite(ok);
+          callback_(status_, &response_);
+          next_state_ = State::STREAM_IDLE;
+          break;  // loop around
+        default:
+          GPR_ASSERT(false);
+          return false;
+      }
+    }
   }
+  ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
+    return new ClientRpcContextStreamingImpl(stub_, req_, next_issue_,
+                                             start_req_, callback_);
+  }
 
  private:
   grpc::ClientContext context_;
   BenchmarkService::Stub* stub_;
+  CompletionQueue* cq_;
+  std::unique_ptr<Alarm> alarm_;
   RequestType req_;
   ResponseType response_;
-  bool (ClientRpcContextStreamingImpl::*next_state_)(bool, Histogram*);
+  enum State {
+    INVALID,
+    STREAM_IDLE,
+    WAIT,
+    READY_TO_WRITE,
+    WRITE_DONE,
+    READ_DONE
+  };
+  State next_state_;
   std::function<void(grpc::Status, ResponseType*)> callback_;
+  std::function<gpr_timespec()> next_issue_;
   std::function<
      std::unique_ptr<grpc::ClientAsyncReaderWriter<RequestType, ResponseType>>(
          BenchmarkService::Stub*, grpc::ClientContext*, CompletionQueue*,
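
The streaming context drives a while/switch machine in which some transitions are purely internal (break and loop around) and others hand a tag to the completion queue (return and wait for the matching event). A standalone sketch of that control flow with illustrative types, not the patch's own code:

    // Sketch: drive the machine until a transition blocks on the queue.
    enum class State { STREAM_IDLE, WAIT, READY_TO_WRITE, WRITE_DONE, READ_DONE };

    class StreamCtx {
     public:
      bool RunNextState(bool ok) {
        while (true) {
          switch (state_) {
            case State::STREAM_IDLE:
              state_ = paced_ ? State::WAIT : State::READY_TO_WRITE;
              break;  // internal transition: loop around, don't return
            case State::WAIT:
              state_ = State::READY_TO_WRITE;
              return true;  // models an alarm tag now pending on the queue
            case State::READY_TO_WRITE:
              if (!ok) return false;
              state_ = State::WRITE_DONE;
              return true;  // models a Write() tag pending
            case State::WRITE_DONE:
              if (!ok) return false;
              state_ = State::READ_DONE;
              return true;  // models a Read() tag pending
            case State::READ_DONE:
              state_ = State::STREAM_IDLE;
              break;  // record the result, loop around to issue again
          }
        }
      }
     private:
      State state_ = State::STREAM_IDLE;
      bool paced_ = false;
    };

    int main() {
      StreamCtx ctx;
      // One full round trip: each 'true' models a queue event arriving.
      bool alive = ctx.RunNextState(true);      // idle -> write posted
      alive = alive && ctx.RunNextState(true);  // write done -> read posted
      alive = alive && ctx.RunNextState(true);  // read done -> next write
      return alive ? 0 : 1;
    }
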
@@ -475,9 +375,6 @@ class AsyncStreamingClient GRPC_FINAL
  public:
   explicit AsyncStreamingClient(const ClientConfig& config)
       : AsyncClient(config, SetupCtx, BenchmarkStubCreator) {
-    // async streaming currently only supports closed loop
-    GPR_ASSERT(closed_loop_);
-
     StartThreads(num_async_threads_);
   }
@@ -492,11 +389,11 @@ class AsyncStreamingClient GRPC_FINAL
     auto stream = stub->AsyncStreamingCall(ctx, cq, tag);
     return stream;
   };
-  static ClientRpcContext* SetupCtx(int channel_id,
-                                    BenchmarkService::Stub* stub,
+  static ClientRpcContext* SetupCtx(BenchmarkService::Stub* stub,
+                                    std::function<gpr_timespec()> next_issue,
                                     const SimpleRequest& req) {
     return new ClientRpcContextStreamingImpl<SimpleRequest, SimpleResponse>(
-        channel_id, stub, req, AsyncStreamingClient::StartReq,
+        stub, req, next_issue, AsyncStreamingClient::StartReq,
         AsyncStreamingClient::CheckDone);
   }
 };
@@ -504,64 +401,96 @@ class AsyncStreamingClient GRPC_FINAL
 class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
  public:
   ClientRpcContextGenericStreamingImpl(
-      int channel_id, grpc::GenericStub* stub, const ByteBuffer& req,
+      grpc::GenericStub* stub, const ByteBuffer& req,
+      std::function<gpr_timespec()> next_issue,
       std::function<std::unique_ptr<grpc::GenericClientAsyncReaderWriter>(
           grpc::GenericStub*, grpc::ClientContext*,
          const grpc::string& method_name, CompletionQueue*, void*)> start_req,
      std::function<void(grpc::Status, ByteBuffer*)> on_done)
-      : ClientRpcContext(channel_id),
-        context_(),
+      : context_(),
         stub_(stub),
+        cq_(nullptr),
         req_(req),
         response_(),
-        next_state_(&ClientRpcContextGenericStreamingImpl::ReqSent),
+        next_state_(State::INVALID),
         callback_(on_done),
+        next_issue_(next_issue),
         start_req_(start_req),
         start_(Timer::Now()) {}
   ~ClientRpcContextGenericStreamingImpl() GRPC_OVERRIDE {}
-  bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
-    return (this->*next_state_)(ok, hist);
-  }
-  ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
-    return new ClientRpcContextGenericStreamingImpl(channel_id_, stub_, req_,
-                                                    start_req_, callback_);
-  }
   void Start(CompletionQueue* cq) GRPC_OVERRIDE {
+    cq_ = cq;
     const grpc::string kMethodName(
         "/grpc.testing.BenchmarkService/StreamingCall");
     stream_ = start_req_(stub_, &context_, kMethodName, cq,
                          ClientRpcContext::tag(this));
+    next_state_ = State::STREAM_IDLE;
   }
-
- private:
-  bool ReqSent(bool ok, Histogram*) { return StartWrite(ok); }
-  bool StartWrite(bool ok) {
+  bool RunNextState(bool ok, Histogram* hist) GRPC_OVERRIDE {
+    while (true) {
+      switch (next_state_) {
+        case State::STREAM_IDLE:
+          if (!next_issue_) {  // ready to issue
+            next_state_ = State::READY_TO_WRITE;
+          } else {
+            next_state_ = State::WAIT;
+          }
+          break;  // loop around, don't return
+        case State::WAIT:
+          alarm_.reset(
+              new Alarm(cq_, next_issue_(), ClientRpcContext::tag(this)));
+          next_state_ = State::READY_TO_WRITE;
+          return true;
+        case State::READY_TO_WRITE:
           if (!ok) {
-      return (false);
+            return false;
           }
           start_ = Timer::Now();
-    next_state_ = &ClientRpcContextGenericStreamingImpl::WriteDone;
+          next_state_ = State::WRITE_DONE;
           stream_->Write(req_, ClientRpcContext::tag(this));
           return true;
-  }
-  bool WriteDone(bool ok, Histogram*) {
+        case State::WRITE_DONE:
           if (!ok) {
-      return (false);
+            return false;
           }
-    next_state_ = &ClientRpcContextGenericStreamingImpl::ReadDone;
+          next_state_ = State::READ_DONE;
           stream_->Read(&response_, ClientRpcContext::tag(this));
           return true;
-  }
-  bool ReadDone(bool ok, Histogram* hist) {
+          break;
+        case State::READ_DONE:
           hist->Add((Timer::Now() - start_) * 1e9);
-    return StartWrite(ok);
+          callback_(status_, &response_);
+          next_state_ = State::STREAM_IDLE;
+          break;  // loop around
+        default:
+          GPR_ASSERT(false);
+          return false;
+      }
+    }
   }
+  ClientRpcContext* StartNewClone() GRPC_OVERRIDE {
+    return new ClientRpcContextGenericStreamingImpl(stub_, req_, next_issue_,
+                                                    start_req_, callback_);
+  }
 
  private:
   grpc::ClientContext context_;
   grpc::GenericStub* stub_;
+  CompletionQueue* cq_;
+  std::unique_ptr<Alarm> alarm_;
   ByteBuffer req_;
   ByteBuffer response_;
-  bool (ClientRpcContextGenericStreamingImpl::*next_state_)(bool, Histogram*);
+  enum State {
+    INVALID,
+    STREAM_IDLE,
+    WAIT,
+    READY_TO_WRITE,
+    WRITE_DONE,
+    READ_DONE
+  };
+  State next_state_;
   std::function<void(grpc::Status, ByteBuffer*)> callback_;
+  std::function<gpr_timespec()> next_issue_;
   std::function<std::unique_ptr<grpc::GenericClientAsyncReaderWriter>(
       grpc::GenericStub*, grpc::ClientContext*, const grpc::string&,
       CompletionQueue*, void*)> start_req_;
@@ -580,9 +509,6 @@ class GenericAsyncStreamingClient GRPC_FINAL
  public:
   explicit GenericAsyncStreamingClient(const ClientConfig& config)
       : AsyncClient(config, SetupCtx, GenericStubCreator) {
-    // async streaming currently only supports closed loop
-    GPR_ASSERT(closed_loop_);
-
     StartThreads(num_async_threads_);
   }
@@ -596,10 +522,11 @@ class GenericAsyncStreamingClient GRPC_FINAL
     auto stream = stub->Call(ctx, method_name, cq, tag);
     return stream;
   };
-  static ClientRpcContext* SetupCtx(int channel_id, grpc::GenericStub* stub,
+  static ClientRpcContext* SetupCtx(grpc::GenericStub* stub,
+                                    std::function<gpr_timespec()> next_issue,
                                     const ByteBuffer& req) {
     return new ClientRpcContextGenericStreamingImpl(
-        channel_id, stub, req, GenericAsyncStreamingClient::StartReq,
+        stub, req, next_issue, GenericAsyncStreamingClient::StartReq,
         GenericAsyncStreamingClient::CheckDone);
   }
 };