clang-format

commit 5fde20d9f0 (pull/7285/head)
parent 40317fd720
Author: Vijay Pai (9 years ago)

6 files changed:
  test/cpp/qps/client.h        | 18
  test/cpp/qps/client_async.cc |  3
  test/cpp/qps/client_sync.cc  |  1
  test/cpp/qps/driver.cc       | 10
  test/cpp/qps/qps_worker.cc   |  4
  test/cpp/qps/server_async.cc |  7

test/cpp/qps/client.h:

@@ -114,10 +114,14 @@ class ClientRequestCreator<ByteBuffer> {
 class HistogramEntry GRPC_FINAL {
  public:
-  HistogramEntry(): used_(false) {}
-  bool used() const {return used_;}
-  double value() const {return value_;}
-  void set_value(double v) {used_ = true; value_ = v;}
+  HistogramEntry() : used_(false) {}
+  bool used() const { return used_; }
+  double value() const { return value_; }
+  void set_value(double v) {
+    used_ = true;
+    value_ = v;
+  }
+
  private:
   bool used_;
   double value_;

@@ -171,6 +175,7 @@ class Client {
       threads_complete_.wait(g);
     }
   }
+
  protected:
   bool closed_loop_;

@@ -254,8 +259,7 @@ class Client {
     n->Swap(&histogram_);
   }

-  void EndSwap() {
-  }
+  void EndSwap() {}

   void MergeStatsInto(Histogram* hist) {
     std::unique_lock<std::mutex> g(mu_);

@@ -281,7 +285,7 @@ class Client {
         done_ = true;
       }
       if (done_) {
-        client_->CompleteThread();
+        client_->CompleteThread();
         return;
       }
     }
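The client.h hunks above reformat a small per-thread bookkeeping pattern: each worker fills a HistogramEntry per iteration, and the owning Client merges used entries into a histogram under a lock, later swapping the histogram out for reporting. Below is a minimal, self-contained sketch of that pattern; HistogramEntry mirrors the formatted hunk, while Samples, the worker lambda, and everything else are illustrative stand-ins, not code from the gRPC sources.

// Sketch only: the entry-then-merge pattern visible in the hunks above.
// "Samples" stands in for the real Histogram type; everything except
// HistogramEntry's interface is hypothetical.
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class HistogramEntry {  // mirrors the formatted class above (minus GRPC_FINAL)
 public:
  HistogramEntry() : used_(false) {}
  bool used() const { return used_; }
  double value() const { return value_; }
  void set_value(double v) {
    used_ = true;
    value_ = v;
  }

 private:
  bool used_;
  double value_;
};

struct Samples {  // hypothetical stand-in for the benchmark's Histogram
  std::vector<double> v;
  void Add(double x) { v.push_back(x); }
  void Swap(Samples* other) { v.swap(other->v); }
};

int main() {
  std::mutex mu;
  Samples histogram;  // per-thread accumulator, guarded by mu

  // A worker reports at most one measurement per iteration through an entry;
  // only entries that were actually used get merged.
  std::thread worker([&] {
    for (int i = 0; i < 1000; ++i) {
      HistogramEntry entry;
      if (i % 2 == 0) entry.set_value(i * 0.5);  // pretend latency sample
      std::lock_guard<std::mutex> g(mu);
      if (entry.used()) histogram.Add(entry.value());
    }
  });
  worker.join();

  // A reporter swaps the accumulated samples out under the same lock,
  // mirroring the Swap()/MergeStatsInto() calls in the diff.
  Samples snapshot;
  {
    std::lock_guard<std::mutex> g(mu);
    histogram.Swap(&snapshot);
  }
  std::printf("collected %zu samples\n", snapshot.v.size());
  return 0;
}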

test/cpp/qps/client_async.cc:

@@ -198,6 +198,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
       }
     }
   }
+
  protected:
   const int num_async_threads_;

@@ -224,7 +225,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
     for (auto cq = cli_cqs_.begin(); cq != cli_cqs_.end(); cq++) {
       (*cq)->Shutdown();
     }
-    this->EndThreads();  // this needed for resolution
+    this->EndThreads();  // this needed for resolution
   }

   bool ThreadFunc(HistogramEntry* entry,
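The second client_async.cc hunk sits in the completion-queue teardown of AsyncClient: every queue in cli_cqs_ is shut down and the worker threads are ended afterwards. Below is a minimal, standalone sketch of the underlying grpc::CompletionQueue shutdown-and-drain sequence, with no RPCs in flight so Next() returns false immediately; it is not the benchmark code itself.

// Sketch only: the Shutdown()-then-drain sequence the AsyncClient teardown
// relies on. With no operations pending, Next() returns false right away.
#include <grpc++/grpc++.h>

int main() {
  grpc::CompletionQueue cq;

  // 1. Ask the queue to shut down; already-enqueued events are still delivered.
  cq.Shutdown();

  // 2. Drain: keep calling Next() until it returns false, handling or deleting
  //    whatever tags come back (none here, since nothing was started).
  void* tag;
  bool ok;
  while (cq.Next(&tag, &ok)) {
    // handle or delete the tagged context here
  }
  return 0;
}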

test/cpp/qps/client_sync.cc:

@@ -87,6 +87,7 @@ class SynchronousClient
   size_t num_threads_;
   std::vector<SimpleResponse> responses_;
+
  private:
   void DestroyMultithreading() GRPC_OVERRIDE GRPC_FINAL { EndThreads(); }
 };
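DestroyMultithreading in the client_sync.cc hunk is declared with the GRPC_OVERRIDE and GRPC_FINAL portability macros, which expand to the C++11 override and final keywords on compilers that support them. Below is a plain-C++11 sketch of the same override-and-seal idiom; the class names and the Shutdown() driver are hypothetical, only the virt-specifier usage mirrors the diff.

// Sketch only: the virtual-hook pattern behind DestroyMultithreading(),
// written with plain C++11 override/final instead of the GRPC_* macros.
#include <iostream>

class ClientBase {
 public:
  virtual ~ClientBase() {}
  void Shutdown() { DestroyMultithreading(); }  // hypothetical driver for the hook

 protected:
  virtual void DestroyMultithreading() = 0;
};

class SyncishClient : public ClientBase {  // illustrative; not the real SynchronousClient
 private:
  void EndThreads() { std::cout << "joining worker threads\n"; }

  // 'override final': must override a base virtual and cannot be overridden
  // further, which is what GRPC_OVERRIDE GRPC_FINAL spell in the hunk above.
  void DestroyMultithreading() override final { EndThreads(); }
};

int main() {
  SyncishClient c;
  c.Shutdown();
  return 0;
}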

test/cpp/qps/driver.cc:

@@ -348,7 +348,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     ClientArgs args;
     *args.mutable_setup() = per_client_config;
     clients[i].stream =
-        clients[i].stub->RunClient(runsc::AllocContext(&contexts));
+        clients[i].stub->RunClient(runsc::AllocContext(&contexts));
     if (!clients[i].stream->Write(args)) {
       gpr_log(GPR_ERROR, "Could not write args to client %zu", i);
     }

@@ -439,7 +439,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result->add_client_success(s.ok());
     if (!s.ok()) {
       gpr_log(GPR_ERROR, "Client %zu had an error %s", i,
-              s.error_message().c_str());
+              s.error_message().c_str());
     }
   }
   delete[] clients;

@@ -475,7 +475,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result->add_server_success(s.ok());
     if (!s.ok()) {
       gpr_log(GPR_ERROR, "Server %zu had an error %s", i,
-              s.error_message().c_str());
+              s.error_message().c_str());
     }
   }

@@ -497,8 +497,8 @@ bool RunQuit() {
     ctx.set_fail_fast(false);
     Status s = stub->QuitWorker(&ctx, dummy, &dummy);
     if (!s.ok()) {
-      gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s",
-              i, s.error_message().c_str());
+      gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
+              s.error_message().c_str());
       result = false;
     }
   }
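All four driver.cc hunks reformat the same error-reporting idiom: check the grpc::Status obtained for a worker and log its message with gpr_log alongside a %zu worker index. Below is a standalone sketch of just that idiom; the Status is constructed by hand here rather than coming from a real RunClient/QuitWorker stream.

// Sketch only: the Status-check-and-log idiom touched by the driver hunks.
// The Status below is fabricated locally; in driver.cc it comes from the
// per-worker stub and stream results.
#include <cstddef>

#include <grpc++/grpc++.h>
#include <grpc/support/log.h>

int main() {
  size_t i = 0;  // worker index, as in the "%zu" format strings above
  grpc::Status s(grpc::StatusCode::UNKNOWN, "example failure");

  if (!s.ok()) {
    gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
            s.error_message().c_str());
  }
  return 0;
}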

test/cpp/qps/qps_worker.cc:

@@ -222,7 +222,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
       }
       *status.mutable_stats() = client->Mark(args.mark().reset());
       if (!stream->Write(status)) {
-        return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark");
+        return Status(StatusCode::UNKNOWN, "Client couldn't respond to mark");
       }
       gpr_log(GPR_INFO, "RunClientBody: Mark response given");
     }

@@ -267,7 +267,7 @@ class WorkerServiceImpl GRPC_FINAL : public WorkerService::Service {
       }
       *status.mutable_stats() = server->Mark(args.mark().reset());
       if (!stream->Write(status)) {
-        return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark");
+        return Status(StatusCode::UNKNOWN, "Server couldn't respond to mark");
       }
       gpr_log(GPR_INFO, "RunServerBody: Mark response given");
     }

test/cpp/qps/server_async.cc:

@@ -132,8 +132,7 @@ class AsyncQpsServerTest : public Server {
       (*ss)->shutdown = true;
     }
     // TODO (vpai): Remove this deadline and allow Shutdown to finish properly
-    auto deadline =
-        std::chrono::system_clock::now() + std::chrono::seconds(3);
+    auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(3);
     server_->Shutdown(deadline);
     for (auto cq = srv_cqs_.begin(); cq != srv_cqs_.end(); ++cq) {
       (*cq)->Shutdown();

@@ -164,7 +163,9 @@ class AsyncQpsServerTest : public Server {
       // Proceed while holding a lock to make sure that
       // this thread isn't supposed to shut down
       std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
-      if (shutdown_state_[thread_idx]->shutdown) { return; }
+      if (shutdown_state_[thread_idx]->shutdown) {
+        return;
+      }
       const bool still_going = ctx->RunNextState(ok);
       // if this RPC context is done, refresh it
       if (!still_going) {
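The server_async.cc hunks sit in the teardown path of AsyncQpsServerTest: shut the server down with a 3-second deadline, then shut down and drain the server completion queues. Below is a minimal sketch of just the deadline-bounded grpc::Server::Shutdown call for a server with no services registered; the localhost:0 listening address and the Wait() pairing are illustrative assumptions, and the completion-queue drain would follow the same Shutdown()/Next() sequence sketched after the client_async.cc hunks.

// Sketch only: deadline-bounded shutdown, mirroring the TODO'd deadline above.
// No services are registered, so this exercises nothing but the teardown path.
#include <chrono>
#include <memory>

#include <grpc++/grpc++.h>

int main() {
  grpc::ServerBuilder builder;
  int selected_port = 0;
  builder.AddListeningPort("localhost:0", grpc::InsecureServerCredentials(),
                           &selected_port);
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  if (!server) return 1;

  // Allow in-flight RPCs (none here) up to 3 seconds, then force cancellation.
  auto deadline = std::chrono::system_clock::now() + std::chrono::seconds(3);
  server->Shutdown(deadline);
  server->Wait();  // returns once shutdown has completed
  return 0;
}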
