Address concerns from review

pull/2771/head
vjpai 9 years ago
parent ad3e00c220
commit b1db869e1a
Changed files:
  1. test/cpp/qps/client.h (7 changes)
  2. test/cpp/qps/driver.cc (8 changes)
  3. test/cpp/qps/driver.h (6 changes)
  4. test/cpp/qps/interarrival.h (2 changes)
  5. test/cpp/qps/qps_driver.cc (2 changes)
  6. test/cpp/qps/report.cc (8 changes)
  7. test/cpp/qps/server_async.cc (2 changes)

@@ -42,7 +42,6 @@
 #include <condition_variable>
 #include <mutex>
-#include <grpc++/config.h>
 #include <grpc++/config.h>

 namespace grpc {
@@ -83,7 +82,7 @@ class Client {
   ClientStats Mark() {
     Histogram latencies;
-    // avoid std::vector for old compilers
+    // avoid std::vector for old compilers that expect a copy constructor
     Histogram *to_merge = new Histogram[threads_.size()];
     for (size_t i = 0; i < threads_.size(); i++) {
       threads_[i]->BeginSwap(&to_merge[i]);
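The Mark() path hands each worker thread a fresh histogram and merges the ones swapped out. A minimal sketch of that swap-and-merge pattern, with a hypothetical bare-bones Histogram standing in for gRPC's real one (the thread synchronization behind BeginSwap/EndSwap is elided):

    #include <cstddef>

    // Hypothetical stand-in for gRPC's Histogram: just enough API
    // for the swap-and-merge pattern.
    class Histogram {
     public:
      Histogram() : count_(0), sum_(0) {}
      void Add(double v) { count_++; sum_ += v; }
      void Swap(Histogram* other) {  // exchange contents, no allocation
        size_t c = count_; count_ = other->count_; other->count_ = c;
        double s = sum_; sum_ = other->sum_; other->sum_ = s;
      }
      void Merge(const Histogram& other) { count_ += other.count_; sum_ += other.sum_; }
      size_t count() const { return count_; }
     private:
      size_t count_;
      double sum_;
    };

    // Swap each per-thread histogram with an empty slot, then merge the
    // full ones; a raw array keeps old std::vector requirements out of play.
    Histogram CollectStats(Histogram* per_thread, size_t n) {
      Histogram merged;
      Histogram* to_merge = new Histogram[n];
      for (size_t i = 0; i < n; i++) {
        per_thread[i].Swap(&to_merge[i]);  // thread i now records into a reset histogram
        merged.Merge(to_merge[i]);
      }
      delete[] to_merge;
      return merged;
    }

The swap keeps each thread's pause short: the thread only exchanges pointers-worth of state, and the expensive merge happens on the collecting thread.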
@@ -113,7 +112,7 @@ class Client {
   class ClientChannelInfo {
    public:
     ClientChannelInfo() {}
-    ClientChannelInfo(const ClientChannelInfo& i) : channel_(), stub_() {
+    ClientChannelInfo(const ClientChannelInfo& i) {
       // The copy constructor is to satisfy old compilers
       // that need it for using std::vector . It is only ever
       // used for empty entries
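The constructor above exists only because gcc-4.4's std::vector insists that element types be copy-constructible even when no element is ever actually copied. A hedged sketch of the idiom with a hypothetical ChannelSlot (the deliberately empty copy constructor copies nothing, which is safe only because it only ever runs on empty entries):

    #include <memory>
    #include <vector>

    class ChannelSlot {
     public:
      ChannelSlot() {}
      // Satisfies old std::vector's copy-constructibility check; only
      // ever invoked on default-constructed (empty) slots, so it
      // intentionally copies nothing.
      ChannelSlot(const ChannelSlot&) {}
      void init(int id) { data_.reset(new int(id)); }

     private:
      std::unique_ptr<int> data_;  // move-only member suppresses the implicit copy ctor
    };

    int main() {
      std::vector<ChannelSlot> slots(4);  // pre-C++11 vectors copy-construct from a default element
      slots[0].init(42);
      return 0;
    }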
@@ -237,7 +236,7 @@ class Client {
     void ThreadFunc() {
       for (;;) {
         // run the loop body
-        bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_);
+        const bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_);
         // lock, see if we're done
         std::lock_guard<std::mutex> g(mu_);
         if (!thread_still_ok) {

@@ -97,7 +97,7 @@ struct ClientData {
   unique_ptr<Worker::Stub> stub;
   unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
 };
-}
+}  // namespace runsc

 std::unique_ptr<ScenarioResult> RunScenario(
     const ClientConfig& initial_client_config, size_t num_clients,
@@ -152,7 +152,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   using runsc::ServerData;
   // servers is array rather than std::vector to avoid gcc-4.4 issues
   // where class contained in std::vector must have a copy constructor
-  ServerData servers[num_servers];
+  auto* servers = new ServerData[num_servers];
   for (size_t i = 0; i < num_servers; i++) {
     servers[i].stub = std::move(Worker::NewStub(
         CreateChannel(workers[i], InsecureCredentials(), ChannelArguments())));
@@ -180,7 +180,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   using runsc::ClientData;
   // clients is array rather than std::vector to avoid gcc-4.4 issues
   // where class contained in std::vector must have a copy constructor
-  ClientData clients[num_clients];
+  auto* clients = new ClientData[num_clients];
   for (size_t i = 0; i < num_clients; i++) {
     clients[i].stub = std::move(Worker::NewStub(CreateChannel(
         workers[i + num_servers], InsecureCredentials(), ChannelArguments())));
@@ -262,6 +262,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
     GPR_ASSERT(server->stream->WritesDone());
     GPR_ASSERT(server->stream->Finish().ok());
   }
+  delete[] clients;
+  delete[] servers;
   return result;
 }
 }  // namespace testing
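Replacing the non-standard variable-length arrays (`ServerData servers[num_servers]`) with heap arrays keeps the code standard C++ while still avoiding gcc-4.4's copy-constructor requirement for std::vector elements; the price is the manual delete[] before the return. A sketch of the trade-off, with a hypothetical WorkerData standing in for ServerData/ClientData:

    #include <cstddef>

    struct WorkerData { int id; };  // hypothetical stand-in

    void RunAll(size_t n) {
      // WorkerData workers[n];         // VLA: non-standard C++, rejected by some compilers
      // std::vector<WorkerData> w(n);  // gcc-4.4: requires WorkerData to be copy-constructible
      WorkerData* workers = new WorkerData[n];  // portable middle ground
      for (size_t i = 0; i < n; i++) {
        workers[i].id = static_cast<int>(i);
      }
      // ... drive the workers ...
      delete[] workers;  // must run on every exit path, or the array leaks
    }

On a C++11 toolchain, std::unique_ptr<WorkerData[]> would make the cleanup automatic, but the whole point of the workaround is supporting compilers that predate it.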

@@ -45,9 +45,9 @@ class ResourceUsage {
  public:
   ResourceUsage(double w, double u, double s)
       : wall_time_(w), user_time_(u), system_time_(s) {}
-  double wall_time() { return wall_time_; }
-  double user_time() { return user_time_; }
-  double system_time() { return system_time_; }
+  double wall_time() const { return wall_time_; }
+  double user_time() const { return user_time_; }
+  double system_time() const { return system_time_; }

  private:
   double wall_time_;
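Adding const to the accessors matters because reporting code reads ResourceUsage through const references, and only const member functions are callable on one. A minimal illustration (CpuFraction is a hypothetical caller):

    class ResourceUsage {
     public:
      ResourceUsage(double w, double u, double s)
          : wall_time_(w), user_time_(u), system_time_(s) {}
      double wall_time() const { return wall_time_; }
      double user_time() const { return user_time_; }
      double system_time() const { return system_time_; }

     private:
      double wall_time_;
      double user_time_;
      double system_time_;
    };

    // Compiles only because the accessors are const-qualified.
    double CpuFraction(const ResourceUsage& r) {
      return (r.user_time() + r.system_time()) / r.wall_time();
    }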

@@ -149,7 +149,7 @@ class InterarrivalTimer {
     for (int i = 0; i < entries; i++) {
       // rand is the only choice that is portable across POSIX and Windows
       // and that supports new and old compilers
-      double uniform_0_1 = rand() / RAND_MAX;
+      const double uniform_0_1 = rand() / RAND_MAX;
       random_table_.push_back(
           std::chrono::nanoseconds(static_cast<int64_t>(1e9 * r(uniform_0_1))));
     }
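One thing the const change does not address: `rand() / RAND_MAX` is integer division, so uniform_0_1 is 0 on every draw except the one-in-RAND_MAX case where rand() returns RAND_MAX exactly. Assuming the intent is a uniform sample in [0, 1], a corrected sketch forces floating-point division:

    #include <cstdlib>

    // Uniform sample in [0, 1]; the cast makes the division floating-point.
    // (A hedged fix sketch, not the code from this commit.)
    inline double UniformZeroOne() {
      return rand() / static_cast<double>(RAND_MAX);
    }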

@@ -33,10 +33,10 @@
 #include <memory>
 #include <set>
+#include <signal.h>
 #include <gflags/gflags.h>
 #include <grpc/support/log.h>
-#include <signal.h>

 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/report.h"

@@ -140,13 +140,13 @@ void PerfDbReporter::ReportLatency(const ScenarioResult& result) {
 }

 void PerfDbReporter::ReportTimes(const ScenarioResult& result) {
-  double server_system_time = 100.0 * sum(result.server_resources, SystemTime) /
+  const double server_system_time = 100.0 * sum(result.server_resources, SystemTime) /
       sum(result.server_resources, WallTime);
-  double server_user_time = 100.0 * sum(result.server_resources, UserTime) /
+  const double server_user_time = 100.0 * sum(result.server_resources, UserTime) /
       sum(result.server_resources, WallTime);
-  double client_system_time = 100.0 * sum(result.client_resources, SystemTime) /
+  const double client_system_time = 100.0 * sum(result.client_resources, SystemTime) /
       sum(result.client_resources, WallTime);
-  double client_user_time = 100.0 * sum(result.client_resources, UserTime) /
+  const double client_user_time = 100.0 * sum(result.client_resources, UserTime) /
       sum(result.client_resources, WallTime);
   perf_db_client_.setTimes(server_system_time, server_user_time,
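Each reported figure is aggregate CPU time as a percentage of aggregate wall time. For example, two servers measuring (wall, system) of (10s, 1s) and (10s, 3s) give server_system_time = 100 * (1 + 3) / (10 + 10) = 20%. A sketch of the computation, with an explicit loop and a hypothetical UsageSample in place of the sum(resources, Field) helper:

    #include <cstddef>
    #include <vector>

    struct UsageSample {  // hypothetical per-worker measurement
      double wall_time;
      double user_time;
      double system_time;
    };

    // System CPU as a percentage of elapsed wall time, aggregated over workers.
    double SystemTimePercent(const std::vector<UsageSample>& v) {
      double sys = 0, wall = 0;
      for (size_t i = 0; i < v.size(); i++) {
        sys += v[i].system_time;
        wall += v[i].wall_time;
      }
      return 100.0 * sys / wall;
    }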

@@ -131,7 +131,7 @@ class AsyncQpsServerTest : public Server {
     while (srv_cqs_[rank]->Next(&got_tag, &ok)) {
       ServerRpcContext *ctx = detag(got_tag);
       // The tag is a pointer to an RPC context to invoke
-      bool still_going = ctx->RunNextState(ok);
+      const bool still_going = ctx->RunNextState(ok);
       if (!shutdown_state_[rank]->shutdown()) {
         // this RPC context is done, so refresh it
         if (!still_going) {
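The loop above drains a completion queue and treats every returned tag as a pointer to a per-RPC state machine. A hedged sketch of the detag/RunNextState pattern, with a simplified context type in place of gRPC's ServerRpcContext:

    // Simplified per-RPC state machine; the real ServerRpcContext advances
    // a gRPC async call through a chain of states.
    class RpcContext {
     public:
      RpcContext() : state_(0) {}
      // Advance one state; return false once the RPC is finished.
      bool RunNextState(bool ok) {
        if (!ok) return false;
        return ++state_ < 3;  // pretend each RPC takes three steps
      }
      void Reset() { state_ = 0; }  // "refresh" the context for the next RPC
     private:
      int state_;
    };

    // Tags round-trip through the queue as opaque void*.
    inline void* tag(RpcContext* c) { return static_cast<void*>(c); }
    inline RpcContext* detag(void* t) { return static_cast<RpcContext*>(t); }

    // Drain loop in the shape of the code above (queue plumbing elided):
    //   while (cq->Next(&got_tag, &ok)) {
    //     RpcContext* ctx = detag(got_tag);
    //     const bool still_going = ctx->RunNextState(ok);
    //     if (!still_going) ctx->Reset();  // done: recycle for a new RPC
    //   }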
