Eliminate unnecessary uses of new[]/delete[] that can be replaced with vector

Also start eliminating uses of plain-old delete that are not helpful
pull/8638/head
Vijay Pai 8 years ago
parent 852c58e8ae
commit 90102c2bfc
Changed files:

1. test/cpp/end2end/async_end2end_test.cc (6 lines changed)
2. test/cpp/end2end/thread_stress_test.cc (14 lines changed)
3. test/cpp/qps/client.h (7 lines changed)
4. test/cpp/qps/client_sync.cc (12 lines changed)
5. test/cpp/qps/driver.cc (13 lines changed)
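
The pattern is the same in every file below: storage that used to be a manually managed new[] array (freed with delete[]) or a heap-allocated std::thread (freed with plain delete) becomes a std::vector or a std::thread held by value, so cleanup happens automatically when the owner goes out of scope. The following is a minimal, self-contained sketch of that idiom, not taken from the gRPC sources; Worker and kNumThreads are made-up illustrative names.

```cpp
#include <thread>
#include <vector>

// Hypothetical per-task state; any default-constructible type works,
// which is all std::vector needs in order to replace "new T[n]".
struct Worker {
  int result = 0;
};

int main() {
  const int kNumThreads = 4;

  // Before: Worker* workers = new Worker[kNumThreads]; ... delete[] workers;
  // After: the vector owns the storage and releases it automatically.
  std::vector<Worker> workers(kNumThreads);

  // Before: std::vector<std::thread*> plus new std::thread(...) and delete.
  // After: store std::thread by value and construct each one in place.
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.emplace_back([&workers, i] { workers[i].result = i * i; });
  }
  for (auto& t : threads) {
    t.join();  // join before the vector (and the threads it owns) is destroyed
  }
  return 0;
}
```

The diffs below apply this substitution to the stress-test server contexts and test threads, the per-thread Histogram/StatusHistogram merge buffers, the streaming client's ClientContext and stream arrays, and the driver's ServerData/ClientData arrays.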

test/cpp/end2end/async_end2end_test.cc

@@ -352,15 +352,13 @@ void ServerWait(Server* server, int* notify) {
 }
 TEST_P(AsyncEnd2endTest, WaitAndShutdownTest) {
   int notify = 0;
-  std::thread* wait_thread =
-      new std::thread(&ServerWait, server_.get(), &notify);
+  std::thread wait_thread(&ServerWait, server_.get(), &notify);
   ResetStub();
   SendRpc(1);
   EXPECT_EQ(0, notify);
   server_->Shutdown();
-  wait_thread->join();
+  wait_thread.join();
   EXPECT_EQ(1, notify);
-  delete wait_thread;
 }

 TEST_P(AsyncEnd2endTest, ShutdownThenWait) {

test/cpp/end2end/thread_stress_test.cc

@@ -232,13 +232,13 @@ class CommonStressTestSyncServer : public CommonStressTest<TestServiceImpl> {
 class CommonStressTestAsyncServer
     : public CommonStressTest<grpc::testing::EchoTestService::AsyncService> {
  public:
+  CommonStressTestAsyncServer() : contexts_(kNumAsyncServerThreads * 100) {}
   void SetUp() override {
     shutting_down_ = false;
     ServerBuilder builder;
     SetUpStart(&builder, &service_);
     cq_ = builder.AddCompletionQueue();
     SetUpEnd(&builder);
-    contexts_ = new Context[kNumAsyncServerThreads * 100];
     for (int i = 0; i < kNumAsyncServerThreads * 100; i++) {
       RefreshContext(i);
     }
@@ -265,7 +265,6 @@ class CommonStressTestAsyncServer
     while (cq_->Next(&ignored_tag, &ignored_ok))
       ;
     TearDownEnd();
-    delete[] contexts_;
   }

  private:
@@ -311,7 +310,8 @@ class CommonStressTestAsyncServer
         response_writer;
     EchoRequest recv_request;
     enum { READY, DONE } state;
-  } * contexts_;
+  };
+  std::vector<Context> contexts_;
   ::grpc::testing::EchoTestService::AsyncService service_;
   std::unique_ptr<ServerCompletionQueue> cq_;
   bool shutting_down_;
@@ -353,14 +353,12 @@ typedef ::testing::Types<CommonStressTestSyncServer,
 TYPED_TEST_CASE(End2endTest, CommonTypes);

 TYPED_TEST(End2endTest, ThreadStress) {
   this->common_.ResetStub();
-  std::vector<std::thread*> threads;
+  std::vector<std::thread> threads;
   for (int i = 0; i < kNumThreads; ++i) {
-    threads.push_back(
-        new std::thread(SendRpc, this->common_.GetStub(), kNumRpcs));
+    threads.emplace_back(SendRpc, this->common_.GetStub(), kNumRpcs);
   }
   for (int i = 0; i < kNumThreads; ++i) {
-    threads[i]->join();
-    delete threads[i];
+    threads[i].join();
   }
 }

test/cpp/qps/client.h

@@ -163,10 +163,9 @@ class Client {
     MaybeStartRequests();

-    // avoid std::vector for old compilers that expect a copy constructor
     if (reset) {
-      Histogram* to_merge = new Histogram[threads_.size()];
-      StatusHistogram* to_merge_status = new StatusHistogram[threads_.size()];
+      std::vector<Histogram> to_merge(threads_.size());
+      std::vector<StatusHistogram> to_merge_status(threads_.size());
       for (size_t i = 0; i < threads_.size(); i++) {
         threads_[i]->BeginSwap(&to_merge[i], &to_merge_status[i]);
@@ -177,8 +176,6 @@ class Client {
         latencies.Merge(to_merge[i]);
         MergeStatusHistogram(to_merge_status[i], &statuses);
       }
-      delete[] to_merge;
-      delete[] to_merge_status;
       timer_result = timer->Mark();
     } else {
       // merge snapshots of each thread histogram

test/cpp/qps/client_sync.cc

@@ -138,10 +138,7 @@ class SynchronousUnaryClient final : public SynchronousClient {
 class SynchronousStreamingClient final : public SynchronousClient {
  public:
   SynchronousStreamingClient(const ClientConfig& config)
-      : SynchronousClient(config) {
-    context_ = new grpc::ClientContext[num_threads_];
-    stream_ = new std::unique_ptr<
-        grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>[num_threads_];
+      : SynchronousClient(config), context_(num_threads_), stream_(num_threads_) {
     for (size_t thread_idx = 0; thread_idx < num_threads_; thread_idx++) {
       auto* stub = channels_[thread_idx % channels_.size()].get_stub();
       stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
@@ -161,8 +158,6 @@ class SynchronousStreamingClient final : public SynchronousClient {
         }
       }
     }
-    delete[] stream_;
-    delete[] context_;
   }

   bool ThreadFunc(HistogramEntry* entry, size_t thread_idx) override {
@@ -182,9 +177,8 @@ class SynchronousStreamingClient final : public SynchronousClient {
  private:
   // These are both conceptually std::vector but cannot be for old compilers
   // that expect contained classes to support copy constructors
-  grpc::ClientContext* context_;
-  std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>*
-      stream_;
+  std::vector<grpc::ClientContext> context_;
+  std::vector<std::unique_ptr<grpc::ClientReaderWriter<SimpleRequest, SimpleResponse>>> stream_;
 };

 std::unique_ptr<Client> CreateSynchronousUnaryClient(

test/cpp/qps/driver.cc

@@ -262,10 +262,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   workers.resize(num_clients + num_servers);

   // Start servers
-  using runsc::ServerData;
-  // servers is array rather than std::vector to avoid gcc-4.4 issues
-  // where class contained in std::vector must have a copy constructor
-  auto* servers = new ServerData[num_servers];
+  std::vector<runsc::ServerData> servers(num_servers);
   for (size_t i = 0; i < num_servers; i++) {
     gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
             workers[i].c_str(), i);
@@ -328,10 +325,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   // Targets are all set by now
   result_client_config = client_config;
   // Start clients
-  using runsc::ClientData;
-  // clients is array rather than std::vector to avoid gcc-4.4 issues
-  // where class contained in std::vector must have a copy constructor
-  auto* clients = new ClientData[num_clients];
+  std::vector<runsc::ClientData> clients(num_clients);
   size_t channels_allocated = 0;
   for (size_t i = 0; i < num_clients; i++) {
     const auto& worker = workers[i + num_servers];
@@ -501,7 +495,6 @@ std::unique_ptr<ScenarioResult> RunScenario(
               s.error_message().c_str());
     }
   }
-  delete[] clients;

   merged_latencies.FillProto(result->mutable_latencies());
   for (std::unordered_map<int, int64_t>::iterator it = merged_statuses.begin();
@@ -544,8 +537,6 @@ std::unique_ptr<ScenarioResult> RunScenario(
     }
   }

-  delete[] servers;
-
   postprocess_scenario_result(result.get());
   return result;
 }
