Refactor to favor composition over inheritance

Also make num_threads and num_rpcs command-line flags
pull/4828/head
Vijay Pai 9 years ago
parent ce7929c877
commit d10bbb63f8
1 changed file: test/cpp/end2end/thread_stress_test.cc (81 lines changed)
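In outline, the change moves the shared server/stub setup out of the End2endTest base class and into a plain helper object that each fixture holds as a member. A condensed sketch of the pattern, taken from the diff below (the real CommonStressTest also builds the server, registers a duplicate-package service, and applies a max-message-size limit):

// Shared setup lives in a plain class, not a test fixture.
class CommonStressTest {
 public:
  void SetUp();       // start a server on an unused port
  void TearDown();    // shut the server down
  void ResetStub();   // create a channel and an EchoTestService stub
  grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }

 private:
  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
};

// Each fixture now *has a* CommonStressTest instead of *being* an
// End2endTest, so the sync and async fixtures share setup code without
// sharing a base class.
class End2endTest : public ::testing::Test {
 protected:
  void SetUp() GRPC_OVERRIDE { common_.SetUp(); }
  void TearDown() GRPC_OVERRIDE { common_.TearDown(); }
  CommonStressTest common_;
};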

@@ -34,6 +34,7 @@
 #include <mutex>
 #include <thread>
+#include <gflags/gflags.h>
 #include <grpc++/channel.h>
 #include <grpc++/client_context.h>
 #include <grpc++/create_channel.h>
@@ -54,6 +55,9 @@ using grpc::testing::EchoRequest;
 using grpc::testing::EchoResponse;
 using std::chrono::system_clock;
 
+DEFINE_int32(num_threads, 100, "Number of threads");
+DEFINE_int32(num_rpcs, 1000, "Number of RPCs per thread");
+
 namespace grpc {
 namespace testing {
@@ -168,11 +172,10 @@ class TestServiceImplDupPkg
   }
 };
 
-class End2endTest : public ::testing::Test {
- protected:
-  End2endTest() : kMaxMessageSize_(8192) {}
-
-  void SetUp() GRPC_OVERRIDE {
+class CommonStressTest {
+ public:
+  CommonStressTest() : kMaxMessageSize_(8192) {}
+  void SetUp() {
     int port = grpc_pick_unused_port_or_die();
     server_address_ << "localhost:" << port;
     // Setup server
@@ -185,15 +188,15 @@ class End2endTest : public ::testing::Test {
     builder.RegisterService(&dup_pkg_service_);
     server_ = builder.BuildAndStart();
   }
-  void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }
+  void TearDown() { server_->Shutdown(); }
   void ResetStub() {
     std::shared_ptr<Channel> channel =
         CreateChannel(server_address_.str(), InsecureChannelCredentials());
     stub_ = grpc::testing::EchoTestService::NewStub(channel);
   }
+  grpc::testing::EchoTestService::Stub* GetStub() { return stub_.get(); }
+
+ private:
   std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
   std::unique_ptr<Server> server_;
   std::ostringstream server_address_;
@@ -202,6 +205,16 @@ class End2endTest : public ::testing::Test {
   TestServiceImplDupPkg dup_pkg_service_;
 };
 
+class End2endTest : public ::testing::Test {
+ protected:
+  End2endTest() {}
+  void SetUp() GRPC_OVERRIDE { common_.SetUp(); }
+  void TearDown() GRPC_OVERRIDE { common_.TearDown(); }
+  void ResetStub() { common_.ResetStub(); }
+
+  CommonStressTest common_;
+};
+
 static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
   EchoRequest request;
   EchoResponse response;
@@ -216,27 +229,29 @@ static void SendRpc(grpc::testing::EchoTestService::Stub* stub, int num_rpcs) {
   }
 }
 
 TEST_F(End2endTest, ThreadStress) {
-  ResetStub();
+  common_.ResetStub();
   std::vector<std::thread*> threads;
-  for (int i = 0; i < 100; ++i) {
-    threads.push_back(new std::thread(SendRpc, stub_.get(), 1000));
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
+    threads.push_back(
+        new std::thread(SendRpc, common_.GetStub(), FLAGS_num_rpcs));
   }
-  for (int i = 0; i < 100; ++i) {
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
     threads[i]->join();
     delete threads[i];
   }
 }
 
-class AsyncClientEnd2endTest : public End2endTest {
+class AsyncClientEnd2endTest : public ::testing::Test {
  protected:
   AsyncClientEnd2endTest() : rpcs_outstanding_(0) {}
+  void SetUp() GRPC_OVERRIDE { common_.SetUp(); }
   void TearDown() GRPC_OVERRIDE {
     void* ignored_tag;
     bool ignored_ok;
     while (cq_.Next(&ignored_tag, &ignored_ok))
       ;
-    End2endTest::TearDown();
+    common_.TearDown();
   }
 
   void Wait() {
@@ -260,7 +275,8 @@ class AsyncClientEnd2endTest : public End2endTest {
       AsyncClientCall* call = new AsyncClientCall;
       EchoRequest request;
       request.set_message("Hello");
-      call->response_reader = stub_->AsyncEcho(&call->context, request, &cq_);
+      call->response_reader =
+          common_.GetStub()->AsyncEcho(&call->context, request, &cq_);
       call->response_reader->Finish(&call->response, &call->status,
                                     (void*)call);
@@ -290,6 +306,7 @@ class AsyncClientEnd2endTest : public End2endTest {
     }
   }
 
+  CommonStressTest common_;
   CompletionQueue cq_;
   std::mutex mu_;
   std::condition_variable cv_;
@@ -297,27 +314,26 @@ class AsyncClientEnd2endTest : public End2endTest {
 };
 
 TEST_F(AsyncClientEnd2endTest, ThreadStress) {
-  ResetStub();
-  std::vector<std::thread*> threads;
-  for (int i = 0; i < 100; ++i) {
-    threads.push_back(new std::thread(
-        &AsyncClientEnd2endTest_ThreadStress_Test::AsyncSendRpc, this, 1000));
+  common_.ResetStub();
+  std::vector<std::thread*> send_threads, completion_threads;
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
+    completion_threads.push_back(new std::thread(
+        &AsyncClientEnd2endTest_ThreadStress_Test::AsyncCompleteRpc, this));
   }
-  for (int i = 0; i < 100; ++i) {
-    threads[i]->join();
-    delete threads[i];
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
+    send_threads.push_back(
+        new std::thread(&AsyncClientEnd2endTest_ThreadStress_Test::AsyncSendRpc,
+                        this, FLAGS_num_rpcs));
   }
-
-  threads.clear();
-
-  for (int i = 0; i < 100; ++i) {
-    threads.push_back(new std::thread(
-        &AsyncClientEnd2endTest_ThreadStress_Test::AsyncCompleteRpc, this));
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
+    send_threads[i]->join();
+    delete send_threads[i];
   }
   Wait();
-  for (int i = 0; i < 100; ++i) {
-    threads[i]->join();
-    delete threads[i];
+  for (int i = 0; i < FLAGS_num_threads; ++i) {
+    completion_threads[i]->join();
+    delete completion_threads[i];
   }
 }
@@ -325,6 +341,7 @@ TEST_F(AsyncClientEnd2endTest, ThreadStress) {
 }  // namespace grpc
 
 int main(int argc, char** argv) {
+  ::google::ParseCommandLineFlags(&argc, &argv, true);
   grpc_test_init(argc, argv);
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
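With the new gflags definitions, the thread and RPC counts can now be overridden at run time instead of being recompiled; for example (binary name assumed from the source file name): ./thread_stress_test --num_threads=50 --num_rpcs=200. Left at their defaults, the flags reproduce the previously hard-coded values of 100 threads and 1000 RPCs per thread.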
