Fix deadlocks in grpclb_end2end_test

pull/11726/head
David Garcia Quintas 7 years ago
parent 05f6d7e5ae
commit 51d0ff0f3d
1 changed file: test/cpp/end2end/grpclb_end2end_test.cc (22 lines changed)

@@ -194,12 +194,13 @@ class BalancerServiceImpl : public BalancerService {
     for (const auto& response_and_delay : responses_and_delays) {
       {
         std::unique_lock<std::mutex> lock(mu_);
-        if (shutdown_) break;
+        if (shutdown_) goto done;
       }
       SendResponse(stream, response_and_delay.first, response_and_delay.second);
     }
     {
       std::unique_lock<std::mutex> lock(mu_);
+      if (shutdown_) goto done;
       serverlist_cond_.wait(lock);
     }
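The two "goto done" checks make the balancer thread bail out instead of parking on serverlist_cond_ after shutdown, when no notify would ever arrive. This check-then-wait is only race-free because the flag is tested under the same mu_ that the eventual notifier must hold. As a standalone illustration (class and member names hypothetical, not the test's actual helpers), the same guarantee falls out of std::condition_variable's predicate overload, which re-checks the flag on every wakeup:

#include <condition_variable>
#include <mutex>

class ShutdownGate {
 public:
  // Setter side: flip the flag and notify while holding the mutex, so a
  // waiter can never observe shutdown_ == false and then miss the wakeup.
  void Shutdown() {
    std::lock_guard<std::mutex> lock(mu_);
    shutdown_ = true;
    cond_.notify_all();
  }

  // Waiter side: wait(lock, pred) re-checks the flag under the lock, so it
  // returns immediately if Shutdown() already ran and cannot deadlock.
  void WaitUntilShutdown() {
    std::unique_lock<std::mutex> lock(mu_);
    cond_.wait(lock, [this] { return shutdown_; });
  }

 private:
  std::mutex mu_;
  std::condition_variable cond_;
  bool shutdown_ = false;
};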
@@ -209,6 +210,9 @@ class BalancerServiceImpl : public BalancerService {
       gpr_log(GPR_INFO, "LB: recv client load report msg: '%s'",
               request.DebugString().c_str());
       GPR_ASSERT(request.has_client_stats());
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(mu_);
       client_stats_.num_calls_started +=
           request.client_stats().num_calls_started();
       client_stats_.num_calls_finished +=
@@ -224,10 +228,9 @@ class BalancerServiceImpl : public BalancerService {
               .num_calls_finished_with_client_failed_to_send();
       client_stats_.num_calls_finished_known_received +=
           request.client_stats().num_calls_finished_known_received();
-      std::lock_guard<std::mutex> lock(mu_);
       load_report_cond_.notify_one();
     }
+  done:
     gpr_log(GPR_INFO, "LB: done");
     return Status::OK;
   }
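Hoisting the lock_guard above the client_stats_ updates puts both the stats mutation and the notify_one under mu_, so the test thread, which reads the stats and waits under the same mutex, can no longer check, get preempted, and then miss the signal. A common way to make such a handoff immune to ordering altogether is to pair the condition variable with a flag; a minimal hypothetical sketch (names invented, not the test's code):

#include <condition_variable>
#include <mutex>

struct LoadReportGate {
  std::mutex mu;
  std::condition_variable cond;
  bool ready = false;

  // Producer (balancer thread): record the event under the lock, then signal.
  // Even if this runs before anyone waits, the flag preserves the event.
  void Signal() {
    std::lock_guard<std::mutex> lock(mu);
    ready = true;
    cond.notify_one();
  }

  // Consumer (test thread): the predicate sees ready == true even when the
  // notify fired first, so a notify-before-wait is harmless.
  void Wait() {
    std::unique_lock<std::mutex> lock(mu);
    cond.wait(lock, [this] { return ready; });
    ready = false;  // re-arm for the next report
  }
};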
@@ -428,19 +431,24 @@ class GrpclbEnd2endTest : public ::testing::Test {
     explicit ServerThread(const grpc::string& type,
                           const grpc::string& server_host, T* service)
         : type_(type), service_(service) {
+      std::mutex mu;
+      // We need to acquire the lock here in order to prevent the notify_one
+      // by ServerThread::Start from firing before the wait below is hit.
+      std::unique_lock<std::mutex> lock(mu);
       port_ = grpc_pick_unused_port_or_die();
       gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
-      std::mutex mu;
       std::condition_variable cond;
       thread_.reset(new std::thread(
           std::bind(&ServerThread::Start, this, server_host, &mu, &cond)));
-      std::unique_lock<std::mutex> lock(mu);
       cond.wait(lock);
       gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
     }

     void Start(const grpc::string& server_host, std::mutex* mu,
                std::condition_variable* cond) {
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(*mu);
       std::ostringstream server_address;
       server_address << server_host << ":" << port_;
       ServerBuilder builder;
@@ -448,13 +456,12 @@ class GrpclbEnd2endTest : public ::testing::Test {
                                InsecureServerCredentials());
       builder.RegisterService(service_);
       server_ = builder.BuildAndStart();
-      std::lock_guard<std::mutex> lock(*mu);
       cond->notify_one();
     }

     void Shutdown() {
       gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
-      server_->Shutdown();
+      server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
       thread_->join();
       gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
     }
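Two separate fixes in these two hunks. First, the notify_one in Start now runs under the lock taken at the top of the function, and the constructor acquires that same mutex before spawning the thread; since cond.wait releases the mutex only once the constructor is parked, the child cannot notify into the void. Second, passing grpc_timeout_milliseconds_to_deadline(0) to Server::Shutdown asks gRPC to cancel still-pending calls immediately instead of draining them, so thread_->join() cannot hang behind an in-flight RPC. The startup handshake condenses to roughly this standalone sketch (function name hypothetical):

#include <condition_variable>
#include <mutex>
#include <thread>

void StartAndWait() {
  std::mutex mu;
  std::condition_variable cond;
  // Lock *before* spawning: the child blocks on the same mutex, so its
  // notify_one cannot fire until cond.wait() below has released mu.
  std::unique_lock<std::mutex> lock(mu);
  std::thread child([&] {
    std::lock_guard<std::mutex> child_lock(mu);  // blocks until parent waits
    // ... build and start the server here ...
    cond.notify_one();
  });
  cond.wait(lock);  // as in the test, no predicate; a production version
                    // would add a bool flag to guard spurious wakeups
  child.join();
}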
@@ -820,6 +827,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
   // Kill balancer 0
   gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+  balancers_[0]->NotifyDoneWithServerlists();
   if (balancers_[0]->Shutdown()) balancer_servers_[0].Shutdown();
   gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
