@@ -194,12 +194,13 @@ class BalancerServiceImpl : public BalancerService {
     for (const auto& response_and_delay : responses_and_delays) {
       {
         std::unique_lock<std::mutex> lock(mu_);
-        if (shutdown_) break;
+        if (shutdown_) goto done;
       }
       SendResponse(stream, response_and_delay.first, response_and_delay.second);
     }
     {
       std::unique_lock<std::mutex> lock(mu_);
+      if (shutdown_) goto done;
       serverlist_cond_.wait(lock);
     }
 
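The change from `break` to `goto done` routes the early exit through the same cleanup path as the normal one, and the added `if (shutdown_) goto done;` keeps the handler from blocking on `serverlist_cond_` when shutdown was requested after the send loop. A minimal sketch of the equivalent predicate-based wait (illustrative only; `done_with_serverlists_` is a hypothetical flag, not a member of the test class):

    std::unique_lock<std::mutex> lock(mu_);
    // The predicate is evaluated with mu_ held, so it subsumes the explicit
    // shutdown_ check the hunk adds and also absorbs spurious wakeups.
    serverlist_cond_.wait(
        lock, [this] { return shutdown_ || done_with_serverlists_; });
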
@@ -209,6 +210,9 @@ class BalancerServiceImpl : public BalancerService {
       gpr_log(GPR_INFO, "LB: recv client load report msg: '%s'",
               request.DebugString().c_str());
       GPR_ASSERT(request.has_client_stats());
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(mu_);
       client_stats_.num_calls_started +=
           request.client_stats().num_calls_started();
       client_stats_.num_calls_finished +=
@@ -224,10 +228,9 @@ class BalancerServiceImpl : public BalancerService {
               .num_calls_finished_with_client_failed_to_send();
       client_stats_.num_calls_finished_known_received +=
           request.client_stats().num_calls_finished_known_received();
-      std::lock_guard<std::mutex> lock(mu_);
       load_report_cond_.notify_one();
     }
   done:
     gpr_log(GPR_INFO, "LB: done");
     return Status::OK;
   }
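These two hunks move the `std::lock_guard` from just before `load_report_cond_.notify_one()` up to before the `client_stats_` updates, per the new comment: with `mu_` held across both the update and the notify, the notification cannot fire in the window between a waiter locking `mu_` and entering `wait()`, because `wait()` releases the mutex atomically. A condensed, self-contained sketch of that handshake (names borrowed from the diff; the struct and helpers are illustrative, not the test's verbatim code):

    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    struct ClientStats {
      size_t num_calls_started = 0;
      size_t num_calls_finished = 0;
    };

    class LoadReportTracker {
     public:
      void RecordLoadReport(const ClientStats& update) {
        // Lock before touching the stats AND before notify_one: a waiter
        // holds mu_ from before wait() until wait() releases it atomically,
        // so the notification cannot slip into that window.
        std::lock_guard<std::mutex> lock(mu_);
        client_stats_.num_calls_started += update.num_calls_started;
        client_stats_.num_calls_finished += update.num_calls_finished;
        load_report_cond_.notify_one();
      }

      ClientStats WaitForLoadReport() {
        std::unique_lock<std::mutex> lock(mu_);
        load_report_cond_.wait(lock);  // as in the test: no predicate
        return client_stats_;
      }

     private:
      std::mutex mu_;
      std::condition_variable load_report_cond_;
      ClientStats client_stats_;
    };
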
@@ -428,19 +431,24 @@ class GrpclbEnd2endTest : public ::testing::Test {
     explicit ServerThread(const grpc::string& type,
                           const grpc::string& server_host, T* service)
         : type_(type), service_(service) {
+      std::mutex mu;
+      // We need to acquire the lock here in order to prevent the notify_one
+      // by ServerThread::Start from firing before the wait below is hit.
+      std::unique_lock<std::mutex> lock(mu);
       port_ = grpc_pick_unused_port_or_die();
       gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
-      std::mutex mu;
       std::condition_variable cond;
       thread_.reset(new std::thread(
           std::bind(&ServerThread::Start, this, server_host, &mu, &cond)));
-      std::unique_lock<std::mutex> lock(mu);
       cond.wait(lock);
       gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
     }
 
     void Start(const grpc::string& server_host, std::mutex* mu,
                std::condition_variable* cond) {
+      // We need to acquire the lock here in order to prevent the notify_one
+      // below from firing before its corresponding wait is executed.
+      std::lock_guard<std::mutex> lock(*mu);
       std::ostringstream server_address;
       server_address << server_host << ":" << port_;
       ServerBuilder builder;
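The constructor now takes `mu` before the server thread is spawned, and `Start` takes the same mutex at the top of its body, per the two new comments. A self-contained sketch of that startup handshake (illustrative, not the test's code):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex mu;
      std::condition_variable cond;
      std::unique_lock<std::mutex> lock(mu);  // held before the thread exists
      std::thread worker([&] {
        // Mirrors Start(): take the mutex first, so notify_one cannot run
        // until cond.wait() below has released mu.
        std::lock_guard<std::mutex> inner(mu);
        // ... bring the server up ...
        cond.notify_one();
      });
      // Like the test, a bare wait() relies on no spurious wakeups; a
      // boolean flag checked in a wait predicate would harden this.
      cond.wait(lock);  // atomically releases mu, letting the worker proceed
      worker.join();
    }
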
@@ -448,13 +456,12 @@ class GrpclbEnd2endTest : public ::testing::Test {
                                InsecureServerCredentials());
       builder.RegisterService(service_);
       server_ = builder.BuildAndStart();
-      std::lock_guard<std::mutex> lock(*mu);
       cond->notify_one();
     }
 
     void Shutdown() {
       gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
-      server_->Shutdown();
+      server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
       thread_->join();
       gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
     }
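Two changes here: the `std::lock_guard` that used to sit just before `cond->notify_one()` is gone (the previous hunk moved it to the top of `Start`, so the whole body now runs under the mutex), and `Shutdown()` gains a deadline. `grpc::Server::Shutdown(deadline)` lets in-flight calls run only until the deadline and then cancels them, so a zero deadline cancels long-lived calls such as the open `BalanceLoad` stream immediately instead of waiting for them to drain. An equivalent call using std::chrono, assuming the templated overload (a sketch, not the test's code):

    server_->Shutdown(std::chrono::system_clock::now());
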
@@ -820,6 +827,7 @@ TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
 
   // Kill balancer 0
   gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
+  balancers_[0]->NotifyDoneWithServerlists();
   if (balancers_[0]->Shutdown()) balancer_servers_[0].Shutdown();
   gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
 
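The added `NotifyDoneWithServerlists()` is the counterpart of the `serverlist_cond_.wait()` in the first hunk: it wakes the balancer's blocked `BalanceLoad` handler so the call can finish, letting the server shutdown that follows complete promptly. The helper itself is not shown in this diff; given the members used above, a plausible shape (an assumption, not the verbatim implementation):

    void NotifyDoneWithServerlists() {
      std::lock_guard<std::mutex> lock(mu_);
      serverlist_cond_.notify_one();
    }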