[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log (#36548)

[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log

In this CL we are migrating from gRPC's own gpr logging mechanism to the absl logging mechanism. The intention is to deprecate gpr_log in the future.

We apply the following mapping (a minimal before/after sketch follows the list):
1. gpr_log(GPR_INFO,...) -> LOG(INFO)
2. gpr_log(GPR_ERROR,...) -> LOG(ERROR)
3. gpr_log(GPR_DEBUG,...) -> VLOG(2)
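
For illustration, here is a minimal sketch of each mapping at a call site. The function and variable names are placeholders, not taken from this diff, and VLOG(2) output appears only when verbosity is raised (e.g. --v=2 when absl's log flags are linked in):

#include <cstdint>

#include "absl/log/log.h"

void LogMigrationExamples(int port, int64_t waited_ms) {
  // Before: gpr_log(GPR_INFO, "starting server on port %d", port);
  LOG(INFO) << "starting server on port " << port;

  // Before: gpr_log(GPR_ERROR, "failed to bind port %d", port);
  LOG(ERROR) << "failed to bind port " << port;

  // Before: gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited_ms);
  VLOG(2) << "Waited " << waited_ms << " milliseconds";
}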

Reviewers need to check:
1. That the above mapping is correct.
2. That the content of each log message is unchanged.

gpr_log format strings could not take absl::string_view or std::string directly, so call sites converted them to C strings first; absl LOG accepts both types. Some string_view- and std::string-related conversions are therefore eliminated. This is expected.
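
A hypothetical example of the kind of conversion that goes away (the names here are illustrative only):

#include <string>

#include "absl/log/log.h"
#include "absl/strings/string_view.h"

void LogPeerFailure(absl::string_view peer, const std::string& message) {
  // Before: the printf-style format required NUL-terminated C strings:
  //   gpr_log(GPR_ERROR, "peer %s: %s",
  //           std::string(peer).c_str(), message.c_str());
  // After: the streaming API accepts string_view and std::string directly.
  LOG(ERROR) << "peer " << peer << ": " << message;
}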

Closes #36548

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/36548 from tanvi-jagtap:test_cpp_e2e_grpclb_clientlb_tjagtap 9f49be3401
PiperOrigin-RevId: 632020624
Branch: pull/36559/head^2
Author: Tanvi Jagtap (committed by Copybara-Service)
Parent: 527950143a
Commit: e3be06ee38
3 files changed:
  test/cpp/end2end/BUILD (2 lines changed)
  test/cpp/end2end/client_lb_end2end_test.cc (164 lines changed)
  test/cpp/end2end/grpclb_end2end_test.cc (113 lines changed)

@@ -529,6 +529,7 @@ grpc_cc_test(
srcs = ["client_lb_end2end_test.cc"],
external_deps = [
"absl/log:check",
"absl/log:log",
"gtest",
],
flaky = True, # TODO(b/151315347)
@@ -620,6 +621,7 @@ grpc_cc_test(
srcs = ["grpclb_end2end_test.cc"],
external_deps = [
"absl/log:check",
"absl/log:log",
"gtest",
],
flaky = True, # TODO(b/150567713)

@@ -26,6 +26,7 @@
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
@@ -424,7 +425,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
absl::Seconds(0.1))) {}
void Start(const std::string& server_host) {
gpr_log(GPR_INFO, "starting server on port %d", port_);
LOG(INFO) << "starting server on port " << port_;
grpc_core::MutexLock lock(&mu_);
started_ = true;
thread_ = std::make_unique<std::thread>(
@@ -433,7 +434,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
cond_.Wait(&mu_);
}
server_ready_ = false;
gpr_log(GPR_INFO, "server startup complete");
LOG(INFO) << "server startup complete";
}
void Serve(const std::string& server_host) {
@@ -498,10 +499,8 @@ class ClientLbEnd2endTest : public ::testing::Test {
absl::Duration timeout = absl::Seconds(30)) {
if (stop_index == 0) stop_index = servers_.size();
auto deadline = absl::Now() + (timeout * grpc_test_slowdown_factor());
gpr_log(GPR_INFO,
"========= WAITING FOR BACKENDS [%" PRIuPTR ", %" PRIuPTR
") ==========",
start_index, stop_index);
LOG(INFO) << "========= WAITING FOR BACKENDS [" << start_index << ", "
<< stop_index << ") ==========";
while (!SeenAllServers(start_index, stop_index)) {
Status status = SendRpc(stub);
if (status_check != nullptr) {
@@ -636,17 +635,17 @@ TEST_F(ClientLbEnd2endTest, ChannelIdleness) {
// The initial channel state should be IDLE.
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
// After sending RPC, channel state should be READY.
gpr_log(GPR_INFO, "*** SENDING RPC, CHANNEL SHOULD CONNECT ***");
LOG(INFO) << "*** SENDING RPC, CHANNEL SHOULD CONNECT ***";
response_generator.SetNextResolution(GetServersPorts());
CheckRpcSendOk(DEBUG_LOCATION, stub);
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
// After a period of time not using the channel, the channel state should switch
// to IDLE.
gpr_log(GPR_INFO, "*** WAITING FOR CHANNEL TO GO IDLE ***");
LOG(INFO) << "*** WAITING FOR CHANNEL TO GO IDLE ***";
gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(1200));
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_IDLE);
// Sending a new RPC should awake the IDLE channel.
gpr_log(GPR_INFO, "*** SENDING ANOTHER RPC, CHANNEL SHOULD RECONNECT ***");
LOG(INFO) << "*** SENDING ANOTHER RPC, CHANNEL SHOULD RECONNECT ***";
response_generator.SetNextResolution(GetServersPorts());
CheckRpcSendOk(DEBUG_LOCATION, stub);
EXPECT_EQ(channel->GetState(false), GRPC_CHANNEL_READY);
@@ -830,7 +829,7 @@ TEST_F(PickFirstTest, BackOffInitialReconnect) {
ASSERT_TRUE(WaitForChannelReady(channel.get()));
// Check how long it took.
const grpc_core::Duration waited = grpc_core::Timestamp::Now() - t0;
gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis());
VLOG(2) << "Waited " << waited.millis() << " milliseconds";
// We should have waited at least kInitialBackOffMs. We subtract one to
// account for test and precision accuracy drift.
EXPECT_GE(waited.millis(),
@@ -861,7 +860,7 @@ TEST_F(PickFirstTest, BackOffMinReconnect) {
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_core::Duration waited =
grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0));
gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis());
VLOG(2) << "Waited " << waited.millis() << " milliseconds";
// We should have waited at least kMinReconnectBackOffMs. We subtract one to
// account for test and precision accuracy drift.
EXPECT_GE(waited.millis(),
@@ -898,7 +897,7 @@ TEST_F(PickFirstTest, ResetConnectionBackoff) {
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_core::Duration waited =
grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0));
gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis());
VLOG(2) << "Waited " << waited.millis() << " milliseconds";
// We should have waited less than kInitialBackOffMs.
EXPECT_LT(waited.millis(), kInitialBackOffMs * grpc_test_slowdown_factor());
}
@@ -919,33 +918,33 @@ TEST_F(ClientLbEnd2endTest,
response_generator.SetNextResolution({port});
// Intercept initial connection attempt.
auto hold1 = injector.AddHold(port);
gpr_log(GPR_INFO, "=== TRIGGERING INITIAL CONNECTION ATTEMPT");
LOG(INFO) << "=== TRIGGERING INITIAL CONNECTION ATTEMPT";
EXPECT_EQ(GRPC_CHANNEL_IDLE, channel->GetState(/*try_to_connect=*/true));
hold1->Wait();
EXPECT_EQ(GRPC_CHANNEL_CONNECTING,
channel->GetState(/*try_to_connect=*/false));
// Reset backoff.
gpr_log(GPR_INFO, "=== RESETTING BACKOFF");
LOG(INFO) << "=== RESETTING BACKOFF";
experimental::ChannelResetConnectionBackoff(channel.get());
// Intercept next attempt. Do this before resuming the first attempt,
// just in case the client makes progress faster than this thread.
auto hold2 = injector.AddHold(port);
// Fail current attempt and wait for next one to start.
gpr_log(GPR_INFO, "=== RESUMING INITIAL ATTEMPT");
LOG(INFO) << "=== RESUMING INITIAL ATTEMPT";
const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
hold1->Resume();
gpr_log(GPR_INFO, "=== WAITING FOR SECOND ATTEMPT");
LOG(INFO) << "=== WAITING FOR SECOND ATTEMPT";
// This WaitForStateChange() call just makes sure we're doing some polling.
EXPECT_TRUE(channel->WaitForStateChange(GRPC_CHANNEL_CONNECTING,
grpc_timeout_seconds_to_deadline(1)));
hold2->Wait();
const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_log(GPR_INFO, "=== RESUMING SECOND ATTEMPT");
LOG(INFO) << "=== RESUMING SECOND ATTEMPT";
hold2->Resume();
// Elapsed time should be very short, much less than kInitialBackOffMs.
const grpc_core::Duration waited =
grpc_core::Duration::FromTimespec(gpr_time_sub(t1, t0));
gpr_log(GPR_DEBUG, "Waited %" PRId64 " milliseconds", waited.millis());
VLOG(2) << "Waited " << waited.millis() << " milliseconds";
EXPECT_LT(waited.millis(), 1000 * grpc_test_slowdown_factor());
}
@@ -958,21 +957,21 @@ TEST_F(PickFirstTest, Updates) {
auto stub = BuildStub(channel);
// Perform one RPC against the first server.
response_generator.SetNextResolution(GetServersPorts(0, 1));
gpr_log(GPR_INFO, "****** SET [0] *******");
LOG(INFO) << "****** SET [0] *******";
CheckRpcSendOk(DEBUG_LOCATION, stub);
EXPECT_EQ(servers_[0]->service_.request_count(), 1);
// An empty update will result in the channel going into TRANSIENT_FAILURE.
response_generator.SetNextResolution({});
gpr_log(GPR_INFO, "****** SET none *******");
LOG(INFO) << "****** SET none *******";
WaitForChannelNotReady(channel.get());
// Next update introduces servers_[1], making the channel recover.
response_generator.SetNextResolution(GetServersPorts(1, 2));
gpr_log(GPR_INFO, "****** SET [1] *******");
LOG(INFO) << "****** SET [1] *******";
WaitForChannelReady(channel.get());
WaitForServer(DEBUG_LOCATION, stub, 1);
// And again for servers_[2]
response_generator.SetNextResolution(GetServersPorts(2, 3));
gpr_log(GPR_INFO, "****** SET [2] *******");
LOG(INFO) << "****** SET [2] *******";
WaitForServer(DEBUG_LOCATION, stub, 2);
// Check LB policy name for the channel.
EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
@@ -991,7 +990,7 @@ TEST_F(PickFirstTest, UpdateSuperset) {
// Perform one RPC against the first server.
ports.emplace_back(servers_[0]->port_);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET [0] *******");
LOG(INFO) << "****** SET [0] *******";
CheckRpcSendOk(DEBUG_LOCATION, stub);
EXPECT_EQ(servers_[0]->service_.request_count(), 1);
servers_[0]->service_.ResetCounters();
@@ -1001,7 +1000,7 @@ TEST_F(PickFirstTest, UpdateSuperset) {
ports.emplace_back(servers_[1]->port_);
ports.emplace_back(servers_[0]->port_);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET superset *******");
LOG(INFO) << "****** SET superset *******";
CheckRpcSendOk(DEBUG_LOCATION, stub);
// We stick to the previously connected server.
WaitForServer(DEBUG_LOCATION, stub, 0);
@@ -1024,7 +1023,7 @@ TEST_F(PickFirstTest, UpdateToUnconnected) {
// Try to send rpcs against a list where the server is available.
ports.emplace_back(servers_[0]->port_);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET [0] *******");
LOG(INFO) << "****** SET [0] *******";
CheckRpcSendOk(DEBUG_LOCATION, stub);
// Send resolution for which all servers are currently unavailable. Eventually
@@ -1034,12 +1033,12 @@ TEST_F(PickFirstTest, UpdateToUnconnected) {
ports.emplace_back(grpc_pick_unused_port_or_die());
ports.emplace_back(servers_[1]->port_);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** SET [unavailable] *******");
LOG(INFO) << "****** SET [unavailable] *******";
EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
// Ensure that the last resolution was installed correctly by verifying that
// the channel becomes ready once one of its endpoints becomes available.
gpr_log(GPR_INFO, "****** StartServer(1) *******");
LOG(INFO) << "****** StartServer(1) *******";
StartServer(1);
EXPECT_TRUE(WaitForChannelReady(channel.get()));
}
@@ -1135,21 +1134,21 @@ TEST_F(PickFirstTest, ReresolutionNoSelected) {
// The initial resolution only contains dead ports. There won't be any
// selected subchannel. Re-resolution will return the same result.
response_generator.SetNextResolution(dead_ports);
gpr_log(GPR_INFO, "****** INITIAL RESOLUTION SET *******");
LOG(INFO) << "****** INITIAL RESOLUTION SET *******";
for (size_t i = 0; i < 10; ++i) {
CheckRpcSendFailure(
DEBUG_LOCATION, stub, StatusCode::UNAVAILABLE,
MakeConnectionFailureRegex("failed to connect to all addresses"));
}
// PF should request re-resolution.
gpr_log(GPR_INFO, "****** WAITING FOR RE-RESOLUTION *******");
LOG(INFO) << "****** WAITING FOR RE-RESOLUTION *******";
EXPECT_TRUE(response_generator.Get()->WaitForReresolutionRequest(
absl::Seconds(5 * grpc_test_slowdown_factor())));
gpr_log(GPR_INFO, "****** RE-RESOLUTION SEEN *******");
LOG(INFO) << "****** RE-RESOLUTION SEEN *******";
// Send a resolver result that contains reachable ports, so that the
// pick_first LB policy can recover soon.
response_generator.SetNextResolution(alive_ports);
gpr_log(GPR_INFO, "****** RE-RESOLUTION SENT *******");
LOG(INFO) << "****** RE-RESOLUTION SENT *******";
WaitForServer(DEBUG_LOCATION, stub, 0, [](const Status& status) {
EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
EXPECT_THAT(status.error_message(),
@@ -1169,12 +1168,12 @@ TEST_F(PickFirstTest, ReconnectWithoutNewResolverResult) {
auto channel = BuildChannel("pick_first", response_generator);
auto stub = BuildStub(channel);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
LOG(INFO) << "****** INITIAL CONNECTION *******";
WaitForServer(DEBUG_LOCATION, stub, 0);
gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
LOG(INFO) << "****** STOPPING SERVER ******";
servers_[0]->Shutdown();
EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
gpr_log(GPR_INFO, "****** RESTARTING SERVER ******");
LOG(INFO) << "****** RESTARTING SERVER ******";
StartServers(1, ports);
WaitForServer(DEBUG_LOCATION, stub, 0);
}
@@ -1188,12 +1187,12 @@ TEST_F(PickFirstTest, ReconnectWithoutNewResolverResultStartsFromTopOfList) {
auto channel = BuildChannel("pick_first", response_generator);
auto stub = BuildStub(channel);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** INITIAL CONNECTION *******");
LOG(INFO) << "****** INITIAL CONNECTION *******";
WaitForServer(DEBUG_LOCATION, stub, 1);
gpr_log(GPR_INFO, "****** STOPPING SERVER ******");
LOG(INFO) << "****** STOPPING SERVER ******";
servers_[1]->Shutdown();
EXPECT_TRUE(WaitForChannelNotReady(channel.get()));
gpr_log(GPR_INFO, "****** STARTING BOTH SERVERS ******");
LOG(INFO) << "****** STARTING BOTH SERVERS ******";
StartServers(2, ports);
WaitForServer(DEBUG_LOCATION, stub, 0);
}
@@ -1202,21 +1201,21 @@ TEST_F(PickFirstTest, FailsEmptyResolverUpdate) {
FakeResolverResponseGeneratorWrapper response_generator;
auto channel = BuildChannel("pick_first", response_generator);
auto stub = BuildStub(channel);
gpr_log(GPR_INFO, "****** SENDING INITIAL RESOLVER RESULT *******");
LOG(INFO) << "****** SENDING INITIAL RESOLVER RESULT *******";
// Send a resolver result with an empty address list and a callback
// that triggers a notification.
grpc_core::Notification notification;
grpc_core::Resolver::Result result;
result.addresses.emplace();
result.result_health_callback = [&](absl::Status status) {
gpr_log(GPR_INFO, "****** RESULT HEALTH CALLBACK *******");
LOG(INFO) << "****** RESULT HEALTH CALLBACK *******";
EXPECT_EQ(absl::StatusCode::kUnavailable, status.code());
EXPECT_EQ("address list must not be empty", status.message()) << status;
notification.Notify();
};
response_generator.SetResponse(std::move(result));
// Wait for channel to report TRANSIENT_FAILURE.
gpr_log(GPR_INFO, "****** TELLING CHANNEL TO CONNECT *******");
LOG(INFO) << "****** TELLING CHANNEL TO CONNECT *******";
auto predicate = [](grpc_connectivity_state state) {
return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
};
@@ -1225,10 +1224,10 @@ TEST_F(PickFirstTest, FailsEmptyResolverUpdate) {
// Callback should run.
notification.WaitForNotification();
// Return a valid address.
gpr_log(GPR_INFO, "****** SENDING NEXT RESOLVER RESULT *******");
LOG(INFO) << "****** SENDING NEXT RESOLVER RESULT *******";
StartServers(1);
response_generator.SetNextResolution(GetServersPorts());
gpr_log(GPR_INFO, "****** SENDING WAIT_FOR_READY RPC *******");
LOG(INFO) << "****** SENDING WAIT_FOR_READY RPC *******";
CheckRpcSendOk(DEBUG_LOCATION, stub, /*wait_for_ready=*/true);
}
@@ -1239,22 +1238,22 @@ TEST_F(PickFirstTest, CheckStateBeforeStartWatch) {
auto channel_1 = BuildChannel("pick_first", response_generator);
auto stub_1 = BuildStub(channel_1);
response_generator.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 1 *******");
LOG(INFO) << "****** RESOLUTION SET FOR CHANNEL 1 *******";
WaitForServer(DEBUG_LOCATION, stub_1, 0);
gpr_log(GPR_INFO, "****** CHANNEL 1 CONNECTED *******");
LOG(INFO) << "****** CHANNEL 1 CONNECTED *******";
servers_[0]->Shutdown();
EXPECT_TRUE(WaitForChannelNotReady(channel_1.get()));
// Channel 1 will receive a re-resolution containing the same server. It will
// create a new subchannel and hold a ref to it.
StartServers(1, ports);
gpr_log(GPR_INFO, "****** SERVER RESTARTED *******");
LOG(INFO) << "****** SERVER RESTARTED *******";
FakeResolverResponseGeneratorWrapper response_generator_2;
auto channel_2 = BuildChannel("pick_first", response_generator_2);
auto stub_2 = BuildStub(channel_2);
response_generator_2.SetNextResolution(ports);
gpr_log(GPR_INFO, "****** RESOLUTION SET FOR CHANNEL 2 *******");
LOG(INFO) << "****** RESOLUTION SET FOR CHANNEL 2 *******";
WaitForServer(DEBUG_LOCATION, stub_2, 0);
gpr_log(GPR_INFO, "****** CHANNEL 2 CONNECTED *******");
LOG(INFO) << "****** CHANNEL 2 CONNECTED *******";
servers_[0]->Shutdown();
// Wait until the disconnection has triggered the connectivity notification.
// Otherwise, the subchannel may be picked for next call but will fail soon.
@@ -1262,11 +1261,11 @@ TEST_F(PickFirstTest, CheckStateBeforeStartWatch) {
// Channel 2 will also receive a re-resolution containing the same server.
// Both channels will ref the same subchannel that failed.
StartServers(1, ports);
gpr_log(GPR_INFO, "****** SERVER RESTARTED AGAIN *******");
gpr_log(GPR_INFO, "****** CHANNEL 2 STARTING A CALL *******");
LOG(INFO) << "****** SERVER RESTARTED AGAIN *******";
LOG(INFO) << "****** CHANNEL 2 STARTING A CALL *******";
// The first call after the server restart will succeed.
CheckRpcSendOk(DEBUG_LOCATION, stub_2);
gpr_log(GPR_INFO, "****** CHANNEL 2 FINISHED A CALL *******");
LOG(INFO) << "****** CHANNEL 2 FINISHED A CALL *******";
// Check LB policy name for the channel.
EXPECT_EQ("pick_first", channel_1->GetLoadBalancingPolicyName());
// Check LB policy name for the channel.
@@ -1411,7 +1410,7 @@ TEST_F(RoundRobinTest, Updates) {
auto channel = BuildChannel("round_robin", response_generator);
auto stub = BuildStub(channel);
// Start with a single server.
gpr_log(GPR_INFO, "*** FIRST BACKEND ***");
LOG(INFO) << "*** FIRST BACKEND ***";
std::vector<int> ports = {servers_[0]->port_};
response_generator.SetNextResolution(ports);
WaitForServer(DEBUG_LOCATION, stub, 0);
@@ -1422,7 +1421,7 @@ TEST_F(RoundRobinTest, Updates) {
EXPECT_EQ(0, servers_[2]->service_.request_count());
ResetCounters();
// And now for the second server.
gpr_log(GPR_INFO, "*** SECOND BACKEND ***");
LOG(INFO) << "*** SECOND BACKEND ***";
ports.clear();
ports.emplace_back(servers_[1]->port_);
response_generator.SetNextResolution(ports);
@@ -1436,7 +1435,7 @@ TEST_F(RoundRobinTest, Updates) {
EXPECT_EQ(0, servers_[2]->service_.request_count());
ResetCounters();
// ... and for the last server.
gpr_log(GPR_INFO, "*** THIRD BACKEND ***");
LOG(INFO) << "*** THIRD BACKEND ***";
ports.clear();
ports.emplace_back(servers_[2]->port_);
response_generator.SetNextResolution(ports);
@@ -1447,7 +1446,7 @@ TEST_F(RoundRobinTest, Updates) {
EXPECT_EQ(10, servers_[2]->service_.request_count());
ResetCounters();
// Back to all servers.
gpr_log(GPR_INFO, "*** ALL BACKENDS ***");
LOG(INFO) << "*** ALL BACKENDS ***";
ports.clear();
ports.emplace_back(servers_[0]->port_);
ports.emplace_back(servers_[1]->port_);
@@ -1461,7 +1460,7 @@ TEST_F(RoundRobinTest, Updates) {
EXPECT_EQ(1, servers_[2]->service_.request_count());
ResetCounters();
// An empty update will result in the channel going into TRANSIENT_FAILURE.
gpr_log(GPR_INFO, "*** NO BACKENDS ***");
LOG(INFO) << "*** NO BACKENDS ***";
ports.clear();
response_generator.SetNextResolution(ports);
WaitForChannelNotReady(channel.get());
@@ -1469,7 +1468,7 @@ TEST_F(RoundRobinTest, Updates) {
"empty address list: fake resolver empty address list");
servers_[0]->service_.ResetCounters();
// Next update introduces servers_[1], making the channel recover.
gpr_log(GPR_INFO, "*** BACK TO SECOND BACKEND ***");
LOG(INFO) << "*** BACK TO SECOND BACKEND ***";
ports.clear();
ports.emplace_back(servers_[1]->port_);
response_generator.SetNextResolution(ports);
@@ -1550,15 +1549,15 @@ TEST_F(RoundRobinTest, ReresolveOnSubchannelConnectionFailure) {
// Wait for both servers to be seen.
WaitForServers(DEBUG_LOCATION, stub, 0, 2);
// Have server 0 send a GOAWAY. This should trigger a re-resolution.
gpr_log(GPR_INFO, "****** SENDING GOAWAY FROM SERVER 0 *******");
LOG(INFO) << "****** SENDING GOAWAY FROM SERVER 0 *******";
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Server::FromC(servers_[0]->server_->c_server())->SendGoaways();
}
gpr_log(GPR_INFO, "****** WAITING FOR RE-RESOLUTION REQUEST *******");
LOG(INFO) << "****** WAITING FOR RE-RESOLUTION REQUEST *******";
EXPECT_TRUE(response_generator.Get()->WaitForReresolutionRequest(
absl::Seconds(5 * grpc_test_slowdown_factor())));
gpr_log(GPR_INFO, "****** RE-RESOLUTION REQUEST SEEN *******");
LOG(INFO) << "****** RE-RESOLUTION REQUEST SEEN *******";
// Tell the fake resolver to send an update that adds the last server, but
// only when the LB policy requests re-resolution.
ports.push_back(servers_[2]->port_);
@@ -1571,7 +1570,7 @@ TEST_F(RoundRobinTest, FailsEmptyResolverUpdate) {
FakeResolverResponseGeneratorWrapper response_generator;
auto channel = BuildChannel("round_robin", response_generator);
auto stub = BuildStub(channel);
gpr_log(GPR_INFO, "****** SENDING INITIAL RESOLVER RESULT *******");
LOG(INFO) << "****** SENDING INITIAL RESOLVER RESULT *******";
// Send a resolver result with an empty address list and a callback
// that triggers a notification.
grpc_core::Notification notification;
@@ -1585,7 +1584,7 @@ TEST_F(RoundRobinTest, FailsEmptyResolverUpdate) {
};
response_generator.SetResponse(std::move(result));
// Wait for channel to report TRANSIENT_FAILURE.
gpr_log(GPR_INFO, "****** TELLING CHANNEL TO CONNECT *******");
LOG(INFO) << "****** TELLING CHANNEL TO CONNECT *******";
auto predicate = [](grpc_connectivity_state state) {
return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
};
@@ -1594,10 +1593,10 @@ TEST_F(RoundRobinTest, FailsEmptyResolverUpdate) {
// Callback should have been run.
notification.WaitForNotification();
// Return a valid address.
gpr_log(GPR_INFO, "****** SENDING NEXT RESOLVER RESULT *******");
LOG(INFO) << "****** SENDING NEXT RESOLVER RESULT *******";
StartServers(1);
response_generator.SetNextResolution(GetServersPorts());
gpr_log(GPR_INFO, "****** SENDING WAIT_FOR_READY RPC *******");
LOG(INFO) << "****** SENDING WAIT_FOR_READY RPC *******";
CheckRpcSendOk(DEBUG_LOCATION, stub, /*wait_for_ready=*/true);
}
@@ -1658,7 +1657,7 @@ TEST_F(RoundRobinTest, StaysInTransientFailureInSubsequentConnecting) {
response_generator.SetNextResolution({port});
// Allow first connection attempt to fail normally, and wait for
// channel to report TRANSIENT_FAILURE.
gpr_log(GPR_INFO, "=== WAITING FOR CHANNEL TO REPORT TF ===");
LOG(INFO) << "=== WAITING FOR CHANNEL TO REPORT TF ===";
auto predicate = [](grpc_connectivity_state state) {
return state == GRPC_CHANNEL_TRANSIENT_FAILURE;
};
@@ -1673,7 +1672,7 @@ TEST_F(RoundRobinTest, StaysInTransientFailureInSubsequentConnecting) {
EXPECT_EQ(GRPC_CHANNEL_TRANSIENT_FAILURE, channel->GetState(false));
// Send a few RPCs, just to give the channel a chance to propagate a
// new picker, in case it was going to incorrectly do so.
gpr_log(GPR_INFO, "=== EXPECTING RPCs TO FAIL ===");
LOG(INFO) << "=== EXPECTING RPCs TO FAIL ===";
for (size_t i = 0; i < 5; ++i) {
CheckRpcSendFailure(
DEBUG_LOCATION, stub, StatusCode::UNAVAILABLE,
@@ -1718,7 +1717,7 @@ TEST_F(RoundRobinTest, ReportsLatestStatusInTransientFailure) {
"Survey says... Bzzzzt!"))(status.error_message())) {
break;
}
gpr_log(GPR_INFO, "STATUS MESSAGE: %s", status.error_message().c_str());
LOG(INFO) << "STATUS MESSAGE: " << status.error_message();
EXPECT_THAT(status.error_message(),
::testing::MatchesRegex(MakeConnectionFailureRegex(
"connections to all backends failing")));
@@ -1740,21 +1739,21 @@ TEST_F(RoundRobinTest, DoesNotFailRpcsUponDisconnection) {
auto stub = BuildStub(channel);
response_generator.SetNextResolution(GetServersPorts());
// Start a thread constantly sending RPCs in a loop.
gpr_log(GPR_INFO, "=== STARTING CLIENT THREAD ===");
LOG(INFO) << "=== STARTING CLIENT THREAD ===";
std::atomic<bool> shutdown{false};
gpr_event ev;
gpr_event_init(&ev);
std::thread thd([&]() {
gpr_log(GPR_INFO, "sending first RPC");
LOG(INFO) << "sending first RPC";
CheckRpcSendOk(DEBUG_LOCATION, stub);
gpr_event_set(&ev, reinterpret_cast<void*>(1));
while (!shutdown.load()) {
gpr_log(GPR_INFO, "sending RPC");
LOG(INFO) << "sending RPC";
CheckRpcSendOk(DEBUG_LOCATION, stub);
}
});
// Wait for first RPC to complete.
gpr_log(GPR_INFO, "=== WAITING FOR FIRST RPC TO COMPLETE ===");
LOG(INFO) << "=== WAITING FOR FIRST RPC TO COMPLETE ===";
ASSERT_EQ(reinterpret_cast<void*>(1),
gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(1)));
// Channel should now be READY.
@@ -1765,7 +1764,7 @@ TEST_F(RoundRobinTest, DoesNotFailRpcsUponDisconnection) {
// Now kill the server. The subchannel should report IDLE and be
// immediately reconnected to, but this should not cause any test
// failures.
gpr_log(GPR_INFO, "=== SHUTTING DOWN SERVER ===");
LOG(INFO) << "=== SHUTTING DOWN SERVER ===";
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Server::FromC(servers_[0]->server_->c_server())->SendGoaways();
@@ -1773,17 +1772,17 @@ TEST_F(RoundRobinTest, DoesNotFailRpcsUponDisconnection) {
gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
servers_[0]->Shutdown();
// Wait for next attempt to start.
gpr_log(GPR_INFO, "=== WAITING FOR RECONNECTION ATTEMPT ===");
LOG(INFO) << "=== WAITING FOR RECONNECTION ATTEMPT ===";
hold1->Wait();
// Start server and allow attempt to continue.
gpr_log(GPR_INFO, "=== RESTARTING SERVER ===");
LOG(INFO) << "=== RESTARTING SERVER ===";
StartServer(0);
hold1->Resume();
// Wait for next attempt to complete.
gpr_log(GPR_INFO, "=== WAITING FOR RECONNECTION ATTEMPT TO COMPLETE ===");
LOG(INFO) << "=== WAITING FOR RECONNECTION ATTEMPT TO COMPLETE ===";
hold1->WaitForCompletion();
// Now shut down the thread.
gpr_log(GPR_INFO, "=== SHUTTING DOWN CLIENT THREAD ===");
LOG(INFO) << "=== SHUTTING DOWN CLIENT THREAD ===";
shutdown.store(true);
thd.join();
}
@@ -1871,14 +1870,13 @@ TEST_F(RoundRobinTest, HealthChecking) {
auto stub = BuildStub(channel);
response_generator.SetNextResolution(GetServersPorts());
// Channel should not become READY, because health checks should be failing.
gpr_log(GPR_INFO,
"*** initial state: unknown health check service name for "
"all servers");
LOG(INFO)
<< "*** initial state: unknown health check service name for all servers";
EXPECT_FALSE(WaitForChannelReady(channel.get(), 1));
// Now set one of the servers to be healthy.
// The channel should become healthy and all requests should go to
// the healthy server.
gpr_log(GPR_INFO, "*** server 0 healthy");
LOG(INFO) << "*** server 0 healthy";
servers_[0]->SetServingStatus("health_check_service_name", true);
EXPECT_TRUE(WaitForChannelReady(channel.get()));
// New channel state may be reported before the picker is updated, so
@@ -1891,7 +1889,7 @@ TEST_F(RoundRobinTest, HealthChecking) {
EXPECT_EQ(0, servers_[1]->service_.request_count());
EXPECT_EQ(0, servers_[2]->service_.request_count());
// Now set a second server to be healthy.
gpr_log(GPR_INFO, "*** server 2 healthy");
LOG(INFO) << "*** server 2 healthy";
servers_[2]->SetServingStatus("health_check_service_name", true);
WaitForServer(DEBUG_LOCATION, stub, 2);
for (int i = 0; i < 10; ++i) {
@@ -1901,7 +1899,7 @@ TEST_F(RoundRobinTest, HealthChecking) {
EXPECT_EQ(0, servers_[1]->service_.request_count());
EXPECT_EQ(5, servers_[2]->service_.request_count());
// Now set the remaining server to be healthy.
gpr_log(GPR_INFO, "*** server 1 healthy");
LOG(INFO) << "*** server 1 healthy";
servers_[1]->SetServingStatus("health_check_service_name", true);
WaitForServer(DEBUG_LOCATION, stub, 1);
for (int i = 0; i < 9; ++i) {
@@ -1914,7 +1912,7 @@ TEST_F(RoundRobinTest, HealthChecking) {
// unhealthiness has hit the client. We know that the client will see
// this when we send kNumServers requests and one of the remaining servers
// sees two of the requests.
gpr_log(GPR_INFO, "*** server 0 unhealthy");
LOG(INFO) << "*** server 0 unhealthy";
servers_[0]->SetServingStatus("health_check_service_name", false);
do {
ResetCounters();
@@ -1925,7 +1923,7 @@ TEST_F(RoundRobinTest, HealthChecking) {
servers_[2]->service_.request_count() != 2);
// Now set the remaining two servers to be unhealthy. Make sure the
// channel leaves READY state and that RPCs fail.
gpr_log(GPR_INFO, "*** all servers unhealthy");
LOG(INFO) << "*** all servers unhealthy";
servers_[1]->SetServingStatus("health_check_service_name", false);
servers_[2]->SetServingStatus("health_check_service_name", false);
EXPECT_TRUE(WaitForChannelNotReady(channel.get()));

@@ -27,6 +27,7 @@
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
@@ -225,7 +226,7 @@ class BalancerServiceImpl : public BalancerService {
shutdown_ = true;
}
ShutdownStream();
gpr_log(GPR_INFO, "LB[%p]: shut down", this);
LOG(INFO) << "LB[" << this << "]: shut down";
}
void set_client_load_reporting_interval_seconds(int seconds) {
@@ -285,11 +286,11 @@ class BalancerServiceImpl : public BalancerService {
private:
// Request handler.
Status BalanceLoad(ServerContext* context, Stream* stream) override {
gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
LOG(INFO) << "LB[" << this << "]: BalanceLoad";
{
grpc_core::MutexLock lock(&mu_);
if (shutdown_) {
gpr_log(GPR_INFO, "LB[%p]: shutdown at stream start", this);
LOG(INFO) << "LB[" << this << "]: shutdown at stream start";
return Status::OK;
}
}
@@ -310,7 +311,7 @@ class BalancerServiceImpl : public BalancerService {
// Read initial request.
LoadBalanceRequest request;
if (!stream->Read(&request)) {
gpr_log(GPR_INFO, "LB[%p]: stream read returned false", this);
LOG(INFO) << "LB[" << this << "]: stream read returned false";
return Status::OK;
}
EXPECT_TRUE(request.has_initial_request());
@@ -319,8 +320,8 @@ class BalancerServiceImpl : public BalancerService {
service_names_.push_back(request.initial_request().name());
}
IncreaseRequestCount();
gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this,
request.DebugString().c_str());
LOG(INFO) << "LB[" << this << "]: received initial message '"
<< request.DebugString() << "'";
// Send initial response.
LoadBalanceResponse response;
auto* initial_response = response.mutable_initial_response();
@@ -334,11 +335,11 @@ class BalancerServiceImpl : public BalancerService {
std::thread reader(std::bind(&BalancerServiceImpl::ReadThread, this, stream,
&reader_shutdown));
auto thread_cleanup = absl::MakeCleanup([&]() {
gpr_log(GPR_INFO, "shutting down reader thread");
LOG(INFO) << "shutting down reader thread";
reader_shutdown.Notify();
gpr_log(GPR_INFO, "joining reader thread");
LOG(INFO) << "joining reader thread";
reader.join();
gpr_log(GPR_INFO, "joining reader thread complete");
LOG(INFO) << "joining reader thread complete";
});
// Send responses as instructed by the test.
while (true) {
@@ -347,12 +348,12 @@ class BalancerServiceImpl : public BalancerService {
context->TryCancel();
break;
}
gpr_log(GPR_INFO, "LB[%p]: Sending response: %s", this,
response->DebugString().c_str());
LOG(INFO) << "LB[" << this
<< "]: Sending response: " << response->DebugString();
IncreaseResponseCount();
stream->Write(*response);
}
gpr_log(GPR_INFO, "LB[%p]: done", this);
LOG(INFO) << "LB[" << this << "]: done";
return Status::OK;
}
@@ -360,8 +361,8 @@ class BalancerServiceImpl : public BalancerService {
void ReadThread(Stream* stream, absl::Notification* shutdown) {
LoadBalanceRequest request;
while (!shutdown->HasBeenNotified() && stream->Read(&request)) {
gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'",
this, request.DebugString().c_str());
LOG(INFO) << "LB[" << this << "]: received client load report message '"
<< request.DebugString() << "'";
EXPECT_GT(client_load_reporting_interval_seconds_, 0);
EXPECT_TRUE(request.has_client_stats());
ClientStats load_report;
@@ -451,7 +452,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
~ServerThread() { Shutdown(); }
void Start() {
gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
LOG(INFO) << "starting " << type_ << " server on port " << port_;
CHECK(!running_);
running_ = true;
service_.Start();
@@ -463,7 +464,7 @@ class GrpclbEnd2endTest : public ::testing::Test {
thread_ = std::make_unique<std::thread>(
std::bind(&ServerThread::Serve, this, &mu, &cond));
cond.Wait(&mu);
gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
LOG(INFO) << type_ << " server startup complete";
}
void Serve(grpc_core::Mutex* mu, grpc_core::CondVar* cond) {
@@ -481,11 +482,11 @@ class GrpclbEnd2endTest : public ::testing::Test {
void Shutdown() {
if (!running_) return;
gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
LOG(INFO) << type_ << " about to shutdown";
service_.Shutdown();
server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
thread_->join();
gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
LOG(INFO) << type_ << " shutdown completed";
running_ = false;
}
@@ -667,8 +668,8 @@ class GrpclbEnd2endTest : public ::testing::Test {
size_t start_index = 0, size_t stop_index = 0,
WaitForBackendOptions options = WaitForBackendOptions(),
SourceLocation location = SourceLocation()) {
gpr_log(GPR_INFO, "Waiting for backends [%" PRIuPTR ", %" PRIuPTR ")",
start_index, stop_index);
LOG(INFO) << "Waiting for backends [" << start_index << ", " << stop_index
<< ")";
const absl::Time deadline =
absl::Now() +
absl::Seconds(options.timeout_seconds * grpc_test_slowdown_factor());
@@ -689,11 +690,11 @@ class GrpclbEnd2endTest : public ::testing::Test {
SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
}
ResetBackendCounters();
gpr_log(GPR_INFO,
"Performed %d warm up requests (a multiple of %d) against the "
"backends. %d succeeded, %d failed, %d dropped.",
num_total, options.num_requests_multiple_of, num_ok, num_failure,
num_drops);
LOG(INFO) << "Performed " << num_total
<< " warm up requests (a multiple of "
<< options.num_requests_multiple_of << ") against the backends. "
<< num_ok << " succeeded, " << num_failure << " failed, "
<< num_drops << " dropped.";
return std::make_tuple(num_ok, num_failure, num_drops);
}
@@ -1299,9 +1300,9 @@ TEST_F(GrpclbEnd2endTest,
SetNextResolutionDefaultBalancer();
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
LOG(INFO) << "========= BEFORE FIRST BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
LOG(INFO) << "========= DONE WITH FIRST BATCH ==========";
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->service().request_count());
EXPECT_EQ(0U, backends_[1]->service().request_count());
@@ -1314,9 +1315,9 @@ TEST_F(GrpclbEnd2endTest,
// Now tell the channel to use balancer 2. However, the stream to the
// default balancer is not terminated, so the client will continue to
// use it.
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
LOG(INFO) << "========= ABOUT TO UPDATE 1 ==========";
SetNextResolution({balancer2->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
LOG(INFO) << "========= UPDATE 1 DONE ==========";
// Now the default balancer sends backend 2.
SendBalancerResponse(BuildResponseForBackends({backends_[2]->port()}, {}));
WaitForBackend(2);
@@ -1339,9 +1340,9 @@ TEST_F(GrpclbEnd2endTest,
// Wait until the first backend is ready.
WaitForBackend(0);
// Send 10 requests.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
LOG(INFO) << "========= BEFORE FIRST BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
LOG(INFO) << "========= DONE WITH FIRST BATCH ==========";
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->service().request_count());
EXPECT_EQ(0U, backends_[1]->service().request_count());
@@ -1351,15 +1352,15 @@ TEST_F(GrpclbEnd2endTest,
EXPECT_EQ(0U, balancer2->service().request_count());
EXPECT_EQ(0U, balancer2->service().response_count());
// Send another address list with the same list of balancers.
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
LOG(INFO) << "========= ABOUT TO UPDATE 1 ==========";
SetNextResolution({balancer_->port(), balancer2->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
LOG(INFO) << "========= UPDATE 1 DONE ==========";
// Shut down the balancer stream to force the client to create a new one.
// The new stream should go to the default balancer, since the
// underlying connection should not have been broken.
gpr_log(GPR_INFO, "========= SHUTTING DOWN BALANCER CALL ==========");
LOG(INFO) << "========= SHUTTING DOWN BALANCER CALL ==========";
balancer_->service().ShutdownStream();
gpr_log(GPR_INFO, "========= DONE SHUTTING DOWN BALANCER CALL ==========");
LOG(INFO) << "========= DONE SHUTTING DOWN BALANCER CALL ==========";
// Wait until client has created a new balancer stream.
EXPECT_TRUE(balancer_->service().WaitForNewStream(1));
// Make sure there was only one client connection seen by the balancer.
@@ -1383,34 +1384,34 @@ TEST_F(GrpclbEnd2endTest, BalancerDiesThenSwitchToNewBalancer) {
EXPECT_EQ(0U, balancer2->service().request_count());
EXPECT_EQ(0U, balancer2->service().response_count());
// Send 10 RPCs.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
LOG(INFO) << "========= BEFORE FIRST BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
LOG(INFO) << "========= DONE WITH FIRST BATCH ==========";
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->service().request_count());
EXPECT_EQ(0U, backends_[1]->service().request_count());
// Kill default balancer.
gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER *************");
LOG(INFO) << "********** ABOUT TO KILL BALANCER *************";
balancer_->Shutdown();
gpr_log(GPR_INFO, "********** KILLED BALANCER *************");
LOG(INFO) << "********** KILLED BALANCER *************";
// Channel should continue using the last backend it saw from the
// balancer before the balancer died.
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
LOG(INFO) << "========= BEFORE SECOND BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
LOG(INFO) << "========= DONE WITH SECOND BATCH ==========";
// All 10 requests should again have gone to the first backend.
EXPECT_EQ(20U, backends_[0]->service().request_count());
EXPECT_EQ(0U, backends_[1]->service().request_count());
// Tell channel to start using balancer 2.
gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
LOG(INFO) << "========= ABOUT TO UPDATE 1 ==========";
SetNextResolution({balancer2->port()});
gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");
LOG(INFO) << "========= UPDATE 1 DONE ==========";
// Channel should start using backend 1.
WaitForBackend(1);
// This is serviced by the updated RR policy
gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
LOG(INFO) << "========= BEFORE THIRD BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
LOG(INFO) << "========= DONE WITH THIRD BATCH ==========";
// All 10 requests should have gone to the second backend.
EXPECT_EQ(0U, backends_[0]->service().request_count());
EXPECT_EQ(10U, backends_[1]->service().request_count());
@@ -1428,15 +1429,15 @@ TEST_F(GrpclbEnd2endTest, ReresolveDeadBackendWhileInFallback) {
// responds, and a fallback backend.
SetNextResolution({balancer_->port()}, {backends_[0]->port()});
// Start servers and send 10 RPCs per server.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
LOG(INFO) << "========= BEFORE FIRST BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
LOG(INFO) << "========= DONE WITH FIRST BATCH ==========";
// All 10 requests should have gone to the fallback backend.
EXPECT_EQ(10U, backends_[0]->service().request_count());
// Kill backend 0.
gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
LOG(INFO) << "********** ABOUT TO KILL BACKEND 0 *************";
backends_[0]->Shutdown();
gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");
LOG(INFO) << "********** KILLED BACKEND 0 *************";
// This should trigger re-resolution.
EXPECT_TRUE(response_generator_->WaitForReresolutionRequest(
absl::Seconds(5 * grpc_test_slowdown_factor())));
@@ -1446,9 +1447,9 @@ TEST_F(GrpclbEnd2endTest, ReresolveDeadBackendWhileInFallback) {
// Wait until re-resolution has been seen, as signaled by the second backend
// receiving a request.
WaitForBackend(1);
gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
LOG(INFO) << "========= BEFORE SECOND BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
LOG(INFO) << "========= DONE WITH SECOND BATCH ==========";
// All 10 requests should have gone to the second backend.
EXPECT_EQ(10U, backends_[1]->service().request_count());
EXPECT_EQ(1U, balancer_->service().request_count());
@@ -1467,9 +1468,9 @@ TEST_F(GrpclbEnd2endTest, ReresolveWhenBalancerCallFails) {
SetNextResolutionDefaultBalancer();
WaitForBackend(0);
// Send 10 RPCs.
gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
LOG(INFO) << "========= BEFORE FIRST BATCH ==========";
CheckRpcSendOk(10);
gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
LOG(INFO) << "========= DONE WITH FIRST BATCH ==========";
// All 10 requests should have gone to the first backend.
EXPECT_EQ(10U, backends_[0]->service().request_count());
// Balancer 0 got a single request and sent a single request.
@@ -1478,13 +1479,13 @@ TEST_F(GrpclbEnd2endTest, ReresolveWhenBalancerCallFails) {
EXPECT_EQ(0U, balancer2->service().request_count());
EXPECT_EQ(0U, balancer2->service().response_count());
// Kill balancer 0.
gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
LOG(INFO) << "********** ABOUT TO KILL BALANCER 0 *************";
balancer_->Shutdown();
gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");
LOG(INFO) << "********** KILLED BALANCER 0 *************";
// This should trigger a re-resolution.
EXPECT_TRUE(response_generator_->WaitForReresolutionRequest(
absl::Seconds(5 * grpc_test_slowdown_factor())));
gpr_log(GPR_INFO, "********** SAW RE-RESOLUTION REQUEST *************");
LOG(INFO) << "********** SAW RE-RESOLUTION REQUEST *************";
// Re-resolution result switches to balancer 2.
SetNextResolution({balancer2->port()});
// Client should start using backend 1.
