[Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log (#37148)

In this CL we are migrating from gRPC's own gpr logging mechanism to the absl logging mechanism. The intention is to deprecate gpr_log in the future.

We have the following mapping:

1. gpr_log(GPR_INFO,...) -> LOG(INFO)
2. gpr_log(GPR_ERROR,...) -> LOG(ERROR)
3. gpr_log(GPR_DEBUG,...) -> VLOG(2)
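
For illustration, a minimal before/after sketch of this mapping (the function and call sites are hypothetical, not code from this PR):

#include <grpc/support/log.h>  // gpr_log, GPR_INFO, GPR_ERROR, GPR_DEBUG
#include "absl/log/log.h"      // LOG, VLOG

void LogBeforeAfter(const char* addr, int millis) {
  // Before: printf-style gpr logging.
  gpr_log(GPR_INFO, "connected to %s in %d ms", addr, millis);
  gpr_log(GPR_ERROR, "connection to %s failed", addr);
  gpr_log(GPR_DEBUG, "polling %s", addr);
  // After: streaming absl logging, following the mapping above.
  LOG(INFO) << "connected to " << addr << " in " << millis << " ms";
  LOG(ERROR) << "connection to " << addr << " failed";
  VLOG(2) << "polling " << addr;
}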

Reviewers need to check:

1. Whether the above mapping is correct.
2. Whether the content of each log message is preserved.

Note that gpr_log format strings could not take absl::string_view or std::string arguments directly, while absl LOG streams accept both. Some string_view- and std::string-related conversions (such as .c_str() calls) therefore disappear in this CL. This is expected.
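
As a concrete example of such an elimination (hypothetical function, not from this PR):

#include <string>

#include <grpc/support/log.h>
#include "absl/log/log.h"

void LogPeer(const std::string& peer) {
  // Before: %s requires a NUL-terminated C string, so .c_str() was needed.
  gpr_log(GPR_ERROR, "lost contact with %s", peer.c_str());
  // After: absl LOG streams std::string (and absl::string_view) directly.
  LOG(ERROR) << "lost contact with " << peer;
}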

Closes #37148

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37148 from tanvi-jagtap:src_core_lib_grpp_time 525cf70e24
PiperOrigin-RevId: 650460526
Tanvi Jagtap, committed by Copybara-Service
parent b0e1a52da2
commit f64b0494c9
Files changed (changed line counts in parentheses):

1. src/core/BUILD (3)
2. src/core/lib/event_engine/posix_engine/posix_endpoint.cc (18)
3. src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc (17)
4. src/core/lib/event_engine/thread_pool/thread_count.cc (5)
5. src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc (6)
6. src/core/lib/event_engine/windows/windows_engine.cc (4)
7. src/core/lib/gprpp/time.h (16)
8. src/core/lib/iomgr/socket_utils_common_posix.cc (17)
9. src/core/lib/iomgr/tcp_posix.cc (6)
10. src/core/lib/iomgr/tcp_server_posix.cc (3)
11. test/core/event_engine/event_engine_test_utils.cc (8)

src/core/BUILD
@@ -1910,6 +1910,7 @@ grpc_cc_library(
     ],
     hdrs = ["lib/event_engine/thread_pool/thread_count.h"],
     external_deps = [
+        "absl/log:log",
         "absl/base:core_headers",
         "absl/time",
         "absl/status",
@@ -2280,7 +2281,7 @@ grpc_cc_library(
         "absl/container:flat_hash_map",
         "absl/functional:any_invocable",
         "absl/hash",
-        "absl/log",
+        "absl/log:log",
         "absl/log:check",
         "absl/status",
         "absl/status:statusor",

src/core/lib/event_engine/posix_engine/posix_endpoint.cc
@@ -970,15 +970,15 @@ bool PosixEndpointImpl::DoFlushZerocopy(TcpZerocopySendRecord* record,
       handle_->SetWritable();
     } else {
 #ifdef GRPC_LINUX_ERRQUEUE
-      GRPC_LOG_EVERY_N_SEC(
-          1, GPR_INFO,
-          "Tx0cp encountered an ENOBUFS error possibly because one or "
-          "both of RLIMIT_MEMLOCK or hard memlock ulimit values are too "
-          "small for the intended user. Current system value of "
-          "RLIMIT_MEMLOCK is %" PRIu64 " and hard memlock ulimit is %" PRIu64
-          ".Consider increasing these values appropriately for the intended "
-          "user.",
-          GetRLimitMemLockMax(), GetUlimitHardMemLock());
+      LOG_EVERY_N_SEC(INFO, 1)
+          << "Tx0cp encountered an ENOBUFS error possibly because one or "
+             "both of RLIMIT_MEMLOCK or hard memlock ulimit values are too "
+             "small for the intended user. Current system value of "
+             "RLIMIT_MEMLOCK is "
+          << GetRLimitMemLockMax() << " and hard memlock ulimit is "
+          << GetUlimitHardMemLock()
+          << ". Consider increasing these values appropriately for the "
+             "intended user.";
 #endif
     }
   }
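
A note on semantics: the hand-rolled GRPC_LOG_EVERY_N_SEC throttling above is replaced by absl's built-in LOG_EVERY_N_SEC, which rate-limits per call site. A minimal sketch (the Tick function is hypothetical, not from this PR):

#include "absl/log/log.h"

void Tick(int i) {
  // Emits at most one log line per second from this statement,
  // no matter how often Tick() is called.
  LOG_EVERY_N_SEC(INFO, 1) << "tick " << i;
}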

src/core/lib/event_engine/posix_engine/tcp_socket_utils.cc
@@ -106,14 +106,15 @@ int CreateSocket(std::function<int(int, int, int)> socket_factory, int family,
                 : socket(family, type, protocol);
   if (res < 0 && errno == EMFILE) {
     int saved_errno = errno;
-    GRPC_LOG_EVERY_N_SEC(
-        10, GPR_ERROR,
-        "socket(%d, %d, %d) returned %d with error: |%s|. This process "
-        "might not have a sufficient file descriptor limit for the number "
-        "of connections grpc wants to open (which is generally a function of "
-        "the number of grpc channels, the lb policy of each channel, and the "
-        "number of backends each channel is load balancing across).",
-        family, type, protocol, res, grpc_core::StrError(errno).c_str());
+    LOG_EVERY_N_SEC(ERROR, 10)
+        << "socket(" << family << ", " << type << ", " << protocol
+        << ") returned " << res << " with error: |"
+        << grpc_core::StrError(errno)
+        << "|. This process might not have a sufficient file descriptor limit "
+           "for the number of connections grpc wants to open (which is "
+           "generally a function of the number of grpc channels, the lb policy "
+           "of each channel, and the number of backends each channel is load "
+           "balancing across).";
     errno = saved_errno;
   }
   return res;

src/core/lib/event_engine/thread_pool/thread_count.cc
@@ -17,6 +17,7 @@
 #include <cstddef>
 
+#include "absl/log/log.h"
 #include "absl/status/status.h"
 #include "absl/strings/str_format.h"
 #include "absl/time/clock.h"
@@ -52,8 +53,8 @@ absl::Status LivingThreadCount::BlockUntilThreadCount(
       prev_thread_count = curr_threads;
       timeout_baseline = grpc_core::Timestamp::Now();
     }
-    GRPC_LOG_EVERY_N_SEC_DELAYED(
-        log_rate.seconds(), GPR_DEBUG,
+    GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG(
+        log_rate.seconds(),
         "Waiting for thread pool to idle before %s. (%" PRIdPTR " to %" PRIdPTR
         "). Timing out in %0.f seconds.",
         why, curr_threads, desired_threads,

src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc
@@ -412,9 +412,9 @@ void WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::
 WorkStealingThreadPool::WorkStealingThreadPoolImpl::Lifeguard::~Lifeguard() {
   lifeguard_should_shut_down_->Notify();
   while (lifeguard_running_.load(std::memory_order_relaxed)) {
-    GRPC_LOG_EVERY_N_SEC_DELAYED(kBlockingQuiesceLogRateSeconds, GPR_DEBUG,
-                                 "%s",
-                                 "Waiting for lifeguard thread to shut down");
+    GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG(
+        kBlockingQuiesceLogRateSeconds, "%s",
+        "Waiting for lifeguard thread to shut down");
     lifeguard_is_shut_down_->WaitForNotification();
   }
   // Do an additional wait in case this method races with LifeguardMain's

src/core/lib/event_engine/windows/windows_engine.cc
@@ -228,8 +228,8 @@ WindowsEventEngine::~WindowsEventEngine() {
       timer_manager_.Now() + grpc_core::Duration::FromSecondsAsDouble(10);
   while (!known_handles_.empty() && timer_manager_.Now() < deadline) {
     if (GRPC_TRACE_FLAG_ENABLED(event_engine)) {
-      GRPC_LOG_EVERY_N_SEC(1, GPR_DEBUG, "Waiting for timers. %d remaining",
-                           known_handles_.size());
+      VLOG_EVERY_N_SEC(2, 1) << "Waiting for timers. "
+                             << known_handles_.size() << " remaining";
     }
     task_mu_.Unlock();
     absl::SleepFor(absl::Milliseconds(200));

src/core/lib/gprpp/time.h
@@ -31,19 +31,7 @@
 #include "src/core/util/time_precise.h"
 #include "src/core/util/useful.h"
 
-#define GRPC_LOG_EVERY_N_SEC(n, severity, format, ...)           \
-  do {                                                           \
-    static std::atomic<uint64_t> prev{0};                        \
-    uint64_t now = grpc_core::Timestamp::FromTimespecRoundDown(  \
-                       gpr_now(GPR_CLOCK_MONOTONIC))             \
-                       .milliseconds_after_process_epoch();      \
-    if (prev == 0 || now - prev > (n) * 1000) {                  \
-      prev = now;                                                \
-      gpr_log(severity, format, __VA_ARGS__);                    \
-    }                                                            \
-  } while (0)
-
-#define GRPC_LOG_EVERY_N_SEC_DELAYED(n, severity, format, ...)   \
+#define GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG(n, format, ...)       \
   do {                                                           \
     static std::atomic<uint64_t> prev{0};                        \
     uint64_t now = grpc_core::Timestamp::FromTimespecRoundDown(  \
@@ -52,7 +40,7 @@
     if (prev == 0) prev = now;                                   \
     if (now - prev > (n) * 1000) {                               \
       prev = now;                                                \
-      gpr_log(severity, format, __VA_ARGS__);                    \
+      VLOG(2) << absl::StrFormat(format, __VA_ARGS__);           \
     }                                                            \
   } while (0)
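
The retained macro, renamed GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG, keeps the "delayed" throttling (the first call only seeds the timestamp) but now always logs through VLOG(2), so output appears only at verbosity level 2 or higher. A hedged usage sketch (the BusyWait function and its loop are hypothetical, not from this PR):

#include "src/core/lib/gprpp/time.h"  // GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG

void BusyWait(int pending) {
  for (int i = 0; i < 1000000; ++i) {
    // Silent for the first ~5 seconds, then logs via VLOG(2) at most
    // once every 5 seconds.
    GRPC_LOG_EVERY_N_SEC_DELAYED_DEBUG(5, "still waiting on %d items",
                                       pending);
  }
}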

src/core/lib/iomgr/socket_utils_common_posix.cc
@@ -477,14 +477,15 @@ static int create_socket(grpc_socket_factory* factory, int domain, int type,
                 : socket(domain, type, protocol);
   if (res < 0 && errno == EMFILE) {
     int saved_errno = errno;
-    GRPC_LOG_EVERY_N_SEC(
-        10, GPR_ERROR,
-        "socket(%d, %d, %d) returned %d with error: |%s|. This process "
-        "might not have a sufficient file descriptor limit for the number "
-        "of connections grpc wants to open (which is generally a function of "
-        "the number of grpc channels, the lb policy of each channel, and the "
-        "number of backends each channel is load balancing across).",
-        domain, type, protocol, res, grpc_core::StrError(errno).c_str());
+    LOG_EVERY_N_SEC(ERROR, 10)
+        << "socket(" << domain << ", " << type << ", " << protocol
+        << ") returned " << res << " with error: |"
+        << grpc_core::StrError(errno)
+        << "|. This process might not have a sufficient file descriptor limit "
+           "for the number of connections grpc wants to open (which is "
+           "generally a function of the number of grpc channels, the lb policy "
+           "of each channel, and the number of backends each channel is load "
+           "balancing across).";
     errno = saved_errno;
   }
   return res;

src/core/lib/iomgr/tcp_posix.cc
@@ -592,9 +592,9 @@ void LogCommonIOErrors(absl::string_view prefix, int error_no) {
       return;
     default:
       grpc_core::global_stats().IncrementUncommonIoErrorCount();
-      GRPC_LOG_EVERY_N_SEC(1, GPR_ERROR, "%s encountered uncommon error: %s",
-                           prefix.data(),
-                           grpc_core::StrError(error_no).c_str());
+      LOG_EVERY_N_SEC(ERROR, 1)
+          << prefix.data()
+          << " encountered uncommon error: " << grpc_core::StrError(error_no);
       return;
   }
 }

src/core/lib/iomgr/tcp_server_posix.cc
@@ -394,8 +394,7 @@ static void on_read(void* arg, grpc_error_handle err) {
       // This is not a performant code path, but if an fd limit has been
       // reached, the system is likely in an unhappy state regardless.
       if (errno == EMFILE) {
-        GRPC_LOG_EVERY_N_SEC(1, GPR_ERROR, "%s",
-                             "File descriptor limit reached. Retrying.");
+        LOG_EVERY_N_SEC(ERROR, 1) << "File descriptor limit reached. Retrying.";
         grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
         if (gpr_atm_full_xchg(&sp->retry_timer_armed, true)) return;
         grpc_timer_init(&sp->retry_timer,

test/core/event_engine/event_engine_test_utils.cc
@@ -97,10 +97,10 @@ void WaitForSingleOwnerWithTimeout(std::shared_ptr<EventEngine> engine,
     if (remaining < std::chrono::seconds{0}) {
       grpc_core::Crash("Timed out waiting for a single EventEngine owner");
     }
-    GRPC_LOG_EVERY_N_SEC(
-        2, GPR_INFO, "engine.use_count() = %ld timeout_remaining = %s",
-        engine.use_count(),
-        absl::FormatDuration(absl::Nanoseconds(remaining.count())).c_str());
+    LOG_EVERY_N_SEC(INFO, 2)
+        << "engine.use_count() = " << engine.use_count()
+        << " timeout_remaining = "
+        << absl::FormatDuration(absl::Nanoseconds(remaining.count()));
     absl::SleepFor(absl::Milliseconds(100));
   }
 }
