[Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log (#37093)

[Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log
In this CL we are migrating from gRPC's own gpr logging mechanism to the absl logging mechanism. The intention is to deprecate gpr_log in the future.

We have the following mapping

1. gpr_log(GPR_INFO,...) -> LOG(INFO)
2. gpr_log(GPR_ERROR,...) -> LOG(ERROR)
3. gpr_log(GPR_DEBUG,...) -> VLOG(2)

Reviewers need to check:

1. If the above mapping is correct.
2. The content of the log is as before.
gpr_log format strings did not accept string_view or std::string, but absl LOG does. As a result, some string_view- and std::string-related conversions are eliminated in this change. This is expected.

Closes #37093

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/37093 from tanvi-jagtap:src_core_lib_misc 4c67a380e0
PiperOrigin-RevId: 648292698
pull/37123/head
Tanvi Jagtap 10 months ago committed by Copybara-Service
parent d309fe2f73
commit 74b7c7248b
  1. 33
      src/core/lib/gprpp/work_serializer.cc
  2. 29
      src/core/lib/resource_quota/memory_quota.cc
  3. 5
      src/core/lib/resource_quota/memory_quota.h
  4. 10
      src/core/lib/slice/slice_refcount.h

@ -31,7 +31,6 @@
#include "absl/log/log.h" #include "absl/log/log.h"
#include <grpc/event_engine/event_engine.h> #include <grpc/event_engine/event_engine.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
@ -138,8 +137,8 @@ class WorkSerializer::LegacyWorkSerializer final : public WorkSerializerImpl {
void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback, void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
const DebugLocation& location) { const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::Run() %p Scheduling callback [%s:%d]", LOG(INFO) << "WorkSerializer::Run() " << this << " Scheduling callback ["
this, location.file(), location.line()); << location.file() << ":" << location.line() << "]";
} }
// Increment queue size for the new callback and owner count to attempt to // Increment queue size for the new callback and owner count to attempt to
// take ownership of the WorkSerializer. // take ownership of the WorkSerializer.
@ -164,7 +163,7 @@ void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
CallbackWrapper* cb_wrapper = CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location); new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, " Scheduling on queue : item %p", cb_wrapper); LOG(INFO) << " Scheduling on queue : item " << cb_wrapper;
} }
queue_.Push(&cb_wrapper->mpscq_node); queue_.Push(&cb_wrapper->mpscq_node);
} }
@ -175,9 +174,9 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
CallbackWrapper* cb_wrapper = CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location); new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, LOG(INFO) << "WorkSerializer::Schedule() " << this
"WorkSerializer::Schedule() %p Scheduling callback %p [%s:%d]", << " Scheduling callback " << cb_wrapper << " ["
this, cb_wrapper, location.file(), location.line()); << location.file() << ":" << location.line() << "]";
} }
refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_acq_rel); refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_acq_rel);
queue_.Push(&cb_wrapper->mpscq_node); queue_.Push(&cb_wrapper->mpscq_node);
@ -185,7 +184,7 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
void WorkSerializer::LegacyWorkSerializer::Orphan() { void WorkSerializer::LegacyWorkSerializer::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::Orphan() %p", this); LOG(INFO) << "WorkSerializer::Orphan() " << this;
} }
const uint64_t prev_ref_pair = const uint64_t prev_ref_pair =
refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel); refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel);
@ -199,7 +198,7 @@ void WorkSerializer::LegacyWorkSerializer::Orphan() {
// execute all the scheduled callbacks. // execute all the scheduled callbacks.
void WorkSerializer::LegacyWorkSerializer::DrainQueue() { void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::DrainQueue() %p", this); LOG(INFO) << "WorkSerializer::DrainQueue() " << this;
} }
// Attempt to take ownership of the WorkSerializer. Also increment the queue // Attempt to take ownership of the WorkSerializer. Also increment the queue
// size as required by `DrainQueueOwned()`. // size as required by `DrainQueueOwned()`.
@ -220,7 +219,7 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() { void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::DrainQueueOwned() %p", this); LOG(INFO) << "WorkSerializer::DrainQueueOwned() " << this;
} }
while (true) { while (true) {
auto prev_ref_pair = refs_.fetch_sub(MakeRefPair(0, 1)); auto prev_ref_pair = refs_.fetch_sub(MakeRefPair(0, 1));
@ -267,9 +266,9 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
<< " Queue returned nullptr, trying again"; << " Queue returned nullptr, trying again";
} }
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, " Running item %p : callback scheduled at [%s:%d]", LOG(INFO) << " Running item " << cb_wrapper
cb_wrapper, cb_wrapper->location.file(), << " : callback scheduled at [" << cb_wrapper->location.file()
cb_wrapper->location.line()); << ":" << cb_wrapper->location.line() << "]";
} }
cb_wrapper->callback(); cb_wrapper->callback();
delete cb_wrapper; delete cb_wrapper;
@ -407,8 +406,8 @@ void WorkSerializer::DispatchingWorkSerializer::Orphan() {
void WorkSerializer::DispatchingWorkSerializer::Run( void WorkSerializer::DispatchingWorkSerializer::Run(
std::function<void()> callback, const DebugLocation& location) { std::function<void()> callback, const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer[%p] Scheduling callback [%s:%d]", this, LOG(INFO) << "WorkSerializer[" << this << "] Scheduling callback ["
location.file(), location.line()); << location.file() << ":" << location.line() << "]";
} }
global_stats().IncrementWorkSerializerItemsEnqueued(); global_stats().IncrementWorkSerializerItemsEnqueued();
MutexLock lock(&mu_); MutexLock lock(&mu_);
@ -440,8 +439,8 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
// queue since processing_ is stored in reverse order. // queue since processing_ is stored in reverse order.
auto& cb = processing_.back(); auto& cb = processing_.back();
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) { if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer[%p] Executing callback [%s:%d]", this, LOG(INFO) << "WorkSerializer[" << this << "] Executing callback ["
cb.location.file(), cb.location.line()); << cb.location.file() << ":" << cb.location.line() << "]";
} }
// Run the work item. // Run the work item.
const auto start = std::chrono::steady_clock::now(); const auto start = std::chrono::steady_clock::now();

@ -26,6 +26,7 @@
#include <utility> #include <utility>
#include "absl/log/check.h" #include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h" #include "absl/status/status.h"
#include "absl/strings/str_cat.h" #include "absl/strings/str_cat.h"
@ -355,7 +356,7 @@ void GrpcMemoryAllocatorImpl::MaybeDonateBack() {
std::memory_order_acq_rel, std::memory_order_acq_rel,
std::memory_order_acquire)) { std::memory_order_acquire)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "[%p] Early return %" PRIdPTR " bytes", this, ret); LOG(INFO) << "[" << this << "] Early return " << ret << " bytes";
} }
CHECK(taken_bytes_.fetch_sub(ret, std::memory_order_relaxed) >= ret); CHECK(taken_bytes_.fetch_sub(ret, std::memory_order_relaxed) >= ret);
memory_quota_->Return(ret); memory_quota_->Return(ret);
@ -452,10 +453,9 @@ void BasicMemoryQuota::Start() {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
double free = std::max(intptr_t{0}, self->free_bytes_.load()); double free = std::max(intptr_t{0}, self->free_bytes_.load());
size_t quota_size = self->quota_size_.load(); size_t quota_size = self->quota_size_.load();
gpr_log(GPR_INFO, LOG(INFO) << "RQ: " << self->name_ << " perform " << std::get<0>(arg)
"RQ: %s perform %s reclamation. Available free bytes: %f, " << " reclamation. Available free bytes: " << free
"total quota_size: %zu", << ", total quota_size: " << quota_size;
self->name_.c_str(), std::get<0>(arg), free, quota_size);
} }
// One of the reclaimer queues gave us a way to get back memory. // One of the reclaimer queues gave us a way to get back memory.
// Call the reclaimer with a token that contains enough to wake us // Call the reclaimer with a token that contains enough to wake us
@ -535,10 +535,9 @@ void BasicMemoryQuota::FinishReclamation(uint64_t token, Waker waker) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
double free = std::max(intptr_t{0}, free_bytes_.load()); double free = std::max(intptr_t{0}, free_bytes_.load());
size_t quota_size = quota_size_.load(); size_t quota_size = quota_size_.load();
gpr_log(GPR_INFO, LOG(INFO) << "RQ: " << name_
"RQ: %s reclamation complete. Available free bytes: %f, " << " reclamation complete. Available free bytes: " << free
"total quota_size: %zu", << ", total quota_size: " << quota_size;
name_.c_str(), free, quota_size);
} }
waker.Wakeup(); waker.Wakeup();
} }
@ -550,7 +549,7 @@ void BasicMemoryQuota::Return(size_t amount) {
void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) { void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "Adding allocator %p", allocator); LOG(INFO) << "Adding allocator " << allocator;
} }
AllocatorBucket::Shard& shard = small_allocators_.SelectShard(allocator); AllocatorBucket::Shard& shard = small_allocators_.SelectShard(allocator);
@ -563,7 +562,7 @@ void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
void BasicMemoryQuota::RemoveAllocator(GrpcMemoryAllocatorImpl* allocator) { void BasicMemoryQuota::RemoveAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "Removing allocator %p", allocator); LOG(INFO) << "Removing allocator " << allocator;
} }
AllocatorBucket::Shard& small_shard = AllocatorBucket::Shard& small_shard =
@ -610,7 +609,7 @@ void BasicMemoryQuota::MaybeMoveAllocator(GrpcMemoryAllocatorImpl* allocator,
void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall( void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
GrpcMemoryAllocatorImpl* allocator) { GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "Moving allocator %p to small", allocator); LOG(INFO) << "Moving allocator " << allocator << " to small";
} }
AllocatorBucket::Shard& old_shard = big_allocators_.SelectShard(allocator); AllocatorBucket::Shard& old_shard = big_allocators_.SelectShard(allocator);
@ -631,7 +630,7 @@ void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
void BasicMemoryQuota::MaybeMoveAllocatorSmallToBig( void BasicMemoryQuota::MaybeMoveAllocatorSmallToBig(
GrpcMemoryAllocatorImpl* allocator) { GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "Moving allocator %p to big", allocator); LOG(INFO) << "Moving allocator " << allocator << " to big";
} }
AllocatorBucket::Shard& old_shard = small_allocators_.SelectShard(allocator); AllocatorBucket::Shard& old_shard = small_allocators_.SelectShard(allocator);
@ -768,8 +767,8 @@ double PressureTracker::AddSampleAndGetControlValue(double sample) {
report = controller_.Update(current_estimate - kSetPoint); report = controller_.Update(current_estimate - kSetPoint);
} }
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "RQ: pressure:%lf report:%lf controller:%s", LOG(INFO) << "RQ: pressure:" << current_estimate << " report:" << report
current_estimate, report, controller_.DebugString().c_str()); << " controller:" << controller_.DebugString();
} }
report_.store(report, std::memory_order_relaxed); report_.store(report, std::memory_order_relaxed);
}); });

@ -29,12 +29,12 @@
#include "absl/base/thread_annotations.h" #include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h" #include "absl/container/flat_hash_set.h"
#include "absl/log/check.h" #include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h" #include "absl/strings/string_view.h"
#include "absl/types/optional.h" #include "absl/types/optional.h"
#include <grpc/event_engine/memory_allocator.h> #include <grpc/event_engine/memory_allocator.h>
#include <grpc/event_engine/memory_request.h> #include <grpc/event_engine/memory_request.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
@ -426,7 +426,8 @@ class GrpcMemoryAllocatorImpl final : public EventEngineMemoryAllocatorImpl {
size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel); size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel);
if (ret == 0) return; if (ret == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) { if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "Allocator %p returning %zu bytes to quota", this, ret); LOG(INFO) << "Allocator " << this << " returning " << ret
<< " bytes to quota";
} }
taken_bytes_.fetch_sub(ret, std::memory_order_relaxed); taken_bytes_.fetch_sub(ret, std::memory_order_relaxed);
memory_quota_->Return(ret); memory_quota_->Return(ret);

@ -20,7 +20,6 @@
#include <atomic> #include <atomic>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h> #include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h" #include "src/core/lib/debug/trace.h"
@ -48,16 +47,15 @@ struct grpc_slice_refcount {
void Ref(grpc_core::DebugLocation location) { void Ref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed); auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) { if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) {
gpr_log(location.file(), location.line(), GPR_LOG_SEVERITY_INFO, LOG(INFO).AtLocation(location.file(), location.line())
"REF %p %" PRIdPTR "->%" PRIdPTR, this, prev_refs, prev_refs + 1); << "REF " << this << " " << prev_refs << "->" << prev_refs + 1;
} }
} }
void Unref(grpc_core::DebugLocation location) { void Unref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel); auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel);
if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) { if (GRPC_TRACE_FLAG_ENABLED(slice_refcount)) {
gpr_log(location.file(), location.line(), GPR_LOG_SEVERITY_INFO, LOG(INFO).AtLocation(location.file(), location.line())
"UNREF %p %" PRIdPTR "->%" PRIdPTR, this, prev_refs, << "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
prev_refs - 1);
} }
if (prev_refs == 1) { if (prev_refs == 1) {
destroyer_fn_(this); destroyer_fn_(this);

Loading…
Cancel
Save