[Gpr_To_Absl_Logging] Using GRPC_TRACE_VLOG instead of GRPC_TRACE_LOG

pull/37516/head
tanvi-jagtap 3 months ago
parent d59586c8fc
commit c21f60f58a
21 files changed (lines changed in parentheses):

 1. src/core/ext/transport/chaotic_good/server_transport.cc (2)
 2. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (18)
 3. src/core/ext/transport/chttp2/transport/writing.cc (2)
 4. src/core/lib/channel/promise_based_filter.cc (4)
 5. src/core/lib/event_engine/posix_engine/timer_manager.cc (8)
 6. src/core/lib/iomgr/cfstream_handle.cc (12)
 7. src/core/lib/iomgr/closure.h (4)
 8. src/core/lib/iomgr/endpoint_cfstream.cc (26)
 9. src/core/lib/iomgr/ev_epoll1_linux.cc (2)
10. src/core/lib/iomgr/ev_poll_posix.cc (4)
11. src/core/lib/iomgr/event_engine_shims/closure.cc (4)
12. src/core/lib/iomgr/exec_ctx.cc (4)
13. src/core/lib/iomgr/lockfree_event.cc (8)
14. src/core/lib/iomgr/tcp_client_cfstream.cc (13)
15. src/core/lib/iomgr/timer_generic.cc (31)
16. src/core/lib/promise/interceptor_list.h (12)
17. src/core/lib/promise/pipe.h (2)
18. src/core/lib/security/authorization/grpc_server_authz_filter.cc (4)
19. src/core/lib/transport/call_filters.cc (2)
20. src/core/resolver/dns/native/dns_resolver.cc (8)
21. src/cpp/ext/gcp/environment_autodetect.cc (2)
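
Every hunk below makes the same mechanical substitution: call sites that pass the numeric level 2 to GRPC_TRACE_LOG are rewritten to GRPC_TRACE_VLOG. As a rough sketch of why (paraphrasing the macro definitions in src/core/lib/debug/trace.h from memory, not from this commit; the exact expansions may differ): GRPC_TRACE_LOG forwards its second argument to absl's LOG_IF, which expects a severity such as INFO, whereas GRPC_TRACE_VLOG forwards it to VLOG, which expects a numeric verbosity level.

    // Approximate macro shapes (assumed, not taken from this commit):
    #define GRPC_TRACE_LOG(tracer, level) \
      LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))    // level: INFO, WARNING, ...
    #define GRPC_TRACE_VLOG(tracer, level) \
      if (GRPC_TRACE_FLAG_ENABLED(tracer)) VLOG(level)  // level: 2, 3, ...

    // The rewrite applied at every call site in this commit:
    GRPC_TRACE_LOG(http, 2) << "...";   // before: 2 sits where a severity belongs
    GRPC_TRACE_VLOG(http, 2) << "...";  // after: 2 is a genuine VLOG verbosity level

Both forms still gate on GRPC_TRACE_FLAG_ENABLED, so which tracers fire is unchanged; only the underlying absl logging call differs.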

src/core/ext/transport/chaotic_good/server_transport.cc
@@ -203,7 +203,7 @@ auto ChaoticGoodServerTransport::CallOutboundLoop(
       Map(SendCallInitialMetadataAndBody(stream_id, outgoing_frames,
                                          call_initiator),
           [stream_id](absl::Status main_body_result) {
-            GRPC_TRACE_LOG(chaotic_good, 2)
+            GRPC_TRACE_VLOG(chaotic_good, 2)
                 << "CHAOTIC_GOOD: CallOutboundLoop: stream_id=" << stream_id
                 << " main_body_result=" << main_body_result;
             return Empty{};

src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -2084,7 +2084,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
   // Lambda is immediately invoked as a big scoped section that can be
   // exited out of at any point by returning.
   [&]() {
-    GRPC_TRACE_LOG(http, 2)
+    GRPC_TRACE_VLOG(http, 2)
         << "maybe_complete_recv_message " << s
         << " final_metadata_requested=" << s->final_metadata_requested
         << " seen_error=" << s->seen_error;
@@ -2098,7 +2098,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
     int64_t min_progress_size;
     auto r = grpc_deframe_unprocessed_incoming_frames(
         s, &min_progress_size, &**s->recv_message, s->recv_message_flags);
-    GRPC_TRACE_LOG(http, 2)
+    GRPC_TRACE_VLOG(http, 2)
         << "Deframe data frame: "
         << grpc_core::PollToString(
                r, [](absl::Status r) { return r.ToString(); });
@@ -2152,12 +2152,12 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
 void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
                                                        grpc_chttp2_stream* s) {
   grpc_chttp2_maybe_complete_recv_message(t, s);
-  GRPC_TRACE_LOG(http, 2) << "maybe_complete_recv_trailing_metadata cli="
-                          << t->is_client << " s=" << s
-                          << " closure=" << s->recv_trailing_metadata_finished
-                          << " read_closed=" << s->read_closed
-                          << " write_closed=" << s->write_closed << " "
-                          << s->frame_storage.length;
+  GRPC_TRACE_VLOG(http, 2) << "maybe_complete_recv_trailing_metadata cli="
+                           << t->is_client << " s=" << s
+                           << " closure=" << s->recv_trailing_metadata_finished
+                           << " read_closed=" << s->read_closed
+                           << " write_closed=" << s->write_closed << " "
+                           << s->frame_storage.length;
   if (s->recv_trailing_metadata_finished != nullptr && s->read_closed &&
       s->write_closed) {
     if (s->seen_error || !t->is_client) {
@@ -2361,7 +2361,7 @@ grpc_chttp2_transport::RemovedStreamHandle grpc_chttp2_mark_stream_closed(
     grpc_chttp2_transport* t, grpc_chttp2_stream* s, int close_reads,
     int close_writes, grpc_error_handle error) {
   grpc_chttp2_transport::RemovedStreamHandle rsh;
-  GRPC_TRACE_LOG(http, 2)
+  GRPC_TRACE_VLOG(http, 2)
       << "MARK_STREAM_CLOSED: t=" << t << " s=" << s << "(id=" << s->id << ") "
       << ((close_reads && close_writes)
               ? "read+write"

src/core/ext/transport/chttp2/transport/writing.cc
@@ -204,7 +204,7 @@ static bool update_list(grpc_chttp2_transport* t, int64_t send_bytes,
 static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
                          const char* staller) {
-  GRPC_TRACE_LOG(flowctl, 2)
+  GRPC_TRACE_VLOG(flowctl, 2)
      << t->peer_string.as_string_view() << ":" << t << " stream " << s->id
      << " moved to stalled list by " << staller
      << ". This is FULLY expected to happen in a healthy program that is not "

src/core/lib/channel/promise_based_filter.cc
@@ -2111,7 +2111,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
 // Handle cancellation.
 void ServerCallData::Completed(grpc_error_handle error,
                                bool tarpit_cancellation, Flusher* flusher) {
-  GRPC_TRACE_LOG(channel, 2)
+  GRPC_TRACE_VLOG(channel, 2)
      << LogTag() << "ServerCallData::Completed: send_trailing_state="
      << StateString(send_trailing_state_) << " send_initial_state="
      << (send_initial_metadata_ == nullptr
@@ -2386,7 +2386,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
       flusher,
       send_initial_metadata_ == nullptr ||
           send_initial_metadata_->state == SendInitialMetadata::kForwarded);
-  GRPC_TRACE_LOG(channel, 2)
+  GRPC_TRACE_VLOG(channel, 2)
      << LogTag() << ": After send_message WakeInsideCombiner "
      << DebugString() << " is_idle=" << send_message()->IsIdle()
      << " is_forwarded=" << send_message()->IsForwarded();

src/core/lib/event_engine/posix_engine/timer_manager.cc
@@ -118,13 +118,13 @@ void TimerManager::Shutdown() {
   {
     grpc_core::MutexLock lock(&mu_);
     if (shutdown_) return;
-    GRPC_TRACE_LOG(timer, 2) << "TimerManager::" << this << " shutting down";
+    GRPC_TRACE_VLOG(timer, 2) << "TimerManager::" << this << " shutting down";
     shutdown_ = true;
     // Wait on the main loop to exit.
     cv_wait_.Signal();
   }
   main_loop_exit_signal_->WaitForNotification();
-  GRPC_TRACE_LOG(timer, 2) << "TimerManager::" << this << " shutdown complete";
+  GRPC_TRACE_VLOG(timer, 2) << "TimerManager::" << this << " shutdown complete";
 }

 TimerManager::~TimerManager() { Shutdown(); }
@@ -140,8 +140,8 @@ void TimerManager::Kick() {
 void TimerManager::RestartPostFork() {
   grpc_core::MutexLock lock(&mu_);
   CHECK(GPR_LIKELY(shutdown_));
-  GRPC_TRACE_LOG(timer, 2) << "TimerManager::" << this
-                           << " restarting after shutdown";
+  GRPC_TRACE_VLOG(timer, 2)
+      << "TimerManager::" << this << " restarting after shutdown";
   shutdown_ = false;
   main_loop_exit_signal_.emplace();
   thread_pool_->Run([this]() { MainLoop(); });

src/core/lib/iomgr/cfstream_handle.cc
@@ -65,9 +65,9 @@ void CFStreamHandle::ReadCallback(CFReadStreamRef stream,
   grpc_error_handle error;
   CFErrorRef stream_error;
   CFStreamHandle* handle = static_cast<CFStreamHandle*>(client_callback_info);
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream ReadCallback (" << handle << ", "
-                         << stream << ", " << type << ", "
-                         << client_callback_info << ")";
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream ReadCallback (" << handle << ", "
+                          << stream << ", " << type << ", "
+                          << client_callback_info << ")";
   switch (type) {
     case kCFStreamEventOpenCompleted:
       handle->open_event_.SetReady();
@@ -98,9 +98,9 @@ void CFStreamHandle::WriteCallback(CFWriteStreamRef stream,
   grpc_error_handle error;
   CFErrorRef stream_error;
   CFStreamHandle* handle = static_cast<CFStreamHandle*>(clientCallBackInfo);
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream WriteCallback (" << handle << ", "
-                         << stream << ", " << type << ", " << clientCallBackInfo
-                         << ")";
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream WriteCallback (" << handle << ", "
+                          << stream << ", " << type << ", "
+                          << clientCallBackInfo << ")";
   switch (type) {
     case kCFStreamEventOpenCompleted:
       handle->open_event_.SetReady();

src/core/lib/iomgr/closure.h
@@ -292,7 +292,7 @@ class Closure {
       return;
     }
 #ifndef NDEBUG
-    GRPC_TRACE_LOG(closure, 2)
+    GRPC_TRACE_VLOG(closure, 2)
        << "running closure " << closure << ": created ["
        << closure->file_created << ":" << closure->line_created << "]: run ["
        << location.file() << ":" << location.line() << "]";
@@ -300,7 +300,7 @@ class Closure {
 #endif
     closure->cb(closure->cb_arg, error);
 #ifndef NDEBUG
-    GRPC_TRACE_LOG(closure, 2) << "closure " << closure << " finished";
+    GRPC_TRACE_VLOG(closure, 2) << "closure " << closure << " finished";
 #endif
   }
 };

src/core/lib/iomgr/endpoint_cfstream.cc
@@ -129,10 +129,10 @@ static void CallReadCb(CFStreamEndpoint* ep, grpc_error_handle error) {
 }

 static void CallWriteCb(CFStreamEndpoint* ep, grpc_error_handle error) {
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream endpoint:" << ep << " call_write_cb "
-                         << ep->write_cb << " " << ep->write_cb->cb << ":"
-                         << ep->write_cb->cb_arg
-                         << "write: error=" << grpc_core::StatusToString(error);
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep << " call_write_cb "
+                          << ep->write_cb << " " << ep->write_cb->cb << ":"
+                          << ep->write_cb->cb_arg << "write: error="
+                          << grpc_core::StatusToString(error);
   grpc_closure* cb = ep->write_cb;
   ep->write_cb = nullptr;
   ep->write_slices = nullptr;
@@ -232,9 +232,9 @@ static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
                          grpc_closure* cb, bool /*urgent*/,
                          int /*min_progress_size*/) {
   CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " read ("
-                         << slices << ", " << cb
-                         << ") length:" << slices->length;
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " read ("
+                          << slices << ", " << cb
+                          << ") length:" << slices->length;
   CHECK_EQ(ep_impl->read_cb, nullptr);
   ep_impl->read_cb = cb;
   ep_impl->read_slices = slices;
@@ -249,9 +249,9 @@ static void CFStreamWrite(grpc_endpoint* ep, grpc_slice_buffer* slices,
                           grpc_closure* cb, void* /*arg*/,
                           int /*max_frame_size*/) {
   CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " write ("
-                         << slices << ", " << cb
-                         << ") length:" << slices->length;
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " write ("
+                          << slices << ", " << cb
+                          << ") length:" << slices->length;
   CHECK_EQ(ep_impl->write_cb, nullptr);
   ep_impl->write_cb = cb;
   ep_impl->write_slices = slices;
@@ -305,9 +305,9 @@ grpc_endpoint* grpc_cfstream_endpoint_create(CFReadStreamRef read_stream,
                                              const char* peer_string,
                                              CFStreamHandle* stream_sync) {
   CFStreamEndpoint* ep_impl = new CFStreamEndpoint;
-  GRPC_TRACE_LOG(tcp, 2) << "CFStream endpoint:" << ep_impl
-                         << " create readStream:" << read_stream
-                         << " writeStream: " << write_stream;
+  GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep_impl
+                          << " create readStream:" << read_stream
+                          << " writeStream: " << write_stream;
   ep_impl->base.vtable = &vtable;
   gpr_ref_init(&ep_impl->refcount, 1);
   ep_impl->read_stream = read_stream;

src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -360,7 +360,7 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
   grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name.c_str());
   fork_fd_list_add_grpc_fd(new_fd);
 #ifndef NDEBUG
-  GRPC_TRACE_LOG(fd_refcount, 2)
+  GRPC_TRACE_VLOG(fd_refcount, 2)
      << "FD " << fd << " " << new_fd << " create " << fd_name;
 #endif

src/core/lib/iomgr/ev_poll_posix.cc
@@ -332,7 +332,7 @@ static void fork_fd_list_add_wakeup_fd(grpc_cached_wakeup_fd* fd) {
 #define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
 static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                    int line) {
-  GRPC_TRACE_LOG(fd_refcount, 2)
+  GRPC_TRACE_VLOG(fd_refcount, 2)
      << "FD " << fd->fd << " " << fd << " ref " << n << " "
      << gpr_atm_no_barrier_load(&fd->refst) << " -> "
      << gpr_atm_no_barrier_load(&fd->refst) + n << " [" << reason << "; "
@@ -356,7 +356,7 @@ static void ref_by(grpc_fd* fd, int n) {
 #ifndef NDEBUG
 static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                      int line) {
-  GRPC_TRACE_LOG(fd_refcount, 2)
+  GRPC_TRACE_VLOG(fd_refcount, 2)
      << "FD " << fd->fd << " " << fd << " unref " << n << " "
      << gpr_atm_no_barrier_load(&fd->refst) << " -> "
      << gpr_atm_no_barrier_load(&fd->refst) - n << " [" << reason << "; "

src/core/lib/iomgr/event_engine_shims/closure.cc
@@ -35,7 +35,7 @@ void RunEventEngineClosure(grpc_closure* closure, grpc_error_handle error) {
   grpc_core::ExecCtx exec_ctx;
 #ifndef NDEBUG
   closure->scheduled = false;
-  GRPC_TRACE_LOG(closure, 2)
+  GRPC_TRACE_VLOG(closure, 2)
      << "EventEngine: running closure " << closure << ": created ["
      << closure->file_created << ":" << closure->line_created
      << "]: " << (closure->run ? "run" : "scheduled") << " ["
@@ -43,7 +43,7 @@ void RunEventEngineClosure(grpc_closure* closure, grpc_error_handle error) {
 #endif
   closure->cb(closure->cb_arg, error);
 #ifndef NDEBUG
-  GRPC_TRACE_LOG(closure, 2)
+  GRPC_TRACE_VLOG(closure, 2)
      << "EventEngine: closure " << closure << " finished";
 #endif
 }

src/core/lib/iomgr/exec_ctx.cc
@@ -32,7 +32,7 @@
 static void exec_ctx_run(grpc_closure* closure) {
 #ifndef NDEBUG
   closure->scheduled = false;
-  GRPC_TRACE_LOG(closure, 2)
+  GRPC_TRACE_VLOG(closure, 2)
      << "running closure " << closure << ": created [" << closure->file_created
      << ":" << closure->line_created
      << "]: " << (closure->run ? "run" : "scheduled") << " ["
@@ -43,7 +43,7 @@ static void exec_ctx_run(grpc_closure* closure) {
   closure->error_data.error = 0;
   closure->cb(closure->cb_arg, std::move(error));
 #ifndef NDEBUG
-  GRPC_TRACE_LOG(closure, 2) << "closure " << closure << " finished";
+  GRPC_TRACE_VLOG(closure, 2) << "closure " << closure << " finished";
 #endif
 }

src/core/lib/iomgr/lockfree_event.cc
@@ -95,8 +95,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
     // sure that the shutdown error has been initialized properly before us
     // referencing it.
     gpr_atm curr = gpr_atm_acq_load(&state_);
-    GRPC_TRACE_LOG(polling, 2) << "LockfreeEvent::NotifyOn: " << this
-                               << " curr=" << curr << " closure=" << closure;
+    GRPC_TRACE_VLOG(polling, 2) << "LockfreeEvent::NotifyOn: " << this
+                                << " curr=" << curr << " closure=" << closure;
     switch (curr) {
       case kClosureNotReady: {
         // kClosureNotReady -> <closure>.
@@ -161,7 +161,7 @@ bool LockfreeEvent::SetShutdown(grpc_error_handle shutdown_error) {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(&state_);
-    GRPC_TRACE_LOG(polling, 2)
+    GRPC_TRACE_VLOG(polling, 2)
        << "LockfreeEvent::SetShutdown: " << &state_ << " curr=" << curr
        << " err=" << StatusToString(shutdown_error);
     switch (curr) {
@@ -209,7 +209,7 @@ void LockfreeEvent::SetReady() {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(&state_);
-    GRPC_TRACE_LOG(polling, 2)
+    GRPC_TRACE_VLOG(polling, 2)
        << "LockfreeEvent::SetReady: " << &state_ << " curr=" << curr;
     switch (curr) {

src/core/lib/iomgr/tcp_client_cfstream.cc
@@ -78,8 +78,8 @@ static void CFStreamConnectCleanup(CFStreamConnect* connect) {
 static void OnAlarm(void* arg, grpc_error_handle error) {
   CFStreamConnect* connect = static_cast<CFStreamConnect*>(arg);
-  GRPC_TRACE_LOG(tcp, 2) << "CLIENT_CONNECT :" << connect << " OnAlarm, error:"
-                         << grpc_core::StatusToString(error);
+  GRPC_TRACE_VLOG(tcp, 2) << "CLIENT_CONNECT :" << connect << " OnAlarm, error:"
+                          << grpc_core::StatusToString(error);
   gpr_mu_lock(&connect->mu);
   grpc_closure* closure = connect->closure;
   connect->closure = nil;
@@ -97,8 +97,8 @@ static void OnAlarm(void* arg, grpc_error_handle error) {
 static void OnOpen(void* arg, grpc_error_handle error) {
   CFStreamConnect* connect = static_cast<CFStreamConnect*>(arg);
-  GRPC_TRACE_LOG(tcp, 2) << "CLIENT_CONNECT :" << connect << " OnOpen, error:"
-                         << grpc_core::StatusToString(error);
+  GRPC_TRACE_VLOG(tcp, 2) << "CLIENT_CONNECT :" << connect << " OnOpen, error:"
+                          << grpc_core::StatusToString(error);
   gpr_mu_lock(&connect->mu);
   grpc_timer_cancel(&connect->alarm);
   grpc_closure* closure = connect->closure;
@@ -169,8 +169,9 @@ static int64_t CFStreamClientConnect(
   gpr_ref_init(&connect->refcount, 1);
   gpr_mu_init(&connect->mu);
-  GRPC_TRACE_LOG(tcp, 2) << "CLIENT_CONNECT: " << connect << ", "
-                         << connect->addr_name << ": asynchronously connecting";
+  GRPC_TRACE_VLOG(tcp, 2) << "CLIENT_CONNECT: " << connect << ", "
+                          << connect->addr_name
+                          << ": asynchronously connecting";
   CFReadStreamRef read_stream;
   CFWriteStreamRef write_stream;

src/core/lib/iomgr/timer_generic.cc
@@ -334,7 +334,7 @@ static void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline,
   timer->hash_table_next = nullptr;
 #endif
-  GRPC_TRACE_LOG(timer, 2)
+  GRPC_TRACE_VLOG(timer, 2)
      << "TIMER " << timer << ": SET "
      << deadline.milliseconds_after_process_epoch() << " now "
      << grpc_core::Timestamp::Now().milliseconds_after_process_epoch()
@@ -369,7 +369,7 @@ static void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline,
     timer->heap_index = INVALID_HEAP_INDEX;
     list_join(&shard->list, timer);
   }
-  GRPC_TRACE_LOG(timer, 2)
+  GRPC_TRACE_VLOG(timer, 2)
      << " .. add to shard " << (shard - g_shards)
      << " with queue_deadline_cap="
      << shard->queue_deadline_cap.milliseconds_after_process_epoch()
@@ -389,7 +389,7 @@ static void timer_init(grpc_timer* timer, grpc_core::Timestamp deadline,
   // grpc_timer_check.
   if (is_first_timer) {
     gpr_mu_lock(&g_shared_mutables.mu);
-    GRPC_TRACE_LOG(timer, 2)
+    GRPC_TRACE_VLOG(timer, 2)
        << " .. old shard min_deadline="
        << shard->min_deadline.milliseconds_after_process_epoch();
     if (deadline < shard->min_deadline) {
@@ -430,8 +430,9 @@ static void timer_cancel(grpc_timer* timer) {
   timer_shard* shard = &g_shards[grpc_core::HashPointer(timer, g_num_shards)];
   gpr_mu_lock(&shard->mu);
-  GRPC_TRACE_LOG(timer, 2) << "TIMER " << timer << ": CANCEL pending="
-                           << (timer->pending ? "true" : "false");
+  GRPC_TRACE_VLOG(timer, 2)
+      << "TIMER " << timer
+      << ": CANCEL pending=" << (timer->pending ? "true" : "false");
   if (timer->pending) {
     REMOVE_FROM_HASH_TABLE(timer);
@@ -469,7 +470,7 @@ static bool refill_heap(timer_shard* shard, grpc_core::Timestamp now) {
       std::max(now, shard->queue_deadline_cap) +
       grpc_core::Duration::FromSecondsAsDouble(deadline_delta);
-  GRPC_TRACE_LOG(timer_check, 2)
+  GRPC_TRACE_VLOG(timer_check, 2)
      << " .. shard[" << (shard - g_shards) << "]->queue_deadline_cap --> "
      << shard->queue_deadline_cap.milliseconds_after_process_epoch();
   for (timer = shard->list.next; timer != &shard->list; timer = next) {
@@ -479,7 +480,7 @@ static bool refill_heap(timer_shard* shard, grpc_core::Timestamp now) {
         timer->deadline);
     if (timer_deadline < shard->queue_deadline_cap) {
-      GRPC_TRACE_LOG(timer_check, 2)
+      GRPC_TRACE_VLOG(timer_check, 2)
          << " .. add timer with deadline "
          << timer_deadline.milliseconds_after_process_epoch() << " to heap";
       list_remove(timer);
@@ -495,7 +496,7 @@ static bool refill_heap(timer_shard* shard, grpc_core::Timestamp now) {
 static grpc_timer* pop_one(timer_shard* shard, grpc_core::Timestamp now) {
   grpc_timer* timer;
   for (;;) {
-    GRPC_TRACE_LOG(timer_check, 2)
+    GRPC_TRACE_VLOG(timer_check, 2)
        << " .. shard[" << (shard - g_shards) << "]: heap_empty="
        << (grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
     if (grpc_timer_heap_is_empty(&shard->heap)) {
@@ -506,13 +507,13 @@ static grpc_timer* pop_one(timer_shard* shard, grpc_core::Timestamp now) {
     auto timer_deadline =
         grpc_core::Timestamp::FromMillisecondsAfterProcessEpoch(
             timer->deadline);
-    GRPC_TRACE_LOG(timer_check, 2)
+    GRPC_TRACE_VLOG(timer_check, 2)
        << " .. check top timer deadline="
        << timer_deadline.milliseconds_after_process_epoch()
        << " now=" << now.milliseconds_after_process_epoch();
     if (timer_deadline > now) return nullptr;
-    GRPC_TRACE_LOG(timer, 2) << "TIMER " << timer << ": FIRE "
-                             << (now - timer_deadline).millis() << "ms late";
+    GRPC_TRACE_VLOG(timer, 2) << "TIMER " << timer << ": FIRE "
+                              << (now - timer_deadline).millis() << "ms late";
     timer->pending = false;
     grpc_timer_heap_pop(&shard->heap);
     return timer;
@@ -533,7 +534,7 @@ static size_t pop_timers(timer_shard* shard, grpc_core::Timestamp now,
   }
   *new_min_deadline = compute_min_deadline(shard);
   gpr_mu_unlock(&shard->mu);
-  GRPC_TRACE_LOG(timer_check, 2)
+  GRPC_TRACE_VLOG(timer_check, 2)
      << " .. shard[" << (shard - g_shards) << "] popped " << n;
   return n;
 }
@@ -570,7 +571,7 @@ static grpc_timer_check_result run_some_expired_timers(
     gpr_mu_lock(&g_shared_mutables.mu);
     result = GRPC_TIMERS_CHECKED_AND_EMPTY;
-    GRPC_TRACE_LOG(timer_check, 2)
+    GRPC_TRACE_VLOG(timer_check, 2)
        << " .. shard[" << (g_shard_queue[0] - g_shards)
        << "]->min_deadline = "
        << g_shard_queue[0]->min_deadline.milliseconds_after_process_epoch();
@@ -587,7 +588,7 @@ static grpc_timer_check_result run_some_expired_timers(
       result = GRPC_TIMERS_FIRED;
     }
-    GRPC_TRACE_LOG(timer_check, 2)
+    GRPC_TRACE_VLOG(timer_check, 2)
       << " .. result --> " << result << ", shard["
      << (g_shard_queue[0] - g_shards) << "]->min_deadline "
      << g_shard_queue[0]->min_deadline.milliseconds_after_process_epoch()
@@ -642,7 +643,7 @@ static grpc_timer_check_result timer_check(grpc_core::Timestamp* next) {
     if (next != nullptr) {
       *next = std::min(*next, min_timer);
     }
-    GRPC_TRACE_LOG(timer_check, 2)
+    GRPC_TRACE_VLOG(timer_check, 2)
       << "TIMER CHECK SKIP: now=" << now.milliseconds_after_process_epoch()
      << " min_timer=" << min_timer.milliseconds_after_process_epoch();
     return GRPC_TIMERS_CHECKED_AND_EMPTY;

src/core/lib/promise/interceptor_list.h
@@ -87,7 +87,7 @@ class InterceptorList {
  public:
   RunPromise(size_t memory_required, Map** factory, absl::optional<T> value) {
     if (!value.has_value() || *factory == nullptr) {
-      GRPC_TRACE_LOG(promise_primitives, 2)
+      GRPC_TRACE_VLOG(promise_primitives, 2)
          << "InterceptorList::RunPromise[" << this << "]: create immediate";
       is_immediately_resolved_ = true;
       Construct(&result_, std::move(value));
@@ -98,14 +98,14 @@ class InterceptorList {
           async_resolution_.space.get());
       async_resolution_.current_factory = *factory;
       async_resolution_.first_factory = factory;
-      GRPC_TRACE_LOG(promise_primitives, 2)
+      GRPC_TRACE_VLOG(promise_primitives, 2)
          << "InterceptorList::RunPromise[" << this
          << "]: create async; mem=" << async_resolution_.space.get();
     }
   }

   ~RunPromise() {
-    GRPC_TRACE_LOG(promise_primitives, 2)
+    GRPC_TRACE_VLOG(promise_primitives, 2)
        << "InterceptorList::RunPromise[" << this << "]: destroy";
     if (is_immediately_resolved_) {
       Destruct(&result_);
@@ -123,7 +123,7 @@ class InterceptorList {
   RunPromise(RunPromise&& other) noexcept
       : is_immediately_resolved_(other.is_immediately_resolved_) {
-    GRPC_TRACE_LOG(promise_primitives, 2)
+    GRPC_TRACE_VLOG(promise_primitives, 2)
        << "InterceptorList::RunPromise[" << this << "]: move from "
        << &other;
     if (is_immediately_resolved_) {
@@ -136,7 +136,7 @@ class InterceptorList {
   RunPromise& operator=(RunPromise&& other) noexcept = delete;

   Poll<absl::optional<T>> operator()() {
-    GRPC_TRACE_LOG(promise_primitives, 2)
+    GRPC_TRACE_VLOG(promise_primitives, 2)
        << "InterceptorList::RunPromise[" << this << "]: " << DebugString();
     if (is_immediately_resolved_) return std::move(result_);
     while (true) {
@@ -152,7 +152,7 @@ class InterceptorList {
         async_resolution_.current_factory =
             async_resolution_.current_factory->next();
         if (!p->has_value()) async_resolution_.current_factory = nullptr;
-        GRPC_TRACE_LOG(promise_primitives, 2)
+        GRPC_TRACE_VLOG(promise_primitives, 2)
            << "InterceptorList::RunPromise[" << this
            << "]: " << DebugString();
         if (async_resolution_.current_factory == nullptr) {

src/core/lib/promise/pipe.h
@@ -634,7 +634,7 @@ class Push {
   Poll<bool> operator()() {
     if (center_ == nullptr) {
-      GRPC_TRACE_LOG(promise_primitives, 2)
+      GRPC_TRACE_VLOG(promise_primitives, 2)
          << GetContext<Activity>()->DebugTag()
          << " Pipe push has a null center";
       return false;

src/core/lib/security/authorization/grpc_server_authz_filter.cc
@@ -64,7 +64,7 @@ GrpcServerAuthzFilter::Create(const ChannelArgs& args, ChannelFilter::Args) {
 bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
   EvaluateArgs args(&initial_metadata, &per_channel_evaluate_args_);
-  GRPC_TRACE_LOG(grpc_authz_api, 2)
+  GRPC_TRACE_VLOG(grpc_authz_api, 2)
      << "checking request: url_path=" << args.GetPath()
      << ", transport_security_type=" << args.GetTransportSecurityType()
      << ", uri_sans=[" << absl::StrJoin(args.GetUriSans(), ",")
@@ -86,7 +86,7 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
     AuthorizationEngine::Decision decision =
         engines.allow_engine->Evaluate(args);
     if (decision.type == AuthorizationEngine::Decision::Type::kAllow) {
-      GRPC_TRACE_LOG(grpc_authz_api, 2)
+      GRPC_TRACE_VLOG(grpc_authz_api, 2)
         << "chand=" << this << ": request allowed by policy "
        << decision.matching_policy_name;
       return true;

src/core/lib/transport/call_filters.cc
@@ -188,7 +188,7 @@ void CallFilters::Finalize(const grpc_call_final_info* final_info) {
 void CallFilters::CancelDueToFailedPipeOperation(SourceLocation but_where) {
   // We expect something cancelled before now
   if (push_server_trailing_metadata_ == nullptr) return;
-  GRPC_TRACE_LOG(promise_primitives, 2)
+  GRPC_TRACE_VLOG(promise_primitives, 2)
       .AtLocation(but_where.file(), but_where.line())
       << "Cancelling due to failed pipe operation: " << DebugString();
   auto status =

src/core/resolver/dns/native/dns_resolver.cc
@@ -90,11 +90,11 @@ NativeClientChannelDNSResolver::NativeClientChannelDNSResolver(
           .set_max_backoff(Duration::Milliseconds(
               GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000)),
       &dns_resolver_trace) {
-  GRPC_TRACE_LOG(dns_resolver, 2) << "[dns_resolver=" << this << "] created";
+  GRPC_TRACE_VLOG(dns_resolver, 2) << "[dns_resolver=" << this << "] created";
 }

 NativeClientChannelDNSResolver::~NativeClientChannelDNSResolver() {
-  GRPC_TRACE_LOG(dns_resolver, 2) << "[dns_resolver=" << this << "] destroyed";
+  GRPC_TRACE_VLOG(dns_resolver, 2) << "[dns_resolver=" << this << "] destroyed";
 }

 OrphanablePtr<Orphanable> NativeClientChannelDNSResolver::StartRequest() {
@@ -103,7 +103,7 @@ OrphanablePtr<Orphanable> NativeClientChannelDNSResolver::StartRequest() {
       absl::bind_front(&NativeClientChannelDNSResolver::OnResolved, this),
       name_to_resolve(), kDefaultSecurePort, kDefaultDNSRequestTimeout,
       interested_parties(), /*name_server=*/"");
-  GRPC_TRACE_LOG(dns_resolver, 2)
+  GRPC_TRACE_VLOG(dns_resolver, 2)
      << "[dns_resolver=" << this << "] starting request="
      << DNSResolver::HandleToString(dns_request_handle);
   return MakeOrphanable<Request>();
@@ -111,7 +111,7 @@ OrphanablePtr<Orphanable> NativeClientChannelDNSResolver::StartRequest() {
 void NativeClientChannelDNSResolver::OnResolved(
     absl::StatusOr<std::vector<grpc_resolved_address>> addresses_or) {
-  GRPC_TRACE_LOG(dns_resolver, 2)
+  GRPC_TRACE_VLOG(dns_resolver, 2)
      << "[dns_resolver=" << this
      << "] request complete, status=" << addresses_or.status();
   // Convert result from iomgr DNS API into Resolver::Result.

src/cpp/ext/gcp/environment_autodetect.cc
@@ -63,7 +63,7 @@ std::string GetNamespaceName() {
       "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
   auto namespace_name = grpc_core::LoadFile(filename, false);
   if (!namespace_name.ok()) {
-    GRPC_TRACE_LOG(environment_autodetect, 2)
+    GRPC_TRACE_VLOG(environment_autodetect, 2)
       << "Reading file " << filename
      << " failed: " << grpc_core::StatusToString(namespace_name.status());
     // Fallback on an environment variable
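
Note that after this change the statements above emit output only when both conditions hold: the named tracer is enabled (e.g. GRPC_TRACE=http,timer, per gRPC's documented environment variables) and the process runs with VLOG level 2 or higher (e.g. GRPC_VERBOSITY=DEBUG, which the absl-based logging reportedly maps to verbosity 2). With default verbosity these trace statements stay silent even when the tracer flag is on.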
