[Gpr_To_Absl_Logging]

pull/37350/head
tanvi-jagtap 4 months ago
parent 94ad0c5cbc
commit 94a14ae3c5
  1. 59
      src/core/lib/channel/promise_based_filter.cc
  2. 14
      src/core/lib/gprpp/work_serializer.cc
  3. 5
      src/core/lib/iomgr/ev_epoll1_linux.cc
  4. 4
      src/core/lib/iomgr/event_engine_shims/endpoint.cc
  5. 14
      src/core/lib/iomgr/tcp_client_posix.cc
  6. 22
      src/core/lib/iomgr/tcp_posix.cc
  7. 7
      src/core/lib/iomgr/tcp_server_posix.cc
  8. 6
      src/core/lib/resource_quota/memory_quota.cc
  9. 3
      src/core/lib/resource_quota/memory_quota.h
  10. 8
      src/core/lib/security/authorization/grpc_server_authz_filter.cc
  11. 19
      src/core/lib/security/credentials/plugin/plugin_credentials.cc
  12. 8
      src/core/lib/slice/slice_refcount.h
  13. 4
      src/core/lib/transport/bdp_estimator.cc
  14. 8
      src/core/lib/transport/bdp_estimator.h
  15. 12
      src/core/lib/transport/connectivity_state.cc

@ -207,8 +207,8 @@ void BaseCallData::CapturedBatch::ResumeWith(Flusher* releaser) {
uintptr_t& refcnt = *RefCountField(batch);
if (refcnt == 0) {
// refcnt==0 ==> cancelled
GRPC_TRACE_LOG(channel, INFO) << releaser->call()->DebugTag() <<
"RESUME BATCH REQUEST CANCELLED";
GRPC_TRACE_LOG(channel, INFO)
<< releaser->call()->DebugTag() << "RESUME BATCH REQUEST CANCELLED";
return;
}
if (--refcnt == 0) {
@ -265,8 +265,8 @@ BaseCallData::Flusher::~Flusher() {
BaseCallData* call =
static_cast<BaseCallData*>(batch->handler_private.extra_arg);
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:forward batch via closure: " <<
grpc_transport_stream_op_batch_string(batch, false);
<< "FLUSHER:forward batch via closure: "
<< grpc_transport_stream_op_batch_string(batch, false);
grpc_call_next_op(call->elem(), batch);
GRPC_CALL_STACK_UNREF(call->call_stack(), "flusher_batch");
};
@ -276,8 +276,8 @@ BaseCallData::Flusher::~Flusher() {
batch->is_traced = true;
}
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:queue batch to forward in closure: " <<
grpc_transport_stream_op_batch_string(release_[i], false);
<< "FLUSHER:queue batch to forward in closure: "
<< grpc_transport_stream_op_batch_string(release_[i], false);
batch->handler_private.extra_arg = call_;
GRPC_CLOSURE_INIT(&batch->handler_private.closure, call_next_op, batch,
nullptr);
@ -286,8 +286,9 @@ BaseCallData::Flusher::~Flusher() {
"flusher_batch");
}
call_closures_.RunClosuresWithoutYielding(call_->call_combiner());
GRPC_TRACE_LOG(channel, INFO) << "FLUSHER:forward batch: " <<
grpc_transport_stream_op_batch_string(release_[0], false);
GRPC_TRACE_LOG(channel, INFO)
<< "FLUSHER:forward batch: "
<< grpc_transport_stream_op_batch_string(release_[0], false);
if (call_->call() != nullptr && call_->call()->traced()) {
release_[0]->is_traced = true;
}
@ -325,8 +326,8 @@ const char* BaseCallData::SendMessage::StateString(State state) {
}
void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
GRPC_TRACE_LOG(channel, INFO) << base_->LogTag() <<
" SendMessage.StartOp st=" << StateString(state_);
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.StartOp st=" << StateString(state_);
switch (state_) {
case State::kInitial:
state_ = State::kGotBatchNoPipe;
@ -351,8 +352,8 @@ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
template <typename T>
void BaseCallData::SendMessage::GotPipe(T* pipe_end) {
GRPC_TRACE_LOG(channel, INFO) << base_->LogTag() <<
" SendMessage.GotPipe st=" << StateString(state_);
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.GotPipe st=" << StateString(state_);
CHECK_NE(pipe_end, nullptr);
switch (state_) {
case State::kInitial:
@ -605,8 +606,9 @@ const char* BaseCallData::ReceiveMessage::StateString(State state) {
}
void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
GRPC_TRACE_LOG(channel, INFO) << base_->LogTag() <<
" ReceiveMessage.StartOp st=" << StateString(state_);
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.StartOp st=" << StateString(state_);
switch (state_) {
case State::kInitial:
state_ = State::kForwardedBatchNoPipe;
@ -644,8 +646,9 @@ void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
template <typename T>
void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
GRPC_TRACE_LOG(channel, INFO) << base_->LogTag() <<
" ReceiveMessage.GotPipe st=" << StateString(state_);
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.GotPipe st=" << StateString(state_);
switch (state_) {
case State::kInitial:
state_ = State::kIdle;
@ -887,8 +890,9 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
case State::kPulledFromPipe: {
CHECK(push_.has_value());
if ((*push_)().ready()) {
GRPC_TRACE_LOG(channel, INFO) << base_->LogTag() <<
" ReceiveMessage.WakeInsideCombiner push complete";
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete";
if (state_ == State::kCompletedWhilePulledFromPipe) {
interceptor()->Push()->Close();
state_ = State::kCancelled;
@ -1001,8 +1005,8 @@ class ClientCallData::PollContext {
void Run() {
DCHECK(HasContext<Arena>());
GRPC_TRACE_LOG(channel, INFO)
<< self_->LogTag() << " ClientCallData.PollContext.Run " <<
self_->DebugString();
<< self_->LogTag() << " ClientCallData.PollContext.Run "
<< self_->DebugString();
CHECK(have_scoped_activity_);
repoll_ = false;
if (self_->send_message() != nullptr) {
@ -1648,8 +1652,7 @@ void ClientCallData::HookRecvTrailingMetadata(CapturedBatch batch) {
ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
CallArgs call_args) {
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.MakeNextPromise " <<
DebugString();
<< LogTag() << " ClientCallData.MakeNextPromise " << DebugString();
CHECK_NE(poll_ctx_, nullptr);
CHECK(send_initial_state_ == SendInitialState::kQueued);
send_initial_metadata_batch_->payload->send_initial_metadata
@ -1710,8 +1713,7 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
// application.
Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() {
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.PollTrailingMetadata " <<
DebugString();
<< LogTag() << " ClientCallData.PollTrailingMetadata " << DebugString();
CHECK_NE(poll_ctx_, nullptr);
if (send_initial_state_ == SendInitialState::kQueued) {
// First poll: pass the send_initial_metadata op down the stack.
@ -2256,8 +2258,9 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
// All polls: await sending the trailing metadata, then foward it down the
// stack.
Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
GRPC_TRACE_LOG(channel, INFO) << LogTag() <<
" PollTrailingMetadata: " << StateString(send_trailing_state_);
GRPC_TRACE_LOG(channel, INFO)
<< LogTag()
<< " PollTrailingMetadata: " << StateString(send_trailing_state_);
switch (send_trailing_state_) {
case SendTrailingState::kInitial:
case SendTrailingState::kQueuedBehindSendMessage:
@ -2286,8 +2289,8 @@ void ServerCallData::RecvTrailingMetadataReadyCallback(
void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": RecvTrailingMetadataReady error=" << error <<
" md=" << recv_trailing_metadata_->DebugString();
<< LogTag() << ": RecvTrailingMetadataReady error=" << error
<< " md=" << recv_trailing_metadata_->DebugString();
Flusher flusher(this);
PollContext poll_ctx(this, &flusher);
Completed(error, recv_trailing_metadata_->get(GrpcTarPit()).has_value(),

@ -136,9 +136,9 @@ class WorkSerializer::LegacyWorkSerializer final : public WorkSerializerImpl {
void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
const DebugLocation& location) {
GRPC_TRACE_LOG(work_serializer, INFO) << "WorkSerializer::Run() " << this
<< " Scheduling callback [" <<
location.file() << ":" << location.line() << "]";
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::Run() " << this << " Scheduling callback ["
<< location.file() << ":" << location.line() << "]";
// Increment queue size for the new callback and owner count to attempt to
// take ownership of the WorkSerializer.
const uint64_t prev_ref_pair =
@ -405,8 +405,8 @@ void WorkSerializer::DispatchingWorkSerializer::Orphan() {
void WorkSerializer::DispatchingWorkSerializer::Run(
std::function<void()> callback, const DebugLocation& location) {
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer[" << this << "] Scheduling callback [" <<
location.file() << ":" << location.line() << "]";
<< "WorkSerializer[" << this << "] Scheduling callback ["
<< location.file() << ":" << location.line() << "]";
global_stats().IncrementWorkSerializerItemsEnqueued();
MutexLock lock(&mu_);
if (!running_) {
@ -437,8 +437,8 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
// queue since processing_ is stored in reverse order.
auto& cb = processing_.back();
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer[" << this << "] Executing callback [" <<
cb.location.file() << ":" << cb.location.line() << "]";
<< "WorkSerializer[" << this << "] Executing callback ["
<< cb.location.file() << ":" << cb.location.line() << "]";
// Run the work item.
const auto start = std::chrono::steady_clock::now();
SetCurrentThread();

@ -1125,9 +1125,8 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
goto done;
} else {
GRPC_TRACE_LOG(polling, INFO)
<< " .. non-root poller " << next_worker <<
" (root=" << root_worker << ")";
GRPC_TRACE_LOG(polling, INFO) << " .. non-root poller " << next_worker
<< " (root=" << root_worker << ")";
SET_KICK_STATE(next_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;

@ -174,8 +174,8 @@ class EventEngineEndpointWrapper {
auto* write_buffer = reinterpret_cast<SliceBuffer*>(&eeep_->write_buffer);
write_buffer->~SliceBuffer();
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP: " << this << " WRITE (peer=" << PeerAddress() <<
") error=" << status;
<< "TCP: " << this << " WRITE (peer=" << PeerAddress()
<< ") error=" << status;
grpc_closure* cb = pending_write_cb_;
pending_write_cb_ = nullptr;
if (grpc_core::ExecCtx::Get() == nullptr) {

@ -141,8 +141,9 @@ done:
static void tc_on_alarm(void* acp, grpc_error_handle error) {
int done;
async_connect* ac = static_cast<async_connect*>(acp);
GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str <<
": on_alarm: error=" << grpc_core::StatusToString(error);
GRPC_TRACE_LOG(tcp, INFO)
<< "CLIENT_CONNECT: " << ac->addr_str
<< ": on_alarm: error=" << grpc_core::StatusToString(error);
gpr_mu_lock(&ac->mu);
if (ac->fd != nullptr) {
grpc_fd_shutdown(ac->fd, GRPC_ERROR_CREATE("connect() timed out"));
@ -178,8 +179,9 @@ static void on_writable(void* acp, grpc_error_handle error) {
std::string addr_str = ac->addr_str;
grpc_fd* fd;
GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str <<
": on_writable: error=" << grpc_core::StatusToString(error);
GRPC_TRACE_LOG(tcp, INFO)
<< "CLIENT_CONNECT: " << ac->addr_str
<< ": on_writable: error=" << grpc_core::StatusToString(error);
gpr_mu_lock(&ac->mu);
CHECK(ac->fd);
@ -377,8 +379,8 @@ int64_t grpc_tcp_client_create_from_prepared_fd(
grpc_schedule_on_exec_ctx);
ac->options = options;
GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str <<
": asynchronously connecting fd " << fdobj;
GRPC_TRACE_LOG(tcp, INFO) << "CLIENT_CONNECT: " << ac->addr_str
<< ": asynchronously connecting fd " << fdobj;
int shard_number = connection_id % (*g_connection_shards).size();
struct ConnectionShard* shard = &(*g_connection_shards)[shard_number];

@ -670,8 +670,7 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
g_backup_poller_mu->Unlock();
CHECK_GT(old_count, 1);
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " uncover cnt "
<< old_count << "->" <<
old_count - 1;
<< old_count << "->" << old_count - 1;
}
// gRPC API considers a Write operation to be done the moment it clears ‘flow
@ -704,9 +703,8 @@ static void cover_self(grpc_tcp* tcp) {
p = g_backup_poller;
g_backup_poller_mu->Unlock();
}
GRPC_TRACE_LOG(tcp, INFO)
<< "BACKUP_POLLER:" << p << " add " << tcp << " cnt " <<
old_count - 1 << "->" << old_count;
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " add " << tcp
<< " cnt " << old_count - 1 << "->" << old_count;
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
}
@ -729,8 +727,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) {
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << arg <<
" got_write: " << grpc_core::StatusToString(error);
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP:" << arg << " got_write: " << grpc_core::StatusToString(error);
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
}
@ -1125,8 +1123,8 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp <<
" got_read: " << grpc_core::StatusToString(error);
GRPC_TRACE_LOG(tcp, INFO)
<< "TCP:" << tcp << " got_read: " << grpc_core::StatusToString(error);
tcp->read_mu.Lock();
grpc_error_handle tcp_read_error;
if (GPR_LIKELY(error.ok()) && tcp->memory_owner.is_valid()) {
@ -1466,9 +1464,9 @@ static bool process_errors(grpc_tcp* tcp) {
} else {
// Got a control message that is not a timestamp or zerocopy. Don't know
// how to handle this.
GRPC_TRACE_LOG(tcp, INFO) << "unknown control message cmsg_level:"
<< cmsg->cmsg_level <<
" cmsg_type:" << cmsg->cmsg_type;
GRPC_TRACE_LOG(tcp, INFO)
<< "unknown control message cmsg_level:" << cmsg->cmsg_level
<< " cmsg_type:" << cmsg->cmsg_type;
return processed_err;
}
}

@ -178,8 +178,8 @@ static grpc_error_handle CreateEventEngineListener(
return;
}
GRPC_TRACE_LOG(tcp, INFO) << "SERVER_CONNECT: incoming external "
"connection: " <<
addr_uri->c_str();
"connection: "
<< addr_uri->c_str();
}
read_notifier_pollset =
(*(s->pollsets))[static_cast<size_t>(
@ -916,8 +916,7 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
return;
}
GRPC_TRACE_LOG(tcp, INFO)
<< "SERVER_CONNECT: incoming external connection: " <<
*addr_uri;
<< "SERVER_CONNECT: incoming external connection: " << *addr_uri;
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);
read_notifier_pollset =

@ -766,9 +766,9 @@ double PressureTracker::AddSampleAndGetControlValue(double sample) {
} else {
report = controller_.Update(current_estimate - kSetPoint);
}
GRPC_TRACE_LOG(resource_quota, INFO) << "RQ: pressure:" << current_estimate
<< " report:" << report <<
" controller:" << controller_.DebugString();
GRPC_TRACE_LOG(resource_quota, INFO)
<< "RQ: pressure:" << current_estimate << " report:" << report
<< " controller:" << controller_.DebugString();
report_.store(report, std::memory_order_relaxed);
});
return report_.load(std::memory_order_relaxed);

@ -426,8 +426,7 @@ class GrpcMemoryAllocatorImpl final : public EventEngineMemoryAllocatorImpl {
size_t ret = free_bytes_.exchange(0, std::memory_order_acq_rel);
if (ret == 0) return;
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Allocator " << this << " returning " << ret <<
" bytes to quota";
<< "Allocator " << this << " returning " << ret << " bytes to quota";
taken_bytes_.fetch_sub(ret, std::memory_order_relaxed);
memory_quota_->Return(ret);
memory_quota_->MaybeMoveAllocator(this, /*old_free_bytes=*/ret,

@ -78,8 +78,8 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
engines.deny_engine->Evaluate(args);
if (decision.type == AuthorizationEngine::Decision::Type::kDeny) {
GRPC_TRACE_LOG(grpc_authz_api, INFO)
<< "chand=" << this << ": request denied by policy " <<
decision.matching_policy_name;
<< "chand=" << this << ": request denied by policy "
<< decision.matching_policy_name;
return false;
}
}
@ -94,8 +94,8 @@ bool GrpcServerAuthzFilter::IsAuthorized(ClientMetadata& initial_metadata) {
return true;
}
}
GRPC_TRACE_LOG(grpc_authz_api, INFO) << "chand=" << this <<
": request denied, no matching policy found.";
GRPC_TRACE_LOG(grpc_authz_api, INFO)
<< "chand=" << this << ": request denied, no matching policy found.";
return false;
}

@ -122,9 +122,8 @@ void grpc_plugin_credentials::PendingRequest::RequestMetadataReady(
grpc_core::RefCountedPtr<grpc_plugin_credentials::PendingRequest> r(
static_cast<grpc_plugin_credentials::PendingRequest*>(request));
GRPC_TRACE_LOG(plugin_credentials, INFO)
<< "plugin_credentials[" << r->creds() << "]: request "
<< r.get() <<
": plugin returned asynchronously";
<< "plugin_credentials[" << r->creds() << "]: request " << r.get()
<< ": plugin returned asynchronously";
for (size_t i = 0; i < num_md; ++i) {
grpc_metadata p;
p.key = grpc_core::CSliceRef(md[i].key);
@ -151,9 +150,8 @@ grpc_plugin_credentials::GetRequestMetadata(
args);
// Invoke the plugin. The callback holds a ref to us.
GRPC_TRACE_LOG(plugin_credentials, INFO)
<< "plugin_credentials[" << this << "]: request "
<< request.get() <<
": invoking plugin";
<< "plugin_credentials[" << this << "]: request " << request.get()
<< ": invoking plugin";
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0;
grpc_status_code status = GRPC_STATUS_OK;
@ -169,15 +167,14 @@ grpc_plugin_credentials::GetRequestMetadata(
&status, &error_details)) {
child_request.release();
GRPC_TRACE_LOG(plugin_credentials, INFO)
<< "plugin_credentials[" << this << "]: request " <<
request.get() << ": plugin will return asynchronously";
<< "plugin_credentials[" << this << "]: request " << request.get()
<< ": plugin will return asynchronously";
return [request] { return request->PollAsyncResult(); };
}
// Synchronous return.
GRPC_TRACE_LOG(plugin_credentials, INFO)
<< "plugin_credentials[" << this << "]: request "
<< request.get() <<
": plugin returned synchronously";
<< "plugin_credentials[" << this << "]: request " << request.get()
<< ": plugin returned synchronously";
auto result = request->ProcessPluginResult(creds_md, num_creds_md, status,
error_details);
// Clean up.

@ -47,14 +47,14 @@ struct grpc_slice_refcount {
void Ref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_add(1, std::memory_order_relaxed);
GRPC_TRACE_LOG(slice_refcount, INFO)
.AtLocation(location.file(), location.line()) <<
"REF " << this << " " << prev_refs << "->" << prev_refs + 1;
.AtLocation(location.file(), location.line())
<< "REF " << this << " " << prev_refs << "->" << prev_refs + 1;
}
void Unref(grpc_core::DebugLocation location) {
auto prev_refs = ref_.fetch_sub(1, std::memory_order_acq_rel);
GRPC_TRACE_LOG(slice_refcount, INFO)
.AtLocation(location.file(), location.line()) <<
"UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
.AtLocation(location.file(), location.line())
<< "UNREF " << this << " " << prev_refs << "->" << prev_refs - 1;
if (prev_refs == 1) {
destroyer_fn_(this);
}

@ -72,8 +72,8 @@ Timestamp BdpEstimator::CompletePing() {
if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0;
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]:update_inter_time to " <<
inter_ping_delay_.millis() << "ms";
<< "bdp[" << name_ << "]:update_inter_time to "
<< inter_ping_delay_.millis() << "ms";
}
ping_state_ = PingState::UNSCHEDULED;
accumulator_ = 0;

@ -50,8 +50,8 @@ class BdpEstimator {
// transport (but not necessarily started)
void SchedulePing() {
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]:sched acc=" << accumulator_ <<
" est=" << estimate_;
<< "bdp[" << name_ << "]:sched acc=" << accumulator_
<< " est=" << estimate_;
CHECK(ping_state_ == PingState::UNSCHEDULED);
ping_state_ = PingState::SCHEDULED;
accumulator_ = 0;
@ -62,8 +62,8 @@ class BdpEstimator {
// the ping is on the wire
void StartPing() {
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]:start acc=" << accumulator_ <<
" est=" << estimate_;
<< "bdp[" << name_ << "]:start acc=" << accumulator_
<< " est=" << estimate_;
CHECK(ping_state_ == PingState::SCHEDULED);
ping_state_ = PingState::STARTED;
ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);

@ -118,8 +118,8 @@ void ConnectivityStateTracker::AddWatcher(
grpc_connectivity_state initial_state,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
GRPC_TRACE_LOG(connectivity_state, INFO)
<< "ConnectivityStateTracker " << name_ << "[" << this <<
"]: add watcher " << watcher.get();
<< "ConnectivityStateTracker " << name_ << "[" << this
<< "]: add watcher " << watcher.get();
grpc_connectivity_state current_state =
state_.load(std::memory_order_relaxed);
if (initial_state != current_state) {
@ -141,8 +141,8 @@ void ConnectivityStateTracker::AddWatcher(
void ConnectivityStateTracker::RemoveWatcher(
ConnectivityStateWatcherInterface* watcher) {
GRPC_TRACE_LOG(connectivity_state, INFO)
<< "ConnectivityStateTracker " << name_ << "[" << this <<
"]: remove watcher " << watcher;
<< "ConnectivityStateTracker " << name_ << "[" << this
<< "]: remove watcher " << watcher;
watchers_.erase(watcher);
}
@ -177,8 +177,8 @@ void ConnectivityStateTracker::SetState(grpc_connectivity_state state,
grpc_connectivity_state ConnectivityStateTracker::state() const {
grpc_connectivity_state state = state_.load(std::memory_order_relaxed);
GRPC_TRACE_LOG(connectivity_state, INFO)
<< "ConnectivityStateTracker " << name_ << "[" << this <<
"]: get current state: " << ConnectivityStateName(state);
<< "ConnectivityStateTracker " << name_ << "[" << this
<< "]: get current state: " << ConnectivityStateName(state);
return state;
}

Loading…
Cancel
Save