From 3df8e0787f1170c088d3fc7470415af8f8b4de6a Mon Sep 17 00:00:00 2001
From: Tanvi Jagtap <139093547+tanvi-jagtap@users.noreply.github.com>
Date: Fri, 24 May 2024 03:32:12 -0700
Subject: [PATCH] [grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log (#36711)

[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log

In this CL we are migrating from gRPC's own gpr logging mechanism to the
absl logging mechanism. The intention is to deprecate gpr_log in the
future.

We use the following mapping:
1. gpr_log(GPR_INFO, ...) -> LOG(INFO)
2. gpr_log(GPR_ERROR, ...) -> LOG(ERROR)
3. gpr_log(GPR_DEBUG, ...) -> VLOG(2)

(See the illustrative before/after sketch appended after the diff.)

Reviewers need to check:
1. Whether the above mapping is correct.
2. Whether the content of each log message is unchanged.

gpr_log format strings could not take string_view or std::string directly,
while absl LOG streams accept them, so some string_view and std::string
related conversions (such as .c_str() calls) are eliminated. This is
expected.

Closes #36711

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/36711 from tanvi-jagtap:trash e208603d6895e4a6e747d2c0418d6bff7ed8624c
PiperOrigin-RevId: 636858250
---
 .../external_connection_acceptor_impl.cc      |  8 +-
 .../health/default_health_check_service.cc    | 40 +++++-----
 .../server/load_reporter/load_data_store.cc   | 51 ++++++------
 src/cpp/server/load_reporter/load_reporter.cc | 40 +++++-----
 .../load_reporter_async_service_impl.cc       | 78 +++++++++----------
 .../tests/CronetTests/CronetUnitTests.mm      |  3 +-
 6 files changed, 103 insertions(+), 117 deletions(-)

diff --git a/src/cpp/server/external_connection_acceptor_impl.cc b/src/cpp/server/external_connection_acceptor_impl.cc index 2237ac88131..208ac0a37f2 100644 --- a/src/cpp/server/external_connection_acceptor_impl.cc +++ b/src/cpp/server/external_connection_acceptor_impl.cc @@ -22,8 +22,8 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" -#include #include #include #include @@ -69,10 +69,8 @@ void ExternalConnectionAcceptorImpl::HandleNewConnection( grpc_core::MutexLock lock(&mu_); if (shutdown_ || !started_) { // TODO(yangg) clean up.
- gpr_log( - GPR_ERROR, - "NOT handling external connection with fd %d, started %d, shutdown %d", - p->fd, started_, shutdown_); + LOG(ERROR) << "NOT handling external connection with fd " << p->fd + << ", started " << started_ << ", shutdown " << shutdown_; return; } if (handler_) { diff --git a/src/cpp/server/health/default_health_check_service.cc b/src/cpp/server/health/default_health_check_service.cc index fc0b59c5299..179bdf7a2ed 100644 --- a/src/cpp/server/health/default_health_check_service.cc +++ b/src/cpp/server/health/default_health_check_service.cc @@ -24,11 +24,11 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "upb/base/string_view.h" #include "upb/mem/arena.hpp" #include -#include #include #include #include @@ -259,8 +259,8 @@ DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor::WatchReactor( ++service_->num_watches_; } bool success = DecodeRequest(*request, &service_name_); - gpr_log(GPR_DEBUG, "[HCS %p] watcher %p \"%s\": watch call started", service_, - this, service_name_.c_str()); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": watch call started"; if (!success) { MaybeFinishLocked(Status(StatusCode::INTERNAL, "could not parse request")); return; @@ -271,15 +271,14 @@ DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor::WatchReactor( void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor:: SendHealth(ServingStatus status) { - gpr_log(GPR_DEBUG, - "[HCS %p] watcher %p \"%s\": SendHealth() for ServingStatus %d", - service_, this, service_name_.c_str(), status); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": SendHealth() for ServingStatus " << status; grpc::internal::MutexLock lock(&mu_); // If there's already a send in flight, cache the new status, and // we'll start a new send for it when the one in flight completes. 
if (write_pending_) { - gpr_log(GPR_DEBUG, "[HCS %p] watcher %p \"%s\": queuing write", service_, - this, service_name_.c_str()); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": queuing write"; pending_status_ = status; return; } @@ -307,17 +306,16 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor:: Status(StatusCode::INTERNAL, "could not encode response")); return; } - gpr_log(GPR_DEBUG, - "[HCS %p] watcher %p \"%s\": starting write for ServingStatus %d", - service_, this, service_name_.c_str(), status); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": starting write for ServingStatus " << status; write_pending_ = true; StartWrite(&response_); } void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor:: OnWriteDone(bool ok) { - gpr_log(GPR_DEBUG, "[HCS %p] watcher %p \"%s\": OnWriteDone(): ok=%d", - service_, this, service_name_.c_str(), ok); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": OnWriteDone(): ok=" << ok; response_.Clear(); grpc::internal::MutexLock lock(&mu_); if (!ok) { @@ -341,8 +339,8 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor:: } void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor::OnDone() { - gpr_log(GPR_DEBUG, "[HCS %p] watcher %p \"%s\": OnDone()", service_, this, - service_name_.c_str()); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": OnDone()"; service_->database_->UnregisterWatch(service_name_, this); { grpc::internal::MutexLock lock(&service_->mu_); @@ -356,13 +354,13 @@ void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor::OnDone() { void DefaultHealthCheckService::HealthCheckServiceImpl::WatchReactor:: MaybeFinishLocked(Status status) { - gpr_log(GPR_DEBUG, - "[HCS %p] watcher %p \"%s\": MaybeFinishLocked() with code=%d msg=%s", - service_, this, service_name_.c_str(), status.error_code(), - status.error_message().c_str()); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ + << "\": MaybeFinishLocked() with code=" << status.error_code() + << " msg=" << status.error_message(); if (!finish_called_) { - gpr_log(GPR_DEBUG, "[HCS %p] watcher %p \"%s\": actually calling Finish()", - service_, this, service_name_.c_str()); + VLOG(2) << "[HCS " << service_ << "] watcher " << this << " \"" + << service_name_ << "\": actually calling Finish()"; finish_called_ = true; Finish(status); } diff --git a/src/cpp/server/load_reporter/load_data_store.cc b/src/cpp/server/load_reporter/load_data_store.cc index 0f7acfbded9..649c073efe1 100644 --- a/src/cpp/server/load_reporter/load_data_store.cc +++ b/src/cpp/server/load_reporter/load_data_store.cc @@ -27,8 +27,8 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" -#include #include #include "src/core/lib/iomgr/socket_utils.h" @@ -111,9 +111,8 @@ std::string LoadRecordKey::GetClientIpBytes() const { } else if (client_ip_hex_.size() == kIpv4AddressLength) { uint32_t ip_bytes; if (sscanf(client_ip_hex_.c_str(), "%x", &ip_bytes) != 1) { - gpr_log(GPR_ERROR, - "Can't parse client IP (%s) from a hex string to an integer.", - client_ip_hex_.c_str()); + LOG(ERROR) << "Can't parse client IP (" << client_ip_hex_ + << ") from a hex string to an integer."; return ""; } ip_bytes = grpc_htonl(ip_bytes); @@ -124,10 +123,9 @@ std::string LoadRecordKey::GetClientIpBytes() const { for (size_t i = 0; i < 4; ++i) { if 
(sscanf(client_ip_hex_.substr(i * 8, (i + 1) * 8).c_str(), "%x", ip_bytes + i) != 1) { - gpr_log( - GPR_ERROR, - "Can't parse client IP part (%s) from a hex string to an integer.", - client_ip_hex_.substr(i * 8, (i + 1) * 8).c_str()); + LOG(ERROR) << "Can't parse client IP part (" + << client_ip_hex_.substr(i * 8, (i + 1) * 8) + << ") from a hex string to an integer."; return ""; } ip_bytes[i] = grpc_htonl(ip_bytes[i]); @@ -150,13 +148,13 @@ void PerBalancerStore::MergeRow(const LoadRecordKey& key, // During suspension, the load data received will be dropped. if (!suspended_) { load_record_map_[key].MergeFrom(value); - gpr_log(GPR_DEBUG, - "[PerBalancerStore %p] Load data merged (Key: %s, Value: %s).", - this, key.ToString().c_str(), value.ToString().c_str()); + VLOG(2) << "[PerBalancerStore " << this + << "] Load data merged (Key: " << key.ToString() + << ", Value: " << value.ToString() << ")."; } else { - gpr_log(GPR_DEBUG, - "[PerBalancerStore %p] Load data dropped (Key: %s, Value: %s).", - this, key.ToString().c_str(), value.ToString().c_str()); + VLOG(2) << "[PerBalancerStore " << this + << "] Load data dropped (Key: " << key.ToString() + << ", Value: " << value.ToString() << ")."; } // We always keep track of num_calls_in_progress_, so that when this // store is resumed, we still have a correct value of @@ -170,12 +168,12 @@ void PerBalancerStore::MergeRow(const LoadRecordKey& key, void PerBalancerStore::Suspend() { suspended_ = true; load_record_map_.clear(); - gpr_log(GPR_DEBUG, "[PerBalancerStore %p] Suspended.", this); + VLOG(2) << "[PerBalancerStore " << this << "] Suspended."; } void PerBalancerStore::Resume() { suspended_ = false; - gpr_log(GPR_DEBUG, "[PerBalancerStore %p] Resumed.", this); + VLOG(2) << "[PerBalancerStore " << this << "] Resumed."; } uint64_t PerBalancerStore::GetNumCallsInProgressForReport() { @@ -260,11 +258,9 @@ void PerHostStore::AssignOrphanedStore(PerBalancerStore* orphaned_store, auto it = assigned_stores_.find(new_receiver); CHECK(it != assigned_stores_.end()); it->second.insert(orphaned_store); - gpr_log(GPR_INFO, - "[PerHostStore %p] Re-assigned orphaned store (%p) with original LB" - " ID of %s to new receiver %s", - this, orphaned_store, orphaned_store->lb_id().c_str(), - new_receiver.c_str()); + LOG(INFO) << "[PerHostStore " << this << "] Re-assigned orphaned store (" + << orphaned_store << ") with original LB ID of " + << orphaned_store->lb_id() << " to new receiver " << new_receiver; } void PerHostStore::SetUpForNewLbId(const std::string& lb_id, @@ -306,17 +302,16 @@ void LoadDataStore::MergeRow(const std::string& hostname, if (in_progress_delta != 0) { auto it_tracker = unknown_balancer_id_trackers_.find(key.lb_id()); if (it_tracker == unknown_balancer_id_trackers_.end()) { - gpr_log( - GPR_DEBUG, - "[LoadDataStore %p] Start tracking unknown balancer (lb_id_: %s).", - this, key.lb_id().c_str()); + VLOG(2) << "[LoadDataStore " << this + << "] Start tracking unknown balancer (lb_id_: " << key.lb_id() + << ")."; unknown_balancer_id_trackers_.insert( {key.lb_id(), static_cast(in_progress_delta)}); } else if ((it_tracker->second += in_progress_delta) == 0) { unknown_balancer_id_trackers_.erase(it_tracker); - gpr_log(GPR_DEBUG, - "[LoadDataStore %p] Stop tracking unknown balancer (lb_id_: %s).", - this, key.lb_id().c_str()); + VLOG(2) << "[LoadDataStore " << this + << "] Stop tracking unknown balancer (lb_id_: " << key.lb_id() + << ")."; } } } diff --git a/src/cpp/server/load_reporter/load_reporter.cc b/src/cpp/server/load_reporter/load_reporter.cc 
index 9a0e2a875e9..6af88e8ba7f 100644 --- a/src/cpp/server/load_reporter/load_reporter.cc +++ b/src/cpp/server/load_reporter/load_reporter.cc @@ -28,9 +28,9 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include "opencensus/tags/tag_key.h" -#include #include #include "src/cpp/server/load_reporter/constants.h" @@ -201,20 +201,19 @@ CensusViewProviderDefaultImpl::CensusViewProviderDefaultImpl() { } CensusViewProvider::ViewDataMap CensusViewProviderDefaultImpl::FetchViewData() { - gpr_log(GPR_DEBUG, "[CVP %p] Starts fetching Census view data.", this); + VLOG(2) << "[CVP " << this << "] Starts fetching Census view data."; ViewDataMap view_data_map; for (auto& p : view_map_) { const std::string& view_name = p.first; ::opencensus::stats::View& view = p.second; if (view.IsValid()) { view_data_map.emplace(view_name, view.GetData()); - gpr_log(GPR_DEBUG, "[CVP %p] Fetched view data (view: %s).", this, - view_name.c_str()); + VLOG(2) << "[CVP " << this << "] Fetched view data (view: " << view_name + << ")."; } else { - gpr_log( - GPR_DEBUG, - "[CVP %p] Can't fetch view data because view is invalid (view: %s).", - this, view_name.c_str()); + VLOG(2) << "[CVP " << this + << "] Can't fetch view data because view is invalid (view: " + << view_name << ")."; } } return view_data_map; @@ -223,8 +222,8 @@ CensusViewProvider::ViewDataMap CensusViewProviderDefaultImpl::FetchViewData() { std::string LoadReporter::GenerateLbId() { while (true) { if (next_lb_id_ > UINT32_MAX) { - gpr_log(GPR_ERROR, "[LR %p] The LB ID exceeds the max valid value!", - this); + LOG(ERROR) << "[LR " << this + << "] The LB ID exceeds the max valid value!"; return ""; } int64_t lb_id = next_lb_id_++; @@ -387,17 +386,16 @@ void LoadReporter::ReportStreamCreated(const std::string& hostname, const std::string& load_key) { grpc_core::MutexLock lock(&store_mu_); load_data_store_.ReportStreamCreated(hostname, lb_id, load_key); - gpr_log(GPR_INFO, - "[LR %p] Report stream created (host: %s, LB ID: %s, load key: %s).", - this, hostname.c_str(), lb_id.c_str(), load_key.c_str()); + LOG(INFO) << "[LR " << this << "] Report stream created (host: " << hostname + << ", LB ID: " << lb_id << ", load key: " << load_key << ")."; } void LoadReporter::ReportStreamClosed(const std::string& hostname, const std::string& lb_id) { grpc_core::MutexLock lock(&store_mu_); load_data_store_.ReportStreamClosed(hostname, lb_id); - gpr_log(GPR_INFO, "[LR %p] Report stream closed (host: %s, LB ID: %s).", this, - hostname.c_str(), lb_id.c_str()); + LOG(INFO) << "[LR " << this << "] Report stream closed (host: " << hostname + << ", LB ID: " << lb_id << ")."; } void LoadReporter::ProcessViewDataCallStart( @@ -437,9 +435,8 @@ void LoadReporter::ProcessViewDataCallEnd( // implementation. // TODO(juanlishen): Check whether this situation happens in OSS C++. 
if (client_ip_and_token.empty()) { - gpr_log(GPR_DEBUG, - "Skipping processing Opencensus record with empty " - "client_ip_and_token tag."); + VLOG(2) << "Skipping processing Opencensus record with empty " + "client_ip_and_token tag."; continue; } LoadRecordKey key(client_ip_and_token, user_id); @@ -500,10 +497,9 @@ void LoadReporter::ProcessViewDataOtherCallMetrics( } void LoadReporter::FetchAndSample() { - gpr_log(GPR_DEBUG, - "[LR %p] Starts fetching Census view data and sampling LB feedback " - "record.", - this); + VLOG(2) << "[LR " << this + << "] Starts fetching Census view data and sampling LB feedback " + "record."; CensusViewProvider::ViewDataMap view_data_map = census_view_provider_->FetchViewData(); ProcessViewDataCallStart(view_data_map); diff --git a/src/cpp/server/load_reporter/load_reporter_async_service_impl.cc b/src/cpp/server/load_reporter/load_reporter_async_service_impl.cc index f4d20b13768..fc452fc8ced 100644 --- a/src/cpp/server/load_reporter/load_reporter_async_service_impl.cc +++ b/src/cpp/server/load_reporter/load_reporter_async_service_impl.cc @@ -23,6 +23,7 @@ #include #include "absl/log/check.h" +#include "absl/log/log.h" #include #include @@ -83,15 +84,15 @@ void LoadReporterAsyncServiceImpl::ScheduleNextFetchAndSample() { next_fetch_and_sample_alarm_->Set(cq_.get(), next_fetch_and_sample_time, this); } - gpr_log(GPR_DEBUG, "[LRS %p] Next fetch-and-sample scheduled.", this); + VLOG(2) << "[LRS " << this << "] Next fetch-and-sample scheduled."; } void LoadReporterAsyncServiceImpl::FetchAndSample(bool ok) { if (!ok) { - gpr_log(GPR_INFO, "[LRS %p] Fetch-and-sample is stopped.", this); + LOG(INFO) << "[LRS " << this << "] Fetch-and-sample is stopped."; return; } - gpr_log(GPR_DEBUG, "[LRS %p] Starting a fetch-and-sample...", this); + VLOG(2) << "[LRS " << this << "] Starting a fetch-and-sample..."; load_reporter_->FetchAndSample(); ScheduleNextFetchAndSample(); } @@ -191,10 +192,10 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnRequestDelivered( } // LB ID is unique for each load reporting stream. lb_id_ = load_reporter_->GenerateLbId(); - gpr_log(GPR_INFO, - "[LRS %p] Call request delivered (lb_id_: %s, handler: %p). " - "Start reading the initial request...", - service_, lb_id_.c_str(), this); + LOG(INFO) << "[LRS " << service_ + << "] Call request delivered (lb_id_: " << lb_id_ + << ", handler: " << this + << "). Start reading the initial request..."; } void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone( @@ -202,11 +203,12 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone( if (!ok || shutdown_) { if (!ok && call_status_ < INITIAL_REQUEST_RECEIVED) { // The client may have half-closed the stream or the stream is broken. - gpr_log(GPR_INFO, - "[LRS %p] Failed reading the initial request from the stream " - "(lb_id_: %s, handler: %p, done_notified: %d, is_cancelled: %d).", - service_, lb_id_.c_str(), this, static_cast(done_notified_), - static_cast(is_cancelled_)); + LOG(INFO) << "[LRS " << service_ + << "] Failed reading the initial request from the stream " + "(lb_id_: " + << lb_id_ << ", handler: " << this + << ", done_notified: " << done_notified_ + << ", is_cancelled: " << is_cancelled_ << ")."; } Shutdown(std::move(self), "OnReadDone"); return; @@ -226,12 +228,12 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone( load_report_interval_ms_ = static_cast(load_report_interval.seconds() * 1000 + load_report_interval.nanos() / 1000); - gpr_log(GPR_INFO, - "[LRS %p] Initial request received. 
Start load reporting (load " - "balanced host: %s, interval: %" PRIu64 - " ms, lb_id_: %s, handler: %p)...", - service_, load_balanced_hostname_.c_str(), - load_report_interval_ms_, lb_id_.c_str(), this); + LOG(INFO) << "[LRS " << service_ + << "] Initial request received. Start load reporting (load " + "balanced host: " + << load_balanced_hostname_ + << ", interval: " << load_report_interval_ms_ + << " ms, lb_id_: " << lb_id_ << ", handler: " << this << ")..."; SendReport(self, true /* ok */); // Expect this read to fail. { @@ -250,9 +252,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnReadDone( } } else { // Another request received! This violates the spec. - gpr_log(GPR_ERROR, - "[LRS %p] Another request received (lb_id_: %s, handler: %p).", - service_, lb_id_.c_str(), this); + LOG(ERROR) << "[LRS " << service_ + << "] Another request received (lb_id_: " << lb_id_ + << ", handler: " << this << ")."; Shutdown(std::move(self), "OnReadDone+second_request"); } } @@ -282,9 +284,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::ScheduleNextReport( next_report_alarm_ = std::make_unique(); next_report_alarm_->Set(cq_, next_report_time, &next_outbound_); } - gpr_log(GPR_DEBUG, - "[LRS %p] Next load report scheduled (lb_id_: %s, handler: %p).", - service_, lb_id_.c_str(), this); + VLOG(2) << "[LRS " << service_ + << "] Next load report scheduled (lb_id_: " << lb_id_ + << ", handler: " << this << ")."; } void LoadReporterAsyncServiceImpl::ReportLoadHandler::SendReport( @@ -318,10 +320,10 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::SendReport( std::placeholders::_1, std::placeholders::_2), std::move(self)); stream_.Write(response, &next_outbound_); - gpr_log(GPR_INFO, - "[LRS %p] Sending load report (lb_id_: %s, handler: %p, loads " - "count: %d)...", - service_, lb_id_.c_str(), this, response.load().size()); + LOG(INFO) << "[LRS " << service_ + << "] Sending load report (lb_id_: " << lb_id_ + << ", handler: " << this + << ", loads count: " << response.load().size() << ")..."; } } @@ -332,20 +334,18 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnDoneNotified( if (ctx_.IsCancelled()) { is_cancelled_ = true; } - gpr_log(GPR_INFO, - "[LRS %p] Load reporting call is notified done (handler: %p, " - "is_cancelled: %d).", - service_, this, static_cast(is_cancelled_)); + LOG(INFO) << "[LRS " << service_ + << "] Load reporting call is notified done (handler: " << this + << ", is_cancelled: " << is_cancelled_ << ")."; Shutdown(std::move(self), "OnDoneNotified"); } void LoadReporterAsyncServiceImpl::ReportLoadHandler::Shutdown( std::shared_ptr self, const char* reason) { if (!shutdown_) { - gpr_log(GPR_INFO, - "[LRS %p] Shutting down the handler (lb_id_: %s, handler: %p, " - "reason: %s).", - service_, lb_id_.c_str(), this, reason); + LOG(INFO) << "[LRS " << service_ + << "] Shutting down the handler (lb_id_: " << lb_id_ + << ", handler: " << this << ", reason: " << reason << ")."; shutdown_ = true; if (call_status_ >= INITIAL_REQUEST_RECEIVED) { load_reporter_->ReportStreamClosed(load_balanced_hostname_, lb_id_); @@ -374,9 +374,9 @@ void LoadReporterAsyncServiceImpl::ReportLoadHandler::OnFinishDone( // NOLINTNEXTLINE(performance-unnecessary-value-param) std::shared_ptr /*self*/, bool ok) { if (ok) { - gpr_log(GPR_INFO, - "[LRS %p] Load reporting finished (lb_id_: %s, handler: %p).", - service_, lb_id_.c_str(), this); + LOG(INFO) << "[LRS " << service_ + << "] Load reporting finished (lb_id_: " << lb_id_ + << ", handler: " << this << ")."; } } diff --git 
a/src/objective-c/tests/CronetTests/CronetUnitTests.mm b/src/objective-c/tests/CronetTests/CronetUnitTests.mm index e09a06e821a..ba913160525 100644 --- a/src/objective-c/tests/CronetTests/CronetUnitTests.mm +++ b/src/objective-c/tests/CronetTests/CronetUnitTests.mm @@ -29,7 +29,6 @@ #import "test/core/test_util/port.h" #import -#import #import "src/core/lib/channel/channel_args.h" #import "src/core/lib/gprpp/env.h" @@ -322,7 +321,7 @@ unsigned int parse_h2_length(const char *field) { long len; BOOL coalesced = NO; while ((len = SSL_read(ssl, buf, sizeof(buf))) > 0) { - gpr_log(GPR_DEBUG, "Read len: %ld", len); + VLOG(2) << "Read len: " << len; // Analyze the HTTP/2 frames in the same TLS PDU to identify if // coalescing is successful
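
For reviewers less familiar with the Abseil logging API, below is a minimal, stand-alone sketch of the gpr_log -> absl mapping described in the commit message. It is not part of the patch: the function, its parameters, and the message text are hypothetical and only illustrate the pattern; it assumes the "absl/log/log.h" header used throughout this change is available, and that VLOG(2) output stays silent unless the process verbosity level is raised (for example via absl's vlog settings).

// Illustrative sketch only -- not part of the patch. Names are hypothetical.
#include <string>

#include "absl/log/log.h"
#include "absl/strings/string_view.h"

void LogExamples(int fd, bool started, bool shutdown,
                 const std::string& lb_id, absl::string_view service_name) {
  // Before: gpr_log(GPR_ERROR, "... fd %d, started %d, shutdown %d", fd, started, shutdown);
  // After: GPR_ERROR maps to LOG(ERROR); bools stream as 1/0, matching %d.
  LOG(ERROR) << "NOT handling external connection with fd " << fd
             << ", started " << started << ", shutdown " << shutdown;

  // Before: gpr_log(GPR_INFO, "... LB ID: %s ...", lb_id.c_str());
  // After: std::string and absl::string_view stream directly, so .c_str() is dropped.
  LOG(INFO) << "Report stream created (LB ID: " << lb_id << ").";

  // Before: gpr_log(GPR_DEBUG, "... \"%s\": watch call started", service_name.data());
  // After: GPR_DEBUG maps to VLOG(2), compiled in but off by default.
  VLOG(2) << "watcher \"" << service_name << "\": watch call started";
}

The choice of VLOG(2) for GPR_DEBUG keeps debug-level output disabled by default, which matches gpr's behavior of only emitting GPR_DEBUG messages when verbosity is raised; whether that is the intended mapping is exactly what the commit message asks reviewers to confirm.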