Implement TraceFlag::Log (#26954)

Motivation: In debug builds, `DebugOnlyTraceFlag`s are hard-coded to be
disabled. This results in code paths that the compiler can prove
unreachable, which prevents us from enabling
`-Wunreachable-code-aggressive` on those builds.

This work aims to reduce the number of places that switch on
`trace_flag.enabled`.
Author: AJ Heller (committed by GitHub, 4 years ago)
Parent: 971e95b906
Commit: 4d2b979b75
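As the hunks below show, call sites that previously wrapped `gpr_log()` in an explicit `GRPC_TRACE_FLAG_ENABLED(...)` check now make a single `trace_flag.Log(...)` call, with the enabled-check performed inside `TraceFlag`. The following is a minimal standalone sketch of that pattern, using simplified stand-ins for the class, the `GPR_INFO` macro, and the logging backend; it is for illustration only and is not the exact code this PR adds to `src/core/lib/debug/trace.h`.

    #include <cstdarg>
    #include <cstdio>

    // Simplified stand-in for grpc_core::TraceFlag (illustration only).
    class TraceFlag {
     public:
      explicit TraceFlag(bool enabled) : enabled_(enabled) {}
      bool enabled() const { return enabled_; }

      // Before: if (GRPC_TRACE_FLAG_ENABLED(flag)) gpr_log(GPR_INFO, "...", ...);
      // After:  flag.Log(GPR_INFO, "...", ...);  // enabled-check happens here
      void Log(const char* file, int line, const char* format, ...) const {
        if (!enabled()) return;  // cheap no-op when the flag is off
        va_list args;
        va_start(args, format);
        std::fprintf(stderr, "%s:%d: ", file, line);
        std::vfprintf(stderr, format, args);
        std::fputc('\n', stderr);
        va_end(args);
      }

     private:
      bool enabled_;
    };

    // gRPC's GPR_INFO expands to file, line, and severity; this sketch keeps
    // only file and line so the example compiles on its own.
    #define SKETCH_INFO __FILE__, __LINE__

    int main() {
      TraceFlag routing_trace(/*enabled=*/true);
      routing_trace.Log(SKETCH_INFO, "chand=%p: starting name resolution",
                        static_cast<void*>(&routing_trace));
    }
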
Changed files (lines changed):
  1. BUILD (1)
  2. build_autogenerated.yaml (1)
  3. gRPC-C++.podspec (2)
  4. gRPC-Core.podspec (2)
  5. grpc.gemspec (1)
  6. package.xml (1)
  7. src/core/ext/filters/client_channel/client_channel.cc (449)
  8. src/core/ext/filters/client_channel/health/health_check_client.cc (53)
  9. src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc (65)
  10. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (105)
  11. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (75)
  12. src/core/ext/filters/client_channel/lb_policy/priority/priority.cc (198)
  13. src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc (60)
  14. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (79)
  15. src/core/ext/filters/client_channel/lb_policy/subchannel_list.h (110)
  16. src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc (132)
  17. src/core/ext/filters/client_channel/lb_policy/xds/cds.cc (78)
  18. src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc (91)
  19. src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc (113)
  20. src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc (116)
  21. src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc (54)
  22. src/core/ext/filters/client_channel/retry_filter.cc (463)
  23. src/core/ext/filters/client_channel/subchannel.cc (20)
  24. src/core/ext/filters/fault_injection/fault_injection_filter.cc (31)
  25. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (76)
  26. src/core/ext/transport/chttp2/transport/frame_settings.cc (15)
  27. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (5)
  28. src/core/ext/transport/chttp2/transport/hpack_parser.cc (4)
  29. src/core/ext/transport/chttp2/transport/hpack_table.cc (9)
  30. src/core/ext/transport/chttp2/transport/parsing.cc (9)
  31. src/core/ext/transport/chttp2/transport/stream_lists.cc (14)
  32. src/core/ext/transport/chttp2/transport/writing.cc (49)
  33. src/core/ext/transport/cronet/transport/cronet_transport.cc (184)
  34. src/core/ext/xds/xds_api.cc (6)
  35. src/core/ext/xds/xds_client.cc (338)
  36. src/core/ext/xds/xds_client_stats.cc (53)
  37. src/core/ext/xds/xds_server_config_fetcher.cc (10)
  38. src/core/lib/channel/handshaker.cc (43)
  39. src/core/lib/debug/trace.h (12)
  40. src/core/lib/gpr/log_android.cc (11)
  41. src/core/lib/gpr/log_internal.h (25)
  42. src/core/lib/gpr/log_linux.cc (10)
  43. src/core/lib/gpr/log_posix.cc (20)
  44. src/core/lib/gpr/log_windows.cc (18)
  45. src/core/lib/http/parser.cc (7)
  46. src/core/lib/iomgr/call_combiner.cc (96)
  47. src/core/lib/iomgr/call_combiner.h (14)
  48. src/core/lib/iomgr/cfstream_handle.cc (12)
  49. src/core/lib/iomgr/closure.h (13)
  50. src/core/lib/iomgr/combiner.cc (69)
  51. src/core/lib/iomgr/endpoint_cfstream.cc (44)
  52. src/core/lib/iomgr/error.cc (39)
  53. src/core/lib/iomgr/ev_apple.cc (6)
  54. src/core/lib/iomgr/ev_epoll1_linux.cc (132)
  55. src/core/lib/iomgr/ev_epollex_linux.cc (193)
  56. src/core/lib/iomgr/ev_poll_posix.cc (47)
  57. src/core/lib/iomgr/ev_posix.cc (6)
  58. src/core/lib/iomgr/ev_posix.h (6)
  59. src/core/lib/iomgr/event_engine/closure.cc (18)
  60. src/core/lib/iomgr/exec_ctx.cc (15)
  61. src/core/lib/iomgr/lockfree_event.cc (22)
  62. src/core/lib/iomgr/resource_quota.cc (132)
  63. src/core/lib/iomgr/socket_utils_common_posix.cc (12)
  64. src/core/lib/iomgr/tcp_client_cfstream.cc (17)
  65. src/core/lib/iomgr/tcp_client_custom.cc (14)
  66. src/core/lib/iomgr/tcp_client_posix.cc (21)
  67. src/core/lib/iomgr/tcp_custom.cc (28)
  68. src/core/lib/iomgr/tcp_posix.cc (116)
  69. src/core/lib/iomgr/tcp_server_custom.cc (18)
  70. src/core/lib/iomgr/tcp_server_posix.cc (13)
  71. src/core/lib/iomgr/tcp_windows.cc (16)
  72. src/core/lib/iomgr/timer_generic.cc (106)
  73. src/core/lib/iomgr/timer_manager.cc (44)
  74. src/core/lib/iomgr/work_serializer.cc (50)
  75. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (11)
  76. src/core/lib/security/credentials/plugin/plugin_credentials.cc (68)
  77. src/core/lib/surface/call.cc (7)
  78. src/core/lib/surface/server.cc (4)
  79. src/core/lib/transport/bdp_estimator.cc (24)
  80. src/core/lib/transport/bdp_estimator.h (14)
  81. src/core/lib/transport/connectivity_state.cc (72)
  82. src/core/lib/transport/transport.h (16)
  83. src/core/tsi/fake_transport_security.cc (24)
  84. test/core/handshake/client_ssl.cc (8)
  85. tools/doxygen/Doxyfile.c++.internal (1)
  86. tools/doxygen/Doxyfile.core.internal (1)

BUILD

@ -669,6 +669,7 @@ grpc_cc_library(
hdrs = [
"src/core/lib/gpr/alloc.h",
"src/core/lib/gpr/env.h",
"src/core/lib/gpr/log_internal.h",
"src/core/lib/gpr/murmur_hash.h",
"src/core/lib/gpr/spinlock.h",
"src/core/lib/gpr/string.h",

build_autogenerated.yaml

@ -302,6 +302,7 @@ libs:
- src/core/ext/upb-generated/google/rpc/status.upb.h
- src/core/lib/gpr/alloc.h
- src/core/lib/gpr/env.h
- src/core/lib/gpr/log_internal.h
- src/core/lib/gpr/murmur_hash.h
- src/core/lib/gpr/spinlock.h
- src/core/lib/gpr/string.h

gRPC-C++.podspec (generated, 2 lines changed)

@ -535,6 +535,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/sockaddr.h',
'src/core/lib/gpr/alloc.h',
'src/core/lib/gpr/env.h',
'src/core/lib/gpr/log_internal.h',
'src/core/lib/gpr/murmur_hash.h',
'src/core/lib/gpr/spinlock.h',
'src/core/lib/gpr/string.h',
@ -1200,6 +1201,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/sockaddr.h',
'src/core/lib/gpr/alloc.h',
'src/core/lib/gpr/env.h',
'src/core/lib/gpr/log_internal.h',
'src/core/lib/gpr/murmur_hash.h',
'src/core/lib/gpr/spinlock.h',
'src/core/lib/gpr/string.h',

gRPC-Core.podspec (generated, 2 lines changed)

@ -886,6 +886,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_internal.h',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
@ -1788,6 +1789,7 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/sockaddr.h',
'src/core/lib/gpr/alloc.h',
'src/core/lib/gpr/env.h',
'src/core/lib/gpr/log_internal.h',
'src/core/lib/gpr/murmur_hash.h',
'src/core/lib/gpr/spinlock.h',
'src/core/lib/gpr/string.h',

grpc.gemspec (generated, 1 line changed)

@ -799,6 +799,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gpr/env_windows.cc )
s.files += %w( src/core/lib/gpr/log.cc )
s.files += %w( src/core/lib/gpr/log_android.cc )
s.files += %w( src/core/lib/gpr/log_internal.h )
s.files += %w( src/core/lib/gpr/log_linux.cc )
s.files += %w( src/core/lib/gpr/log_posix.cc )
s.files += %w( src/core/lib/gpr/log_windows.cc )

package.xml (generated, 1 line changed)

@ -779,6 +779,7 @@
<file baseinstalldir="/" name="src/core/lib/gpr/env_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log_android.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log_linux.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/log_windows.cc" role="src" />

src/core/ext/filters/client_channel/client_channel.cc

@ -353,11 +353,9 @@ class DynamicTerminationFilter::CallData {
args, pollent, nullptr,
service_config_call_data->call_dispatch_controller(),
/*is_transparent_retry=*/false);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p dynamic_termination_calld=%p: create lb_call=%p", chand,
client_channel, calld->lb_call_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p dynamic_termination_calld=%p: create lb_call=%p",
chand, client_channel, calld->lb_call_.get());
}
private:
@ -408,9 +406,8 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
}
~ResolverResultHandler() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver shutdown complete", chand_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: resolver shutdown complete", chand_);
GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ResolverResultHandler");
}
@ -451,11 +448,9 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
chand_(chand),
subchannel_(std::move(subchannel)),
health_check_service_name_(std::move(health_check_service_name)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: creating subchannel wrapper %p for subchannel %p",
chand, this, subchannel_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: creating subchannel wrapper %p for subchannel %p",
chand, this, subchannel_.get());
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "SubchannelWrapper");
auto* subchannel_node = subchannel_->channelz_node();
if (subchannel_node != nullptr) {
@ -471,11 +466,10 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
}
~SubchannelWrapper() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: destroying subchannel wrapper %p for subchannel %p",
chand_, this, subchannel_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: destroying subchannel wrapper %p for subchannel %p", chand_,
this, subchannel_.get());
chand_->subchannel_wrappers_.erase(this);
auto* subchannel_node = subchannel_->channelz_node();
if (subchannel_node != nullptr) {
@ -537,13 +531,12 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
void UpdateHealthCheckServiceName(
absl::optional<std::string> health_check_service_name) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: subchannel wrapper %p: updating health check service "
"name from \"%s\" to \"%s\"",
chand_, this, health_check_service_name_->c_str(),
health_check_service_name->c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: subchannel wrapper %p: updating health check service "
"name from \"%s\" to \"%s\"",
chand_, this, health_check_service_name_->c_str(),
health_check_service_name->c_str());
for (auto& p : watcher_map_) {
WatcherWrapper*& watcher_wrapper = p.second;
// Cancel the current watcher and create a new one using the new
@ -623,12 +616,11 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
}
void OnConnectivityStateChange() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: connectivity change for subchannel wrapper %p "
"subchannel %p; hopping into work_serializer",
parent_->chand_, parent_.get(), parent_->subchannel_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: connectivity change for subchannel wrapper %p "
"subchannel %p; hopping into work_serializer",
parent_->chand_, parent_.get(), parent_->subchannel_.get());
Ref().release(); // ref owned by lambda
parent_->chand_->work_serializer_->Run(
[this]()
@ -658,14 +650,13 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
private:
void ApplyUpdateInControlPlaneWorkSerializer()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_->chand_->work_serializer_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: processing connectivity change in work serializer "
"for subchannel wrapper %p subchannel %p "
"watcher=%p",
parent_->chand_, parent_.get(), parent_->subchannel_.get(),
watcher_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: processing connectivity change in work serializer "
"for subchannel wrapper %p subchannel %p "
"watcher=%p",
parent_->chand_, parent_.get(), parent_->subchannel_.get(),
watcher_.get());
ConnectivityStateChange state_change = PopConnectivityStateChange();
absl::optional<absl::Cord> keepalive_throttling =
state_change.status.GetPayload(kKeepaliveThrottlingKey);
@ -675,10 +666,9 @@ class ClientChannel::SubchannelWrapper : public SubchannelInterface {
&new_keepalive_time)) {
if (new_keepalive_time > parent_->chand_->keepalive_time_) {
parent_->chand_->keepalive_time_ = new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: throttling keepalive time to %d",
parent_->chand_, parent_->chand_->keepalive_time_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: throttling keepalive time to %d",
parent_->chand_, parent_->chand_->keepalive_time_);
// Propagate the new keepalive time to all subchannels. This is so
// that new transports created by any subchannel (and not just the
// subchannel that received the GOAWAY), use the new keepalive time.
@ -1005,9 +995,8 @@ class ClientChannel::ClientChannelControlHelper
void RequestReresolution() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(chand_->work_serializer_) {
if (chand_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: started name re-resolving", chand_);
chand_->resolver_->RequestReresolutionLocked();
}
@ -1089,10 +1078,9 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
subchannel_pool_(GetSubchannelPool(args->channel_args)),
disconnect_error_(GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: creating client_channel for channel stack %p",
this, owning_stack_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: creating client_channel for channel stack %p", this,
owning_stack_);
// Start backup polling.
grpc_client_channel_start_backup_polling(interested_parties_);
// Check client channel factory.
@ -1151,9 +1139,8 @@ ClientChannel::ClientChannel(grpc_channel_element_args* args,
}
ClientChannel::~ClientChannel() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: destroying channel", this);
}
grpc_client_channel_routing_trace.Log(GPR_INFO,
"chand=%p: destroying channel", this);
DestroyResolverAndLbPolicyLocked();
grpc_channel_args_destroy(channel_args_);
GRPC_ERROR_UNREF(resolver_transient_failure_error_);
@ -1224,9 +1211,8 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
// Handle race conditions.
if (resolver_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: got resolver result", this);
}
grpc_client_channel_routing_trace.Log(GPR_INFO,
"chand=%p: got resolver result", this);
// We only want to trace the address resolution in the follow cases:
// (a) Address resolution resulted in service config change.
// (b) Address resolution that causes number of backends to go from
@ -1254,19 +1240,17 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
RefCountedPtr<ServiceConfig> service_config;
RefCountedPtr<ConfigSelector> config_selector;
if (result.service_config_error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver returned service config error: %s",
this, grpc_error_std_string(result.service_config_error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: resolver returned service config error: %s", this,
grpc_error_std_string(result.service_config_error).c_str());
// If the service config was invalid, then fallback to the
// previously returned service config.
if (saved_service_config_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: resolver returned invalid service config. "
"Continuing to use previous service config.",
this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: resolver returned invalid service config. "
"Continuing to use previous service config.",
this);
service_config = saved_service_config_;
config_selector = saved_config_selector_;
} else {
@ -1278,12 +1262,11 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
}
} else if (result.service_config == nullptr) {
// Resolver did not return any service config.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: resolver returned no service config. Using default "
"service config for channel.",
this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: resolver returned no service config. Using default "
"service config for channel.",
this);
service_config = default_service_config_;
} else {
// Use ServiceConfig and ConfigSelector returned by resolver.
@ -1312,8 +1295,9 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
UpdateServiceConfigInControlPlaneLocked(
std::move(service_config), std::move(config_selector),
parsed_service_config, lb_policy_config->name());
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: service config not changed", this);
} else {
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: service config not changed", this);
}
// Create or update LB policy, as needed.
CreateOrUpdateLbPolicyLocked(std::move(lb_policy_config),
@ -1345,10 +1329,9 @@ void ClientChannel::OnResolverErrorLocked(grpc_error_handle error) {
GRPC_ERROR_UNREF(error);
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver transient failure: %s", this,
grpc_error_std_string(error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: resolver transient failure: %s", this,
grpc_error_std_string(error).c_str());
// If we already have an LB policy from a previous resolution
// result, then we continue to let it set the connectivity state.
// Otherwise, we go into TRANSIENT_FAILURE.
@ -1399,10 +1382,8 @@ void ClientChannel::CreateOrUpdateLbPolicyLocked(
lb_policy_ = CreateLbPolicyLocked(*update_args.args);
}
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: Updating child policy %p", this,
lb_policy_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: Updating child policy %p", this, lb_policy_.get());
lb_policy_->UpdateLocked(std::move(update_args));
}
@ -1417,10 +1398,8 @@ OrphanablePtr<LoadBalancingPolicy> ClientChannel::CreateLbPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_client_channel_routing_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: created new LB policy %p", this,
lb_policy.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: created new LB policy %p", this, lb_policy.get());
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties_);
return lb_policy;
@ -1457,11 +1436,9 @@ void ClientChannel::UpdateServiceConfigInControlPlaneLocked(
const char* lb_policy_name) {
UniquePtr<char> service_config_json(
gpr_strdup(service_config->json_string().c_str()));
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: resolver returned updated service config: \"%s\"", this,
service_config_json.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: resolver returned updated service config: \"%s\"",
this, service_config_json.get());
// Save service config.
saved_service_config_ = std::move(service_config);
// Update health check service name if needed.
@ -1484,10 +1461,9 @@ void ClientChannel::UpdateServiceConfigInControlPlaneLocked(
}
// Save config selector.
saved_config_selector_ = std::move(config_selector);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: using ConfigSelector %p", this,
saved_config_selector_.get());
}
grpc_client_channel_routing_trace.Log(GPR_INFO,
"chand=%p: using ConfigSelector %p",
this, saved_config_selector_.get());
}
void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
@ -1495,10 +1471,9 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
RefCountedPtr<ServiceConfig> service_config = saved_service_config_;
// Grab ref to config selector. Use default if resolver didn't supply one.
RefCountedPtr<ConfigSelector> config_selector = saved_config_selector_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: switching to ConfigSelector %p", this,
saved_config_selector_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: switching to ConfigSelector %p", this,
saved_config_selector_.get());
if (config_selector == nullptr) {
config_selector =
MakeRefCounted<DefaultConfigSelector>(saved_service_config_);
@ -1568,9 +1543,8 @@ void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
}
void ClientChannel::CreateResolverLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: starting name resolution", this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: starting name resolution", this);
resolver_ = ResolverRegistry::CreateResolver(
target_uri_.get(), channel_args_, interested_parties_, work_serializer_,
absl::make_unique<ResolverResultHandler>(this));
@ -1581,23 +1555,19 @@ void ClientChannel::CreateResolverLocked() {
GRPC_CHANNEL_CONNECTING, absl::Status(), "started resolving",
absl::make_unique<LoadBalancingPolicy::QueuePicker>(nullptr));
resolver_->StartLocked();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: created resolver=%p", this, resolver_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: created resolver=%p", this, resolver_.get());
}
void ClientChannel::DestroyResolverAndLbPolicyLocked() {
if (resolver_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: shutting down resolver=%p", this,
resolver_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: shutting down resolver=%p", this, resolver_.get());
resolver_.reset();
if (lb_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
@ -1650,12 +1620,11 @@ void ClientChannel::UpdateStateAndPickerLocked(
MutexLock lock(&data_plane_mu_);
// Handle subchannel updates.
for (auto& p : pending_subchannel_updates_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p: updating subchannel wrapper %p data plane "
"connected_subchannel to %p",
this, p.first.get(), p.second.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p: updating subchannel wrapper %p data plane "
"connected_subchannel to %p",
this, p.first.get(), p.second.get());
// Note: We do not remove the entry from pending_subchannel_updates_
// here, since this would unref the subchannel wrapper; instead,
// we wait until we've released the lock to clear the map.
@ -1786,10 +1755,9 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
}
// Disconnect or enter IDLE.
if (op->disconnect_with_error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
grpc_error_std_string(op->disconnect_with_error).c_str());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
grpc_error_std_string(op->disconnect_with_error).c_str());
DestroyResolverAndLbPolicyLocked();
intptr_t value;
if (grpc_error_get_int(op->disconnect_with_error,
@ -1936,9 +1904,8 @@ ClientChannel::CallData::CallData(grpc_call_element* elem,
owning_call_(args.call_stack),
call_combiner_(args.call_combiner),
call_context_(args.context) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", &chand, this);
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: created call", &chand, this);
}
ClientChannel::CallData::~CallData() {
@ -1991,10 +1958,9 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
// Note that once we have done so, we do not need to acquire the channel's
// resolution mutex, which is more efficient (especially for streaming calls).
if (calld->dynamic_call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
chand, calld, calld->dynamic_call_.get());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p", chand,
calld, calld->dynamic_call_.get());
calld->dynamic_call_->StartTransportStreamOpBatch(batch);
return;
}
@ -2002,11 +1968,9 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(calld->cancel_error_ != GRPC_ERROR_NONE)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld,
grpc_error_std_string(calld->cancel_error_).c_str());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: failing batch with error: %s", chand,
calld, grpc_error_std_string(calld->cancel_error_).c_str());
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
@ -2022,10 +1986,9 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
GRPC_ERROR_UNREF(calld->cancel_error_);
calld->cancel_error_ =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_std_string(calld->cancel_error_).c_str());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand, calld,
grpc_error_std_string(calld->cancel_error_).c_str());
// Fail all pending batches.
calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
NoYieldCallCombiner);
@ -2040,20 +2003,17 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
// channel's resolution mutex to apply the service config to the call,
// after which we will create a dynamic call.
if (GPR_LIKELY(batch->send_initial_metadata)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: grabbing resolution mutex to apply service "
"config",
chand, calld);
}
grpc_client_channel_call_trace.Log(
GPR_INFO,
"chand=%p calld=%p: grabbing resolution mutex to apply service "
"config",
chand, calld);
CheckResolution(elem, GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yielding call combiner", chand,
calld);
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: saved batch, yielding call combiner",
chand, calld);
GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
"batch does not include send_initial_metadata");
}
@ -2088,11 +2048,9 @@ void ClientChannel::CallData::PendingBatchesAdd(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
this, idx);
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand, this, idx);
grpc_transport_stream_op_batch*& pending = pending_batches_[idx];
GPR_ASSERT(pending == nullptr);
pending = batch;
@ -2210,13 +2168,12 @@ class ClientChannel::CallData::ResolverQueuedCallCanceller {
auto* calld = static_cast<CallData*>(self->elem_->call_data);
{
MutexLock lock(&chand->resolution_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling resolver queued pick: "
"error=%s self=%p calld->resolver_pick_canceller=%p",
chand, calld, grpc_error_std_string(error).c_str(), self,
calld->resolver_call_canceller_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p calld=%p: cancelling resolver queued pick: "
"error=%s self=%p calld->resolver_pick_canceller=%p",
chand, calld, grpc_error_std_string(error).c_str(), self,
calld->resolver_call_canceller_);
if (calld->resolver_call_canceller_ == self && error != GRPC_ERROR_NONE) {
// Remove pick from list of queued picks.
calld->MaybeRemoveCallFromResolverQueuedCallsLocked(self->elem_);
@ -2237,11 +2194,9 @@ void ClientChannel::CallData::MaybeRemoveCallFromResolverQueuedCallsLocked(
grpc_call_element* elem) {
if (!queued_pending_resolver_result_) return;
auto* chand = static_cast<ClientChannel*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: removing from resolver queued picks list",
chand, this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p calld=%p: removing from resolver queued picks list",
chand, this);
chand->RemoveResolverQueuedCall(&resolver_queued_call_, pollent_);
queued_pending_resolver_result_ = false;
// Lame the call combiner canceller.
@ -2252,10 +2207,9 @@ void ClientChannel::CallData::MaybeAddCallToResolverQueuedCallsLocked(
grpc_call_element* elem) {
if (queued_pending_resolver_result_) return;
auto* chand = static_cast<ClientChannel*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: adding to resolver queued picks list",
chand, this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p calld=%p: adding to resolver queued picks list",
chand, this);
queued_pending_resolver_result_ = true;
resolver_queued_call_.elem = elem;
chand->AddResolverQueuedCall(&resolver_queued_call_, pollent_);
@ -2266,10 +2220,9 @@ void ClientChannel::CallData::MaybeAddCallToResolverQueuedCallsLocked(
grpc_error_handle ClientChannel::CallData::ApplyServiceConfigToCallLocked(
grpc_call_element* elem, grpc_metadata_batch* initial_metadata) {
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p calld=%p: applying service config to call", chand,
this);
ConfigSelector* config_selector = chand->config_selector_.get();
if (config_selector != nullptr) {
// Use the ConfigSelector to determine the config for the call.
@ -2361,11 +2314,9 @@ void ClientChannel::CallData::ResolutionDone(void* arg,
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
CallData* calld = static_cast<CallData*>(elem->call_data);
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: error applying config to call: error=%s",
chand, calld, grpc_error_std_string(error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p calld=%p: error applying config to call: error=%s",
chand, calld, grpc_error_std_string(error).c_str());
calld->PendingBatchesFail(elem, GRPC_ERROR_REF(error), YieldCallCombiner);
return;
}
@ -2462,19 +2413,15 @@ void ClientChannel::CallData::CreateDynamicCall(grpc_call_element* elem) {
call_combiner_};
grpc_error_handle error = GRPC_ERROR_NONE;
DynamicFilters* channel_stack = args.channel_stack.get();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p: creating dynamic call stack on channel_stack=%p",
chand, this, channel_stack);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p calld=%p: creating dynamic call stack on channel_stack=%p",
chand, this, channel_stack);
dynamic_call_ = channel_stack->CreateCall(std::move(args), &error);
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create dynamic call: error=%s",
chand, this, grpc_error_std_string(error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p calld=%p: failed to create dynamic call: error=%s",
chand, this, grpc_error_std_string(error).c_str());
PendingBatchesFail(elem, error, YieldCallCombiner);
return;
}
@ -2667,11 +2614,9 @@ size_t ClientChannel::LoadBalancedCall::GetBatchIndex(
void ClientChannel::LoadBalancedCall::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
GPR_ASSERT(pending_batches_[idx] == nullptr);
pending_batches_[idx] = batch;
}
@ -2828,11 +2773,9 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// the channel's data plane mutex, which is more efficient (especially for
// streaming calls).
if (subchannel_call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: starting batch on subchannel_call=%p",
chand_, this, subchannel_call_.get());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: starting batch on subchannel_call=%p",
chand_, this, subchannel_call_.get());
subchannel_call_->StartTransportStreamOpBatch(batch);
return;
}
@ -2840,10 +2783,9 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(cancel_error_ != GRPC_ERROR_NONE)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: failing batch with error: %s",
chand_, this, grpc_error_std_string(cancel_error_).c_str());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: failing batch with error: %s", chand_,
this, grpc_error_std_string(cancel_error_).c_str());
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
@ -2858,10 +2800,9 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// error to the caller when the first batch does get passed down.
GRPC_ERROR_UNREF(cancel_error_);
cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
chand_, this, grpc_error_std_string(cancel_error_).c_str());
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s", chand_,
this, grpc_error_std_string(cancel_error_).c_str());
// Fail all pending batches.
PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
// Note: This will release the call combiner.
@ -2874,19 +2815,16 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// For batches containing a send_initial_metadata op, acquire the
// channel's data plane mutex to pick a subchannel.
if (GPR_LIKELY(batch->send_initial_metadata)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: grabbing data plane mutex to perform pick",
chand_, this);
}
grpc_client_channel_call_trace.Log(
GPR_INFO,
"chand=%p lb_call=%p: grabbing data plane mutex to perform pick",
chand_, this);
PickSubchannel(this, GRPC_ERROR_NONE);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: saved batch, yielding call combiner",
chand_, this);
}
grpc_client_channel_call_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: saved batch, yielding call combiner",
chand_, this);
GRPC_CALL_COMBINER_STOP(call_combiner_,
"batch does not include send_initial_metadata");
}
@ -2989,11 +2927,10 @@ void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
call_context_, call_combiner_};
grpc_error_handle error = GRPC_ERROR_NONE;
subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: create subchannel_call=%p: error=%s", chand_,
this, subchannel_call_.get(), grpc_error_std_string(error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: create subchannel_call=%p: error=%s",
chand_, this, subchannel_call_.get(),
grpc_error_std_string(error).c_str());
if (on_call_destruction_complete_ != nullptr) {
subchannel_call_->SetAfterCallStackDestroy(on_call_destruction_complete_);
on_call_destruction_complete_ = nullptr;
@ -3028,13 +2965,12 @@ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
auto* chand = lb_call->chand_;
{
MutexLock lock(&chand->data_plane_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: cancelling queued pick: "
"error=%s self=%p calld->pick_canceller=%p",
chand, lb_call, grpc_error_std_string(error).c_str(), self,
lb_call->lb_call_canceller_);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p lb_call=%p: cancelling queued pick: "
"error=%s self=%p calld->pick_canceller=%p",
chand, lb_call, grpc_error_std_string(error).c_str(), self,
lb_call->lb_call_canceller_);
if (lb_call->lb_call_canceller_ == self && error != GRPC_ERROR_NONE) {
lb_call->call_dispatch_controller_->Commit();
// Remove pick from list of queued picks.
@ -3054,10 +2990,9 @@ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
void ClientChannel::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
if (!queued_pending_lb_pick_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: removing from queued picks list",
chand_, this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: removing from queued picks list", chand_,
this);
chand_->RemoveLbQueuedCall(&queued_call_, pollent_);
queued_pending_lb_pick_ = false;
// Lame the call combiner canceller.
@ -3066,10 +3001,9 @@ void ClientChannel::LoadBalancedCall::MaybeRemoveCallFromLbQueuedCallsLocked() {
void ClientChannel::LoadBalancedCall::MaybeAddCallToLbQueuedCallsLocked() {
if (queued_pending_lb_pick_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: adding to queued picks list",
chand_, this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: adding to queued picks list", chand_,
this);
queued_pending_lb_pick_ = true;
queued_call_.lb_call = this;
chand_->AddLbQueuedCall(&queued_call_, pollent_);
@ -3087,11 +3021,9 @@ void ClientChannel::LoadBalancedCall::PickDone(void* arg,
grpc_error_handle error) {
auto* self = static_cast<LoadBalancedCall*>(arg);
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: failed to pick subchannel: error=%s",
self->chand_, self, grpc_error_std_string(error).c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: failed to pick subchannel: error=%s",
self->chand_, self, grpc_error_std_string(error).c_str());
self->PendingBatchesFail(GRPC_ERROR_REF(error), YieldCallCombiner);
return;
}
@ -3137,11 +3069,10 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
// CompletePick
[this](LoadBalancingPolicy::PickResult::Complete* complete_pick)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: LB pick succeeded: subchannel=%p",
chand_, this, complete_pick->subchannel.get());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO,
"chand=%p lb_call=%p: LB pick succeeded: subchannel=%p", chand_,
this, complete_pick->subchannel.get());
GPR_ASSERT(complete_pick->subchannel != nullptr);
// Grab a ref to the connected subchannel while we're still
// holding the data plane mutex.
@ -3156,10 +3087,8 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
// QueuePick
[this](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_,
this);
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_, this);
MaybeAddCallToLbQueuedCallsLocked();
return false;
},
@ -3167,10 +3096,9 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
[this, send_initial_metadata_flags,
&error](LoadBalancingPolicy::PickResult::Fail* fail_pick)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s",
chand_, this, fail_pick->status.ToString().c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s", chand_,
this, fail_pick->status.ToString().c_str());
// If we're shutting down, fail all RPCs.
grpc_error_handle disconnect_error = chand_->disconnect_error();
if (disconnect_error != GRPC_ERROR_NONE) {
@ -3198,10 +3126,9 @@ bool ClientChannel::LoadBalancedCall::PickSubchannelLocked(
// DropPick
[this, &error](LoadBalancingPolicy::PickResult::Drop* drop_pick)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&ClientChannel::data_plane_mu_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_routing_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s",
chand_, this, drop_pick->status.ToString().c_str());
}
grpc_client_channel_routing_trace.Log(
GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s", chand_,
this, drop_pick->status.ToString().c_str());
*error =
grpc_error_set_int(absl_status_to_grpc_error(drop_pick->status),
GRPC_ERROR_INT_LB_POLICY_DROP, 1);

src/core/ext/filters/client_channel/health/health_check_client.cc

@ -68,18 +68,16 @@ HealthCheckClient::HealthCheckClient(
.set_jitter(HEALTH_CHECK_RECONNECT_JITTER)
.set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS *
1000)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "created HealthCheckClient %p", this);
}
grpc_health_check_client_trace.Log(GPR_INFO, "created HealthCheckClient %p",
this);
GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this,
grpc_schedule_on_exec_ctx);
StartCall();
}
HealthCheckClient::~HealthCheckClient() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this);
}
grpc_health_check_client_trace.Log(GPR_INFO,
"destroying HealthCheckClient %p", this);
}
void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state,
@ -90,10 +88,9 @@ void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state,
void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "HealthCheckClient %p: setting state=%s reason=%s", this,
ConnectivityStateName(state), reason);
}
grpc_health_check_client_trace.Log(
GPR_INFO, "HealthCheckClient %p: setting state=%s reason=%s", this,
ConnectivityStateName(state), reason);
if (watcher_ != nullptr) {
watcher_->Notify(state,
state == GRPC_CHANNEL_TRANSIENT_FAILURE
@ -103,9 +100,8 @@ void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state,
}
void HealthCheckClient::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "HealthCheckClient %p: shutting down", this);
}
grpc_health_check_client_trace.Log(
GPR_INFO, "HealthCheckClient %p: shutting down", this);
{
MutexLock lock(&mu_);
shutting_down_ = true;
@ -128,10 +124,9 @@ void HealthCheckClient::StartCallLocked() {
GPR_ASSERT(call_state_ == nullptr);
SetHealthStatusLocked(GRPC_CHANNEL_CONNECTING, "starting health watch");
call_state_ = MakeOrphanable<CallState>(Ref(), interested_parties_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "HealthCheckClient %p: created CallState %p", this,
call_state_.get());
}
grpc_health_check_client_trace.Log(
GPR_INFO, "HealthCheckClient %p: created CallState %p", this,
call_state_.get());
call_state_->StartCall();
}
@ -164,10 +159,8 @@ void HealthCheckClient::OnRetryTimer(void* arg, grpc_error_handle error) {
self->retry_timer_callback_pending_ = false;
if (!self->shutting_down_ && error == GRPC_ERROR_NONE &&
self->call_state_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "HealthCheckClient %p: restarting health check call",
self);
}
grpc_health_check_client_trace.Log(
GPR_INFO, "HealthCheckClient %p: restarting health check call", self);
self->StartCallLocked();
}
}
@ -257,10 +250,9 @@ HealthCheckClient::CallState::CallState(
payload_(context_) {}
HealthCheckClient::CallState::~CallState() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
health_check_client_.get(), this);
}
grpc_health_check_client_trace.Log(
GPR_INFO, "HealthCheckClient %p: destroying CallState %p",
health_check_client_.get(), this);
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) {
if (context_[i].destroy != nullptr) {
context_[i].destroy(context_[i].value);
@ -562,12 +554,11 @@ void HealthCheckClient::CallState::RecvTrailingMetadataReady(
status = grpc_get_status_code_from_metadata(
self->recv_trailing_metadata_.idx.named.grpc_status->md);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_health_check_client_trace)) {
gpr_log(GPR_INFO,
"HealthCheckClient %p CallState %p: health watch failed with "
"status %d",
self->health_check_client_.get(), self, status);
}
grpc_health_check_client_trace.Log(
GPR_INFO,
"HealthCheckClient %p CallState %p: health watch failed with "
"status %d",
self->health_check_client_.get(), self, status);
// Clean up.
grpc_metadata_batch_destroy(&self->recv_trailing_metadata_);
// For status UNIMPLEMENTED, give up and assume always healthy.

src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc

@ -53,13 +53,12 @@ class ChildPolicyHandler::Helper
// it reports something other than CONNECTING, at which point we swap it
// into place.
if (CalledByPendingChild()) {
if (GRPC_TRACE_FLAG_ENABLED(*(parent_->tracer_))) {
gpr_log(GPR_INFO,
"[child_policy_handler %p] helper %p: pending child policy %p "
"reports state=%s (%s)",
parent_.get(), this, child_, ConnectivityStateName(state),
status.ToString().c_str());
}
parent_->tracer_->Log(
GPR_INFO,
"[child_policy_handler %p] helper %p: pending child policy %p "
"reports state=%s (%s)",
parent_.get(), this, child_, ConnectivityStateName(state),
status.ToString().c_str());
if (state == GRPC_CHANNEL_CONNECTING) return;
grpc_pollset_set_del_pollset_set(
parent_->child_policy_->interested_parties(),
@ -83,10 +82,9 @@ class ChildPolicyHandler::Helper
? parent_->pending_child_policy_.get()
: parent_->child_policy_.get();
if (child_ != latest_child_policy) return;
if (GRPC_TRACE_FLAG_ENABLED(*(parent_->tracer_))) {
gpr_log(GPR_INFO, "[child_policy_handler %p] started name re-resolving",
parent_.get());
}
parent_->tracer_->Log(GPR_INFO,
"[child_policy_handler %p] started name re-resolving",
parent_.get());
parent_->channel_control_helper()->RequestReresolution();
}
@ -119,25 +117,20 @@ class ChildPolicyHandler::Helper
//
void ChildPolicyHandler::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "[child_policy_handler %p] shutting down", this);
}
tracer_->Log(GPR_INFO, "[child_policy_handler %p] shutting down", this);
shutting_down_ = true;
if (child_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "[child_policy_handler %p] shutting down lb_policy %p",
this, child_policy_.get());
}
tracer_->Log(GPR_INFO,
"[child_policy_handler %p] shutting down lb_policy %p", this,
child_policy_.get());
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
interested_parties());
child_policy_.reset();
}
if (pending_child_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[child_policy_handler %p] shutting down pending lb_policy %p",
this, pending_child_policy_.get());
}
tracer_->Log(GPR_INFO,
"[child_policy_handler %p] shutting down pending lb_policy %p",
this, pending_child_policy_.get());
grpc_pollset_set_del_pollset_set(
pending_child_policy_->interested_parties(), interested_parties());
pending_child_policy_.reset();
@ -210,11 +203,9 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
// that there's an upper bound on the amount of time it takes us to
// switch to the new policy, even if the new policy stays in
// CONNECTING for a very long period of time.
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[child_policy_handler %p] creating new %schild policy %s", this,
child_policy_ == nullptr ? "" : "pending ", args.config->name());
}
tracer_->Log(
GPR_INFO, "[child_policy_handler %p] creating new %schild policy %s",
this, child_policy_ == nullptr ? "" : "pending ", args.config->name());
auto& lb_policy =
child_policy_ == nullptr ? child_policy_ : pending_child_policy_;
lb_policy = CreateChildPolicy(args.config->name(), *args.args);
@ -229,12 +220,10 @@ void ChildPolicyHandler::UpdateLocked(UpdateArgs args) {
}
GPR_ASSERT(policy_to_update != nullptr);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "[child_policy_handler %p] updating %schild policy %p",
this,
policy_to_update == pending_child_policy_.get() ? "pending " : "",
policy_to_update);
}
tracer_->Log(
GPR_INFO, "[child_policy_handler %p] updating %schild policy %p", this,
policy_to_update == pending_child_policy_.get() ? "pending " : "",
policy_to_update);
policy_to_update->UpdateLocked(std::move(args));
}
@ -271,11 +260,9 @@ OrphanablePtr<LoadBalancingPolicy> ChildPolicyHandler::CreateChildPolicy(
return nullptr;
}
helper->set_child(lb_policy.get());
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[child_policy_handler %p] created new LB policy \"%s\" (%p)", this,
child_policy_name, lb_policy.get());
}
tracer_->Log(GPR_INFO,
"[child_policy_handler %p] created new LB policy \"%s\" (%p)",
this, child_policy_name, lb_policy.get());
channel_control_helper()->AddTraceEvent(
ChannelControlHelper::TRACE_INFO,
absl::StrCat("Created new LB policy \"", child_policy_name, "\""));

src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc

@ -709,14 +709,12 @@ void GrpcLb::Helper::UpdateState(grpc_connectivity_state state,
parent_->lb_calld_->client_stats() != nullptr) {
client_stats = parent_->lb_calld_->client_stats()->Ref();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p helper %p] state=%s (%s) wrapping child "
"picker %p (serverlist=%p, client_stats=%p)",
parent_.get(), this, ConnectivityStateName(state),
status.ToString().c_str(), picker.get(), serverlist.get(),
client_stats.get());
}
grpc_lb_glb_trace.Log(GPR_INFO,
"[grpclb %p helper %p] state=%s (%s) wrapping child "
"picker %p (serverlist=%p, client_stats=%p)",
parent_.get(), this, ConnectivityStateName(state),
status.ToString().c_str(), picker.get(),
serverlist.get(), client_stats.get());
parent_->channel_control_helper()->UpdateState(
state, status,
absl::make_unique<Picker>(std::move(serverlist), std::move(picker),
@ -817,10 +815,9 @@ void GrpcLb::BalancerCallState::Orphan() {
void GrpcLb::BalancerCallState::StartQuery() {
GPR_ASSERT(lb_call_ != nullptr);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] lb_calld=%p: Starting LB call %p",
grpclb_policy_.get(), this, lb_call_);
}
grpc_lb_glb_trace.Log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Starting LB call %p",
grpclb_policy_.get(), this, lb_call_);
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@ -1062,18 +1059,18 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
if (response.client_stats_report_interval != 0) {
client_stats_report_interval_ =
GPR_MAX(GPR_MS_PER_SEC, response.client_stats_report_interval);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting interval = %" PRId64
" milliseconds",
grpclb_policy(), this, client_stats_report_interval_);
}
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting NOT enabled",
grpclb_policy(), this);
grpc_lb_glb_trace.Log(
GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting interval = %" PRId64
" milliseconds",
grpclb_policy(), this, client_stats_report_interval_);
} else {
grpc_lb_glb_trace.Log(
GPR_INFO,
"[grpclb %p] lb_calld=%p: Received initial LB response "
"message; client load reporting NOT enabled",
grpclb_policy(), this);
}
seen_initial_response_ = true;
break;
@ -1082,14 +1079,12 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
GPR_ASSERT(lb_call_ != nullptr);
auto serverlist_wrapper =
MakeRefCounted<Serverlist>(std::move(response.serverlist));
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
" servers received:\n%s",
grpclb_policy(), this,
serverlist_wrapper->serverlist().size(),
serverlist_wrapper->AsText().c_str());
}
grpc_lb_glb_trace.Log(
GPR_INFO,
"[grpclb %p] lb_calld=%p: Serverlist with %" PRIuPTR
" servers received:\n%s",
grpclb_policy(), this, serverlist_wrapper->serverlist().size(),
serverlist_wrapper->AsText().c_str());
seen_serverlist_ = true;
// Start sending client load report only after we start using the
// serverlist returned from the current LB call.
@ -1102,12 +1097,11 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
// Check if the serverlist differs from the previous one.
if (grpclb_policy()->serverlist_ != nullptr &&
*grpclb_policy()->serverlist_ == *serverlist_wrapper) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] lb_calld=%p: Incoming server list identical "
"to current, ignoring.",
grpclb_policy(), this);
}
grpc_lb_glb_trace.Log(
GPR_INFO,
"[grpclb %p] lb_calld=%p: Incoming server list identical "
"to current, ignoring.",
grpclb_policy(), this);
} else { // New serverlist.
// Dispose of the fallback.
// TODO(roth): Ideally, we should stay in fallback mode until we
@ -1345,11 +1339,9 @@ GrpcLb::GrpcLb(Args args)
absl::StatusOr<URI> uri = URI::Parse(server_uri);
GPR_ASSERT(uri.ok() && !uri->path().empty());
server_name_ = std::string(absl::StripPrefix(uri->path(), "/"));
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] Will use '%s' as the server name for LB request.",
this, server_name_.c_str());
}
grpc_lb_glb_trace.Log(
GPR_INFO, "[grpclb %p] Will use '%s' as the server name for LB request.",
this, server_name_.c_str());
// Record LB call timeout.
arg = grpc_channel_args_find(args.args, GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS);
lb_call_timeout_ms_ = grpc_channel_arg_get_integer(arg, {0, 0, INT_MAX});
@ -1508,11 +1500,9 @@ void GrpcLb::StartBalancerCallLocked() {
// Init the LB call data.
GPR_ASSERT(lb_calld_ == nullptr);
lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
this, lb_channel_, lb_calld_.get());
}
grpc_lb_glb_trace.Log(
GPR_INFO, "[grpclb %p] Query for backends (lb_channel: %p, lb_calld: %p)",
this, lb_channel_, lb_calld_.get());
lb_calld_->StartQuery();
}
@ -1551,9 +1541,8 @@ void GrpcLb::OnBalancerCallRetryTimer(void* arg, grpc_error_handle error) {
void GrpcLb::OnBalancerCallRetryTimerLocked(grpc_error_handle error) {
retry_timer_callback_pending_ = false;
if (!shutting_down_ && error == GRPC_ERROR_NONE && lb_calld_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", this);
}
grpc_lb_glb_trace.Log(GPR_INFO, "[grpclb %p] Restarting call to LB server",
this);
StartBalancerCallLocked();
}
Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
@ -1635,10 +1624,9 @@ OrphanablePtr<LoadBalancingPolicy> GrpcLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_glb_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] Created new child policy handler (%p)", this,
lb_policy.get());
}
grpc_lb_glb_trace.Log(GPR_INFO,
"[grpclb %p] Created new child policy handler (%p)",
this, lb_policy.get());
// Add the gRPC LB's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on gRPC LB, which in turn is tied to the application's call.
@ -1673,10 +1661,9 @@ void GrpcLb::CreateOrUpdateChildPolicyLocked() {
child_policy_ = CreateChildPolicyLocked(update_args.args);
}
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] Updating child policy handler %p", this,
child_policy_.get());
}
grpc_lb_glb_trace.Log(GPR_INFO,
"[grpclb %p] Updating child policy handler %p", this,
child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
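The grpclb hunks above all apply the same mechanical rewrite: a call site that used to guard gpr_log with an if on the trace flag now calls Log directly on the flag. The standalone sketch below is not part of this commit; "SketchTraceFlag" is a hypothetical stand-in for the real trace-flag type and drops the severity and file/line arguments that the GPR_INFO-based calls above carry. It only illustrates how moving the enabled check inside a Log method collapses each call site to a single statement.

// Illustrative sketch only -- not part of this commit. SketchTraceFlag is a
// hypothetical, simplified stand-in for the real trace-flag type.
#include <cstdarg>
#include <cstdio>

class SketchTraceFlag {
 public:
  explicit SketchTraceFlag(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }
  // The enabled() check lives inside Log(), so callers no longer wrap each
  // log statement in their own if block.
  void Log(const char* format, ...) const {
    if (!enabled_) return;
    va_list args;
    va_start(args, format);
    std::vfprintf(stderr, format, args);
    va_end(args);
    std::fputc('\n', stderr);
  }

 private:
  bool enabled_;
};

SketchTraceFlag sketch_trace(true);

void LogCreatedOldStyle(void* policy) {
  // Before: every call site guards the log statement itself.
  if (sketch_trace.enabled()) {
    std::fprintf(stderr, "[sketch_lb %p] created\n", policy);
  }
}

void LogCreatedNewStyle(void* policy) {
  // After: the call site is a single statement, as in the hunks above.
  sketch_trace.Log("[sketch_lb %p] created", policy);
}

int main() {
  int policy = 0;
  LogCreatedOldStyle(&policy);
  LogCreatedNewStyle(&policy);
  return 0;
}

One visible trade-off of the single-statement form: arguments are evaluated whether or not the flag is enabled, so work such as serverlist_wrapper->AsText() in the serverlist hunk above now runs unconditionally, where the old if guard skipped it when tracing was off.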

@ -140,23 +140,17 @@ class PickFirst : public LoadBalancingPolicy {
};
PickFirst::PickFirst(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p created.", this);
}
grpc_lb_pick_first_trace.Log(GPR_INFO, "Pick First %p created.", this);
}
PickFirst::~PickFirst() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Destroying Pick First %p", this);
}
grpc_lb_pick_first_trace.Log(GPR_INFO, "Destroying Pick First %p", this);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
}
void PickFirst::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
}
grpc_lb_pick_first_trace.Log(GPR_INFO, "Pick First %p Shutting down", this);
shutdown_ = true;
subchannel_list_.reset();
latest_pending_subchannel_list_.reset();
@ -165,9 +159,7 @@ void PickFirst::ShutdownLocked() {
void PickFirst::ExitIdleLocked() {
if (shutdown_) return;
if (idle_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p exiting idle", this);
}
grpc_lb_pick_first_trace.Log(GPR_INFO, "Pick First %p exiting idle", this);
idle_ = false;
AttemptToConnectUsingLatestUpdateArgsLocked();
}
@ -234,13 +226,11 @@ void PickFirst::AttemptToConnectUsingLatestUpdateArgsLocked() {
// We do have a selected subchannel (which means it's READY), so keep
// using it until one of the subchannels in the new list reports READY.
if (latest_pending_subchannel_list_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_.get(),
subchannel_list.get());
}
grpc_lb_pick_first_trace.Log(
GPR_INFO,
"Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_.get(), subchannel_list.get());
}
latest_pending_subchannel_list_ = std::move(subchannel_list);
// If we're not in IDLE state, start trying to connect to the first
@ -257,11 +247,9 @@ void PickFirst::AttemptToConnectUsingLatestUpdateArgsLocked() {
}
void PickFirst::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p received update with %" PRIuPTR " addresses", this,
args.addresses.size());
}
grpc_lb_pick_first_trace.Log(
GPR_INFO, "Pick First %p received update with %" PRIuPTR " addresses",
this, args.addresses.size());
// Update the latest_update_args_
grpc_arg new_arg = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_INHIBIT_HEALTH_CHECKING), 1);
@ -287,22 +275,20 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
GPR_ASSERT(connectivity_state != GRPC_CHANNEL_SHUTDOWN);
// Handle updates for the currently selected subchannel.
if (p->selected_ == this) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p selected subchannel connectivity changed to %s", p,
ConnectivityStateName(connectivity_state));
}
grpc_lb_pick_first_trace.Log(
GPR_INFO,
"Pick First %p selected subchannel connectivity changed to %s", p,
ConnectivityStateName(connectivity_state));
// If the new state is anything other than READY and there is a
// pending update, switch to the pending update.
if (connectivity_state != GRPC_CHANNEL_READY &&
p->latest_pending_subchannel_list_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
}
grpc_lb_pick_first_trace.Log(
GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
p->selected_ = nullptr;
CancelConnectivityWatchLocked(
"selected subchannel failed; switching to pending update");
@ -427,19 +413,16 @@ void PickFirst::PickFirstSubchannelData::ProcessUnselectedReadyLocked() {
subchannel_list() == p->latest_pending_subchannel_list_.get());
// Case 2. Promote p->latest_pending_subchannel_list_ to p->subchannel_list_.
if (subchannel_list() == p->latest_pending_subchannel_list_.get()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(),
p->subchannel_list_.get());
}
grpc_lb_pick_first_trace.Log(
GPR_INFO,
"Pick First %p promoting pending subchannel list %p to "
"replace %p",
p, p->latest_pending_subchannel_list_.get(), p->subchannel_list_.get());
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
}
// Cases 1 and 2.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_pick_first_trace)) {
gpr_log(GPR_INFO, "Pick First %p selected subchannel %p", p, subchannel());
}
grpc_lb_pick_first_trace.Log(GPR_INFO, "Pick First %p selected subchannel %p",
p, subchannel());
p->selected_ = this;
p->channel_control_helper()->UpdateState(
GRPC_CHANNEL_READY, absl::Status(),

@ -246,22 +246,17 @@ PriorityLb::PriorityLb(Args args)
child_failover_timeout_ms_(grpc_channel_args_find_integer(
args.args, GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS,
{kDefaultChildFailoverTimeoutMs, 0, INT_MAX})) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] created", this);
}
grpc_lb_priority_trace.Log(GPR_INFO, "[priority_lb %p] created", this);
}
PriorityLb::~PriorityLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] destroying priority LB policy", this);
}
grpc_lb_priority_trace.Log(
GPR_INFO, "[priority_lb %p] destroying priority LB policy", this);
grpc_channel_args_destroy(args_);
}
void PriorityLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] shutting down", this);
}
grpc_lb_priority_trace.Log(GPR_INFO, "[priority_lb %p] shutting down", this);
shutting_down_ = true;
children_.clear();
}
@ -269,11 +264,10 @@ void PriorityLb::ShutdownLocked() {
void PriorityLb::ExitIdleLocked() {
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] exiting IDLE for current priority %d child %s",
this, current_priority_, child_name.c_str());
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] exiting IDLE for current priority %d child %s", this,
current_priority_, child_name.c_str());
children_[child_name]->ExitIdleLocked();
}
}
@ -283,9 +277,8 @@ void PriorityLb::ResetBackoffLocked() {
}
void PriorityLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] received update", this);
}
grpc_lb_priority_trace.Log(GPR_INFO, "[priority_lb %p] received update",
this);
// Save current child.
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
@ -336,12 +329,11 @@ void PriorityLb::HandleChildConnectivityStateChangeLocked(
// Special case for the child that was the current child before the
// most recent update.
if (child == current_child_from_before_update_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] state update for current child from before "
"config update",
this);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] state update for current child from before "
"config update",
this);
if (child->connectivity_state() == GRPC_CHANNEL_READY ||
child->connectivity_state() == GRPC_CHANNEL_IDLE) {
// If it's still READY or IDLE, we stick with this child, so pass
@ -362,12 +354,11 @@ void PriorityLb::HandleChildConnectivityStateChangeLocked(
}
// Otherwise, find the child's priority.
uint32_t child_priority = GetChildPriorityLocked(child->name());
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] state update for priority %u, child %s, current "
"priority %u",
this, child_priority, child->name().c_str(), current_priority_);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] state update for priority %u, child %s, current "
"priority %u",
this, child_priority, child->name().c_str(), current_priority_);
// Ignore priorities not in the current config.
if (child_priority == UINT32_MAX) return;
// Ignore lower-than-current priorities.
@ -419,10 +410,9 @@ void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
++priority) {
// If the child for the priority does not exist yet, create it.
const std::string& child_name = config_->priorities()[priority];
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] trying priority %u, child %s", this,
priority, child_name.c_str());
}
grpc_lb_priority_trace.Log(GPR_INFO,
"[priority_lb %p] trying priority %u, child %s",
this, priority, child_name.c_str());
auto& child = children_[child_name];
if (child == nullptr) {
if (report_connecting) {
@ -449,12 +439,11 @@ void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
// Child is not READY or IDLE.
// If its failover timer is still pending, give it time to fire.
if (child->failover_timer_callback_pending()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] priority %u, child %s: child still "
"attempting to connect, will wait",
this, priority, child_name.c_str());
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] priority %u, child %s: child still "
"attempting to connect, will wait",
this, priority, child_name.c_str());
if (report_connecting) {
channel_control_helper()->UpdateState(
GRPC_CHANNEL_CONNECTING, absl::Status(),
@ -465,12 +454,11 @@ void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
// Child has been failing for a while. Move on to the next priority.
}
// If there are no more priorities to try, report TRANSIENT_FAILURE.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] no priority reachable, putting channel in "
"TRANSIENT_FAILURE",
this);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] no priority reachable, putting channel in "
"TRANSIENT_FAILURE",
this);
current_child_from_before_update_ = nullptr;
absl::Status status = absl::UnavailableError("no ready priority");
channel_control_helper()->UpdateState(
@ -479,10 +467,9 @@ void PriorityLb::TryNextPriorityLocked(bool report_connecting) {
}
void PriorityLb::SelectPriorityLocked(uint32_t priority) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] selected priority %u, child %s", this,
priority, config_->priorities()[priority].c_str());
}
grpc_lb_priority_trace.Log(
GPR_INFO, "[priority_lb %p] selected priority %u, child %s", this,
priority, config_->priorities()[priority].c_str());
current_priority_ = priority;
current_child_from_before_update_ = nullptr;
// Deactivate lower priorities.
@ -505,10 +492,9 @@ void PriorityLb::SelectPriorityLocked(uint32_t priority) {
PriorityLb::ChildPriority::ChildPriority(
RefCountedPtr<PriorityLb> priority_policy, std::string name)
: priority_policy_(std::move(priority_policy)), name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] creating child %s (%p)",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(GPR_INFO,
"[priority_lb %p] creating child %s (%p)",
priority_policy_.get(), name_.c_str(), this);
GRPC_CLOSURE_INIT(&on_failover_timer_, OnFailoverTimer, this,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&on_deactivation_timer_, OnDeactivationTimer, this,
@ -518,10 +504,9 @@ PriorityLb::ChildPriority::ChildPriority(
}
void PriorityLb::ChildPriority::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): orphaned",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(GPR_INFO,
"[priority_lb %p] child %s (%p): orphaned",
priority_policy_.get(), name_.c_str(), this);
MaybeCancelFailoverTimerLocked();
if (deactivation_timer_callback_pending_) {
grpc_timer_cancel(&deactivation_timer_);
@ -544,10 +529,9 @@ void PriorityLb::ChildPriority::UpdateLocked(
RefCountedPtr<LoadBalancingPolicy::Config> config,
bool ignore_reresolution_requests) {
if (priority_policy_->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): start update",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(GPR_INFO,
"[priority_lb %p] child %s (%p): start update",
priority_policy_.get(), name_.c_str(), this);
ignore_reresolution_requests_ = ignore_reresolution_requests;
// Create policy if needed.
if (child_policy_ == nullptr) {
@ -559,11 +543,10 @@ void PriorityLb::ChildPriority::UpdateLocked(
update_args.addresses = priority_policy_->addresses_[name_];
update_args.args = grpc_channel_args_copy(priority_policy_->args_);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): updating child policy handler %p",
priority_policy_.get(), name_.c_str(), this, child_policy_.get());
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): updating child policy handler %p",
priority_policy_.get(), name_.c_str(), this, child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
@ -578,12 +561,11 @@ PriorityLb::ChildPriority::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_priority_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): created new child policy "
"handler %p",
priority_policy_.get(), name_.c_str(), this, lb_policy.get());
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): created new child policy "
"handler %p",
priority_policy_.get(), name_.c_str(), this, lb_policy.get());
// Add the parent's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on the parent LB, which in turn is tied to the application's call.
@ -607,13 +589,11 @@ void PriorityLb::ChildPriority::ResetBackoffLocked() {
void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
grpc_connectivity_state state, const absl::Status& status,
std::unique_ptr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): state update: %s (%s) picker %p",
priority_policy_.get(), name_.c_str(), this,
ConnectivityStateName(state), status.ToString().c_str(),
picker.get());
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): state update: %s (%s) picker %p",
priority_policy_.get(), name_.c_str(), this, ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
// Store the state and picker.
connectivity_state_ = state;
connectivity_status_ = status;
@ -627,12 +607,11 @@ void PriorityLb::ChildPriority::OnConnectivityStateUpdateLocked(
}
void PriorityLb::ChildPriority::StartFailoverTimerLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): starting failover timer for %d ms",
priority_policy_.get(), name_.c_str(), this,
priority_policy_->child_failover_timeout_ms_);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): starting failover timer for %d ms",
priority_policy_.get(), name_.c_str(), this,
priority_policy_->child_failover_timeout_ms_);
Ref(DEBUG_LOCATION, "ChildPriority+OnFailoverTimerLocked").release();
grpc_timer_init(
&failover_timer_,
@ -643,11 +622,9 @@ void PriorityLb::ChildPriority::StartFailoverTimerLocked() {
void PriorityLb::ChildPriority::MaybeCancelFailoverTimerLocked() {
if (failover_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): cancelling failover timer",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(
GPR_INFO, "[priority_lb %p] child %s (%p): cancelling failover timer",
priority_policy_.get(), name_.c_str(), this);
grpc_timer_cancel(&failover_timer_);
failover_timer_callback_pending_ = false;
}
@ -664,12 +641,11 @@ void PriorityLb::ChildPriority::OnFailoverTimer(void* arg,
void PriorityLb::ChildPriority::OnFailoverTimerLocked(grpc_error_handle error) {
if (error == GRPC_ERROR_NONE && failover_timer_callback_pending_ &&
!priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
"reporting TRANSIENT_FAILURE",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
"reporting TRANSIENT_FAILURE",
priority_policy_.get(), name_.c_str(), this);
failover_timer_callback_pending_ = false;
OnConnectivityStateUpdateLocked(
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -683,13 +659,11 @@ void PriorityLb::ChildPriority::OnFailoverTimerLocked(grpc_error_handle error) {
void PriorityLb::ChildPriority::DeactivateLocked() {
// If already deactivated, don't do it again.
if (deactivation_timer_callback_pending_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivating -- will remove in %d "
"ms.",
priority_policy_.get(), name_.c_str(), this,
kChildRetentionIntervalMs);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): deactivating -- will remove in %d "
"ms.",
priority_policy_.get(), name_.c_str(), this, kChildRetentionIntervalMs);
MaybeCancelFailoverTimerLocked();
// Start a timer to delete the child.
Ref(DEBUG_LOCATION, "ChildPriority+timer").release();
@ -701,10 +675,9 @@ void PriorityLb::ChildPriority::DeactivateLocked() {
void PriorityLb::ChildPriority::MaybeReactivateLocked() {
if (deactivation_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO, "[priority_lb %p] child %s (%p): reactivating",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(GPR_INFO,
"[priority_lb %p] child %s (%p): reactivating",
priority_policy_.get(), name_.c_str(), this);
deactivation_timer_callback_pending_ = false;
grpc_timer_cancel(&deactivation_timer_);
}
@ -723,12 +696,11 @@ void PriorityLb::ChildPriority::OnDeactivationTimerLocked(
grpc_error_handle error) {
if (error == GRPC_ERROR_NONE && deactivation_timer_callback_pending_ &&
!priority_policy_->shutting_down_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
"deleting child",
priority_policy_.get(), name_.c_str(), this);
}
grpc_lb_priority_trace.Log(
GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
"deleting child",
priority_policy_.get(), name_.c_str(), this);
deactivation_timer_callback_pending_ = false;
priority_policy_->DeleteChild(this);
}

@ -371,12 +371,11 @@ RingHash::Picker::Picker(RefCountedPtr<RingHash> parent,
[](const RingEntry& lhs, const RingEntry& rhs) -> bool {
return lhs.hash < rhs.hash;
});
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO,
"[RH %p picker %p] created picker from subchannel_list=%p "
"with %" PRIuPTR " ring entries",
parent_.get(), this, subchannel_list, ring_.size());
}
grpc_lb_ring_hash_trace.Log(
GPR_INFO,
"[RH %p picker %p] created picker from subchannel_list=%p "
"with %" PRIuPTR " ring entries",
parent_.get(), this, subchannel_list, ring_.size());
}
RingHash::PickResult RingHash::Picker::Pick(PickArgs args) {
@ -578,16 +577,14 @@ bool RingHash::RingHashSubchannelList::UpdateRingHashConnectivityStateLocked() {
void RingHash::RingHashSubchannelData::UpdateConnectivityStateLocked(
grpc_connectivity_state connectivity_state) {
RingHash* p = static_cast<RingHash*>(subchannel_list()->policy());
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(
GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s",
p, subchannel(), subchannel_list(), Index(),
subchannel_list()->num_subchannels(),
ConnectivityStateName(last_connectivity_state_),
ConnectivityStateName(connectivity_state));
}
grpc_lb_ring_hash_trace.Log(
GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s",
p, subchannel(), subchannel_list(), Index(),
subchannel_list()->num_subchannels(),
ConnectivityStateName(last_connectivity_state_),
ConnectivityStateName(connectivity_state));
// Decide what state to report for aggregation purposes.
// If we haven't seen a failure since the last time we were in state
// READY, then we report the state change as-is. However, once we do see
@ -621,12 +618,11 @@ void RingHash::RingHashSubchannelData::ProcessConnectivityChangeLocked(
// loop of re-resolution.
// Also attempt to reconnect.
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
p, subchannel());
}
grpc_lb_ring_hash_trace.Log(
GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
p, subchannel());
p->channel_control_helper()->RequestReresolution();
}
// Update state counters.
@ -662,22 +658,17 @@ void RingHash::RingHashSubchannelData::ProcessConnectivityChangeLocked(
//
RingHash::RingHash(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO, "[RH %p] Created", this);
}
grpc_lb_ring_hash_trace.Log(GPR_INFO, "[RH %p] Created", this);
}
RingHash::~RingHash() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO, "[RH %p] Destroying Ring Hash policy", this);
}
grpc_lb_ring_hash_trace.Log(GPR_INFO, "[RH %p] Destroying Ring Hash policy",
this);
GPR_ASSERT(subchannel_list_ == nullptr);
}
void RingHash::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO, "[RH %p] Shutting down", this);
}
grpc_lb_ring_hash_trace.Log(GPR_INFO, "[RH %p] Shutting down", this);
shutdown_ = true;
subchannel_list_.reset();
}
@ -685,10 +676,9 @@ void RingHash::ShutdownLocked() {
void RingHash::ResetBackoffLocked() { subchannel_list_->ResetBackoffLocked(); }
void RingHash::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_ring_hash_trace)) {
gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses",
this, args.addresses.size());
}
grpc_lb_ring_hash_trace.Log(
GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses", this,
args.addresses.size());
config_ = std::move(args.config);
// Filter out any address with weight 0.
ServerAddressList addresses;

@ -196,23 +196,19 @@ RoundRobin::Picker::Picker(RoundRobin* parent,
// TODO(roth): rand(3) is not thread-safe. This should be replaced with
// something better as part of https://github.com/grpc/grpc/issues/17891.
last_picked_index_ = rand() % subchannels_.size();
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p picker %p] created picker from subchannel_list=%p "
"with %" PRIuPTR " READY subchannels; last_picked_index_=%" PRIuPTR,
parent_, this, subchannel_list, subchannels_.size(),
last_picked_index_);
}
grpc_lb_round_robin_trace.Log(
GPR_INFO,
"[RR %p picker %p] created picker from subchannel_list=%p "
"with %" PRIuPTR " READY subchannels; last_picked_index_=%" PRIuPTR,
parent_, this, subchannel_list, subchannels_.size(), last_picked_index_);
}
RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs /*args*/) {
last_picked_index_ = (last_picked_index_ + 1) % subchannels_.size();
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p picker %p] returning index %" PRIuPTR ", subchannel=%p",
parent_, this, last_picked_index_,
subchannels_[last_picked_index_].get());
}
grpc_lb_round_robin_trace.Log(
GPR_INFO, "[RR %p picker %p] returning index %" PRIuPTR ", subchannel=%p",
parent_, this, last_picked_index_,
subchannels_[last_picked_index_].get());
return PickResult::Complete(subchannels_[last_picked_index_]);
}
@ -221,23 +217,18 @@ RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs /*args*/) {
//
RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Created", this);
}
grpc_lb_round_robin_trace.Log(GPR_INFO, "[RR %p] Created", this);
}
RoundRobin::~RoundRobin() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
}
grpc_lb_round_robin_trace.Log(GPR_INFO,
"[RR %p] Destroying Round Robin policy", this);
GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
}
void RoundRobin::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
}
grpc_lb_round_robin_trace.Log(GPR_INFO, "[RR %p] Shutting down", this);
shutdown_ = true;
subchannel_list_.reset();
latest_pending_subchannel_list_.reset();
@ -368,16 +359,14 @@ void RoundRobin::RoundRobinSubchannelList::
void RoundRobin::RoundRobinSubchannelData::UpdateConnectivityStateLocked(
grpc_connectivity_state connectivity_state) {
RoundRobin* p = static_cast<RoundRobin*>(subchannel_list()->policy());
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s",
p, subchannel(), subchannel_list(), Index(),
subchannel_list()->num_subchannels(),
ConnectivityStateName(last_connectivity_state_),
ConnectivityStateName(connectivity_state));
}
grpc_lb_round_robin_trace.Log(
GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "): prev_state=%s new_state=%s",
p, subchannel(), subchannel_list(), Index(),
subchannel_list()->num_subchannels(),
ConnectivityStateName(last_connectivity_state_),
ConnectivityStateName(connectivity_state));
// Decide what state to report for aggregation purposes.
// If we haven't seen a failure since the last time we were in state
// READY, then we report the state change as-is. However, once we do see
@ -411,12 +400,11 @@ void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
// loop of re-resolution.
// Also attempt to reconnect.
if (connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
p, subchannel());
}
grpc_lb_round_robin_trace.Log(
GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution",
p, subchannel());
p->channel_control_helper()->RequestReresolution();
subchannel()->AttemptToConnect();
}
@ -427,17 +415,14 @@ void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
}
void RoundRobin::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses",
this, args.addresses.size());
}
grpc_lb_round_robin_trace.Log(
GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses", this,
args.addresses.size());
// Replace latest_pending_subchannel_list_.
if (latest_pending_subchannel_list_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] Shutting down previous pending subchannel list %p", this,
latest_pending_subchannel_list_.get());
}
grpc_lb_round_robin_trace.Log(
GPR_INFO, "[RR %p] Shutting down previous pending subchannel list %p",
this, latest_pending_subchannel_list_.get());
}
latest_pending_subchannel_list_ = MakeOrphanable<RoundRobinSubchannelList>(
this, &grpc_lb_round_robin_trace, std::move(args.addresses), *args.args);

@ -239,18 +239,16 @@ class SubchannelList : public InternallyRefCounted<SubchannelListType> {
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::Watcher::
OnConnectivityStateChange(grpc_connectivity_state new_state) {
if (GRPC_TRACE_FLAG_ENABLED(*subchannel_list_->tracer())) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): connectivity changed: state=%s, "
"shutting_down=%d, pending_watcher=%p",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_.get(), subchannel_data_->Index(),
subchannel_list_->num_subchannels(),
subchannel_data_->subchannel_.get(),
ConnectivityStateName(new_state), subchannel_list_->shutting_down(),
subchannel_data_->pending_watcher_);
}
subchannel_list_->tracer()->Log(
GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): connectivity changed: state=%s, "
"shutting_down=%d, pending_watcher=%p",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_.get(), subchannel_data_->Index(),
subchannel_list_->num_subchannels(), subchannel_data_->subchannel_.get(),
ConnectivityStateName(new_state), subchannel_list_->shutting_down(),
subchannel_data_->pending_watcher_);
if (!subchannel_list_->shutting_down() &&
subchannel_data_->pending_watcher_ != nullptr) {
subchannel_data_->connectivity_state_ = new_state;
@ -283,14 +281,13 @@ template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::
UnrefSubchannelLocked(const char* reason) {
if (subchannel_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(*subchannel_list_->tracer())) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), reason);
}
subchannel_list_->tracer()->Log(
GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), reason);
subchannel_.reset();
}
}
@ -306,14 +303,13 @@ void SubchannelData<SubchannelListType,
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType,
SubchannelDataType>::StartConnectivityWatchLocked() {
if (GRPC_TRACE_FLAG_ENABLED(*subchannel_list_->tracer())) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): starting watch (from %s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), ConnectivityStateName(connectivity_state_));
}
subchannel_list_->tracer()->Log(
GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): starting watch (from %s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), ConnectivityStateName(connectivity_state_));
GPR_ASSERT(pending_watcher_ == nullptr);
pending_watcher_ =
new Watcher(this, subchannel_list()->Ref(DEBUG_LOCATION, "Watcher"));
@ -326,14 +322,13 @@ void SubchannelData<SubchannelListType,
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>::
CancelConnectivityWatchLocked(const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(*subchannel_list_->tracer())) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), reason);
}
subchannel_list_->tracer()->Log(
GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(),
subchannel_list_, Index(), subchannel_list_->num_subchannels(),
subchannel_.get(), reason);
if (pending_watcher_ != nullptr) {
subchannel_->CancelConnectivityStateWatch(pending_watcher_);
pending_watcher_ = nullptr;
@ -359,11 +354,10 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
GRPC_TRACE_FLAG_ENABLED(*tracer) ? "SubchannelList" : nullptr),
policy_(policy),
tracer_(tracer) {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer_->name(), policy, this, addresses.size());
}
tracer_->Log(GPR_INFO,
"[%s %p] Creating subchannel list %p for %" PRIuPTR
" subchannels",
tracer_->name(), policy, this, addresses.size());
subchannels_.reserve(addresses.size());
// Create a subchannel for each address.
for (ServerAddress address : addresses) {
@ -371,39 +365,31 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
helper->CreateSubchannel(address, args);
if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[%s %p] could not create subchannel for address %s, "
"ignoring",
tracer_->name(), policy_, address.ToString().c_str());
}
tracer_->Log(GPR_INFO,
"[%s %p] could not create subchannel for address %s, "
"ignoring",
tracer_->name(), policy_, address.ToString().c_str());
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address %s",
tracer_->name(), policy_, this, subchannels_.size(),
subchannel.get(), address.ToString().c_str());
}
tracer_->Log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address %s",
tracer_->name(), policy_, this, subchannels_.size(),
subchannel.get(), address.ToString().c_str());
subchannels_.emplace_back(this, std::move(address), std::move(subchannel));
}
}
template <typename SubchannelListType, typename SubchannelDataType>
SubchannelList<SubchannelListType, SubchannelDataType>::~SubchannelList() {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "[%s %p] Destroying subchannel_list %p", tracer_->name(),
policy_, this);
}
tracer_->Log(GPR_INFO, "[%s %p] Destroying subchannel_list %p",
tracer_->name(), policy_, this);
}
template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelList<SubchannelListType, SubchannelDataType>::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(*tracer_)) {
gpr_log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p",
tracer_->name(), policy_, this);
}
tracer_->Log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p",
tracer_->name(), policy_, this);
GPR_ASSERT(!shutting_down_);
shutting_down_ = true;
for (size_t i = 0; i < subchannels_.size(); i++) {

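The subchannel_list.h hunks above differ slightly from the rest of the diff: the list holds a trace-flag pointer for whichever policy owns it, so the new code logs through tracer_->Log(...), while the constructor still consults GRPC_TRACE_FLAG_ENABLED(*tracer) where the flag gates something other than a log line (the debug name it passes along). The standalone sketch below, again not part of this commit and built on the same simplified stand-in type as the earlier sketch, mirrors that split.

// Illustrative sketch only -- not part of this commit. It mirrors the
// subchannel_list.h change: log through a trace-flag pointer, but keep an
// explicit enabled() check where the flag gates non-logging behavior.
#include <cstdarg>
#include <cstddef>
#include <cstdio>

class SketchTraceFlag {
 public:
  SketchTraceFlag(bool enabled, const char* name)
      : enabled_(enabled), name_(name) {}
  bool enabled() const { return enabled_; }
  const char* name() const { return name_; }
  void Log(const char* format, ...) const {
    if (!enabled_) return;
    va_list args;
    va_start(args, format);
    std::vfprintf(stderr, format, args);
    va_end(args);
    std::fputc('\n', stderr);
  }

 private:
  bool enabled_;
  const char* name_;
};

class SketchSubchannelList {
 public:
  SketchSubchannelList(SketchTraceFlag* tracer, std::size_t num_addresses)
      // enabled() still gates the debug name, so this check stays a check.
      : debug_name_(tracer->enabled() ? "SketchSubchannelList" : nullptr),
        tracer_(tracer) {
    // The log statement itself no longer needs a guard at the call site.
    tracer_->Log("[%s %p] Creating subchannel list for %zu subchannels",
                 tracer_->name(), static_cast<void*>(this), num_addresses);
  }

  const char* debug_name() const { return debug_name_; }

 private:
  const char* debug_name_;
  SketchTraceFlag* tracer_;
};

int main() {
  SketchTraceFlag tracer(true, "sketch_lb");
  SketchSubchannelList list(&tracer, 3);
  std::printf("debug name: %s\n",
              list.debug_name() != nullptr ? list.debug_name() : "(none)");
  return 0;
}

Holding the flag by pointer keeps the list generic across policies, which is why round_robin passes &grpc_lb_round_robin_trace when constructing its RoundRobinSubchannelList above and the list logs through tracer_->Log(...) rather than through a named global flag.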
@ -240,23 +240,19 @@ WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick(
WeightedTargetLb::WeightedTargetLb(Args args)
: LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] created", this);
}
grpc_lb_weighted_target_trace.Log(GPR_INFO, "[weighted_target_lb %p] created",
this);
}
WeightedTargetLb::~WeightedTargetLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] destroying weighted_target LB policy",
this);
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] destroying weighted_target LB policy",
this);
}
void WeightedTargetLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] shutting down", this);
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] shutting down", this);
shutting_down_ = true;
targets_.clear();
}
@ -267,9 +263,8 @@ void WeightedTargetLb::ResetBackoffLocked() {
void WeightedTargetLb::UpdateLocked(UpdateArgs args) {
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] Received update", this);
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] Received update", this);
// Update config.
config_ = std::move(args.config);
// Deactivate the targets not in the new config.
@ -307,12 +302,11 @@ void WeightedTargetLb::UpdateLocked(UpdateArgs args) {
}
void WeightedTargetLb::UpdateStateLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] scanning children to determine "
"connectivity state",
this);
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] scanning children to determine "
"connectivity state",
this);
// Construct a new picker which maintains a map of all child pickers
// that are ready. Each child is represented by a portion of the range
// proportional to its weight, such that the total range is the sum of the
@ -331,13 +325,11 @@ void WeightedTargetLb::UpdateStateLocked() {
if (config_->target_map().find(child_name) == config_->target_map().end()) {
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] child=%s state=%s weight=%d picker=%p",
this, child_name.c_str(),
ConnectivityStateName(child->connectivity_state()),
child->weight(), child->picker_wrapper().get());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] child=%s state=%s weight=%d picker=%p", this,
child_name.c_str(), ConnectivityStateName(child->connectivity_state()),
child->weight(), child->picker_wrapper().get());
switch (child->connectivity_state()) {
case GRPC_CHANNEL_READY: {
end += child->weight();
@ -371,10 +363,9 @@ void WeightedTargetLb::UpdateStateLocked() {
} else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s",
this, ConnectivityStateName(connectivity_state));
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] connectivity changed to %s", this,
ConnectivityStateName(connectivity_state));
std::unique_ptr<SubchannelPicker> picker;
absl::Status status;
switch (connectivity_state) {
@ -403,29 +394,25 @@ WeightedTargetLb::WeightedChild::WeightedChild(
RefCountedPtr<WeightedTargetLb> weighted_target_policy,
const std::string& name)
: weighted_target_policy_(std::move(weighted_target_policy)), name_(name) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s",
weighted_target_policy_.get(), this, name_.c_str());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] created WeightedChild %p for %s",
weighted_target_policy_.get(), this, name_.c_str());
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: destroying child",
weighted_target_policy_.get(), this, name_.c_str());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] WeightedChild %p %s: destroying child",
weighted_target_policy_.get(), this, name_.c_str());
weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild");
}
void WeightedTargetLb::WeightedChild::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: shutting down child",
weighted_target_policy_.get(), this, name_.c_str());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: shutting down child",
weighted_target_policy_.get(), this, name_.c_str());
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
grpc_pollset_set_del_pollset_set(
@ -454,13 +441,11 @@ WeightedTargetLb::WeightedChild::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_lb_weighted_target_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Created new child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(),
lb_policy.get());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Created new child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(), lb_policy.get());
// Add the xDS's interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// xDS LB, which in turn is tied to the application's call.
@ -478,11 +463,9 @@ void WeightedTargetLb::WeightedChild::UpdateLocked(
weight_ = config.weight;
// Reactivate if needed.
if (delayed_removal_timer_callback_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: reactivating",
weighted_target_policy_.get(), this, name_.c_str());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] WeightedChild %p %s: reactivating",
weighted_target_policy_.get(), this, name_.c_str());
delayed_removal_timer_callback_pending_ = false;
grpc_timer_cancel(&delayed_removal_timer_);
}
@ -496,13 +479,11 @@ void WeightedTargetLb::WeightedChild::UpdateLocked(
update_args.addresses = std::move(addresses);
update_args.args = grpc_channel_args_copy(args);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Updating child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(),
child_policy_.get());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: Updating child "
"policy handler %p",
weighted_target_policy_.get(), this, name_.c_str(), child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
@ -515,14 +496,13 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
std::unique_ptr<SubchannelPicker> picker) {
// Cache the picker in the WeightedChild.
picker_wrapper_ = MakeRefCounted<ChildPickerWrapper>(std::move(picker));
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: connectivity "
"state update: state=%s (%s) picker_wrapper=%p",
weighted_target_policy_.get(), this, name_.c_str(),
ConnectivityStateName(state), status.ToString().c_str(),
picker_wrapper_.get());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: connectivity "
"state update: state=%s (%s) picker_wrapper=%p",
weighted_target_policy_.get(), this, name_.c_str(),
ConnectivityStateName(state), status.ToString().c_str(),
picker_wrapper_.get());
// If the child reports IDLE, immediately tell it to exit idle.
if (state == GRPC_CHANNEL_IDLE) child_policy_->ExitIdleLocked();
// Decide what state to report for aggregation purposes.
@ -546,11 +526,9 @@ void WeightedTargetLb::WeightedChild::OnConnectivityStateUpdateLocked(
void WeightedTargetLb::WeightedChild::DeactivateLocked() {
// If already deactivated, don't do that again.
if (weight_ == 0) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_weighted_target_trace)) {
gpr_log(GPR_INFO,
"[weighted_target_lb %p] WeightedChild %p %s: deactivating",
weighted_target_policy_.get(), this, name_.c_str());
}
grpc_lb_weighted_target_trace.Log(
GPR_INFO, "[weighted_target_lb %p] WeightedChild %p %s: deactivating",
weighted_target_policy_.get(), this, name_.c_str());
// Set the child weight to 0 so that future picker won't contain this child.
weight_ = 0;
// Start a timer to delete the child.

@ -243,21 +243,18 @@ void CdsLb::Helper::UpdateState(grpc_connectivity_state state,
const absl::Status& status,
std::unique_ptr<SubchannelPicker> picker) {
if (parent_->shutting_down_ || parent_->child_policy_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO,
"[cdslb %p] state updated by child: %s message_state: (%s)", this,
ConnectivityStateName(state), status.ToString().c_str());
}
grpc_cds_lb_trace.Log(
GPR_INFO, "[cdslb %p] state updated by child: %s message_state: (%s)",
this, ConnectivityStateName(state), status.ToString().c_str());
parent_->channel_control_helper()->UpdateState(state, status,
std::move(picker));
}
void CdsLb::Helper::RequestReresolution() {
if (parent_->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] Re-resolution requested from child policy.",
parent_.get());
}
grpc_cds_lb_trace.Log(GPR_INFO,
"[cdslb %p] Re-resolution requested from child policy.",
parent_.get());
parent_->channel_control_helper()->RequestReresolution();
}
@ -273,30 +270,23 @@ void CdsLb::Helper::AddTraceEvent(TraceSeverity severity,
CdsLb::CdsLb(RefCountedPtr<XdsClient> xds_client, Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created -- using xds client %p", this,
xds_client_.get());
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] created -- using xds client %p",
this, xds_client_.get());
}
CdsLb::~CdsLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] destroying cds LB policy", this);
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] destroying cds LB policy", this);
}
void CdsLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] shutting down", this);
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] shutting down", this);
shutting_down_ = true;
MaybeDestroyChildPolicyLocked();
if (xds_client_ != nullptr) {
for (auto& watcher : watchers_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] cancelling watch for cluster %s", this,
watcher.first.c_str());
}
grpc_cds_lb_trace.Log(GPR_INFO,
"[cdslb %p] cancelling watch for cluster %s", this,
watcher.first.c_str());
CancelClusterDataWatch(watcher.first, watcher.second.watcher,
/*delay_unsubscription=*/false);
}
@ -327,10 +317,8 @@ void CdsLb::UpdateLocked(UpdateArgs args) {
// Update config.
auto old_config = std::move(config_);
config_ = std::move(args.config);
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] received update: cluster=%s", this,
config_->cluster().c_str());
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] received update: cluster=%s",
this, config_->cluster().c_str());
// Update args.
grpc_channel_args_destroy(args_);
args_ = args.args;
@ -339,10 +327,9 @@ void CdsLb::UpdateLocked(UpdateArgs args) {
if (old_config == nullptr || old_config->cluster() != config_->cluster()) {
if (old_config != nullptr) {
for (auto& watcher : watchers_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] cancelling watch for cluster %s", this,
watcher.first.c_str());
}
grpc_cds_lb_trace.Log(GPR_INFO,
"[cdslb %p] cancelling watch for cluster %s",
this, watcher.first.c_str());
CancelClusterDataWatch(watcher.first, watcher.second.watcher,
/*delay_unsubscription=*/true);
}
@ -370,10 +357,8 @@ bool CdsLb::GenerateDiscoveryMechanismForCluster(
// Create a new watcher if needed.
if (state.watcher == nullptr) {
auto watcher = absl::make_unique<ClusterWatcher>(Ref(), name);
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] starting watch for cluster %s", this,
name.c_str());
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] starting watch for cluster %s",
this, name.c_str());
state.watcher = watcher.get();
xds_client_->WatchClusterData(name, std::move(watcher));
return false;
@ -421,12 +406,10 @@ bool CdsLb::GenerateDiscoveryMechanismForCluster(
void CdsLb::OnClusterChanged(const std::string& name,
XdsApi::CdsUpdate cluster_data) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(
GPR_INFO,
"[cdslb %p] received CDS update for cluster %s from xds client %p: %s",
this, name.c_str(), xds_client_.get(), cluster_data.ToString().c_str());
}
grpc_cds_lb_trace.Log(
GPR_INFO,
"[cdslb %p] received CDS update for cluster %s from xds client %p: %s",
this, name.c_str(), xds_client_.get(), cluster_data.ToString().c_str());
// Store the update in the map if we are still interested in watching this
// cluster (i.e., it is not cancelled already).
// If we've already deleted this entry, then this is an update notification
@ -496,10 +479,8 @@ void CdsLb::OnClusterChanged(const std::string& name,
}
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties());
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] created child policy %s (%p)", this,
config->name(), child_policy_.get());
}
grpc_cds_lb_trace.Log(GPR_INFO, "[cdslb %p] created child policy %s (%p)",
this, config->name(), child_policy_.get());
}
// Update child policy.
UpdateArgs args;
@ -519,10 +500,9 @@ void CdsLb::OnClusterChanged(const std::string& name,
++it;
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_cds_lb_trace)) {
gpr_log(GPR_INFO, "[cdslb %p] cancelling watch for cluster %s", this,
cluster_name.c_str());
}
grpc_cds_lb_trace.Log(GPR_INFO,
"[cdslb %p] cancelling watch for cluster %s", this,
cluster_name.c_str());
CancelClusterDataWatch(cluster_name, it->second.watcher,
/*delay_unsubscription=*/false);
it = watchers_.erase(it);

@ -275,10 +275,9 @@ XdsClusterImplLb::Picker::Picker(XdsClusterImplLb* xds_cluster_impl_lb,
drop_config_(xds_cluster_impl_lb->config_->drop_config()),
drop_stats_(xds_cluster_impl_lb->drop_stats_),
picker_(std::move(picker)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_impl_lb %p] constructed new picker %p",
xds_cluster_impl_lb, this);
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] constructed new picker %p",
xds_cluster_impl_lb, this);
}
LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
@ -359,24 +358,20 @@ LoadBalancingPolicy::PickResult XdsClusterImplLb::Picker::Pick(
XdsClusterImplLb::XdsClusterImplLb(RefCountedPtr<XdsClient> xds_client,
Args args)
: LoadBalancingPolicy(std::move(args)), xds_client_(std::move(xds_client)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_impl_lb %p] created -- using xds client %p",
this, xds_client_.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] created -- using xds client %p", this,
xds_client_.get());
}
XdsClusterImplLb::~XdsClusterImplLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] destroying xds_cluster_impl LB policy",
this);
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO,
"[xds_cluster_impl_lb %p] destroying xds_cluster_impl LB policy", this);
}
void XdsClusterImplLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_impl_lb %p] shutting down", this);
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] shutting down", this);
shutting_down_ = true;
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
@ -403,9 +398,8 @@ void XdsClusterImplLb::ResetBackoffLocked() {
}
void XdsClusterImplLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_impl_lb %p] Received update", this);
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] Received update", this);
// Update config.
const bool is_initial_update = config_ == nullptr;
auto old_config = std::move(config_);
@ -442,13 +436,12 @@ void XdsClusterImplLb::MaybeUpdatePickerLocked() {
// whether) the child has reported.
if (config_->drop_config() != nullptr && config_->drop_config()->drop_all()) {
auto drop_picker = absl::make_unique<Picker>(this, picker_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] updating connectivity (drop all): "
"state=READY "
"picker=%p",
this, drop_picker.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO,
"[xds_cluster_impl_lb %p] updating connectivity (drop all): "
"state=READY "
"picker=%p",
this, drop_picker.get());
channel_control_helper()->UpdateState(GRPC_CHANNEL_READY, absl::Status(),
std::move(drop_picker));
return;
@ -456,14 +449,13 @@ void XdsClusterImplLb::MaybeUpdatePickerLocked() {
// Otherwise, update only if we have a child picker.
if (picker_ != nullptr) {
auto drop_picker = absl::make_unique<Picker>(this, picker_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] updating connectivity: state=%s "
"status=(%s) "
"picker=%p",
this, ConnectivityStateName(state_), status_.ToString().c_str(),
drop_picker.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO,
"[xds_cluster_impl_lb %p] updating connectivity: state=%s "
"status=(%s) "
"picker=%p",
this, ConnectivityStateName(state_), status_.ToString().c_str(),
drop_picker.get());
channel_control_helper()->UpdateState(state_, status_,
std::move(drop_picker));
}
@ -479,11 +471,9 @@ OrphanablePtr<LoadBalancingPolicy> XdsClusterImplLb::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_xds_cluster_impl_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] Created new child policy handler %p",
this, lb_policy.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] Created new child policy handler %p",
this, lb_policy.get());
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.
@ -507,11 +497,9 @@ void XdsClusterImplLb::UpdateChildPolicyLocked(ServerAddressList addresses,
const_cast<char*>(config_->cluster_name().c_str()));
update_args.args = grpc_channel_args_copy_and_add(args, &cluster_arg, 1);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] Updating child policy handler %p", this,
child_policy_.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO, "[xds_cluster_impl_lb %p] Updating child policy handler %p",
this, child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
@ -554,14 +542,13 @@ void XdsClusterImplLb::Helper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
std::unique_ptr<SubchannelPicker> picker) {
if (xds_cluster_impl_policy_->shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_impl_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_impl_lb %p] child connectivity state update: "
"state=%s (%s) "
"picker=%p",
xds_cluster_impl_policy_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
}
grpc_xds_cluster_impl_lb_trace.Log(
GPR_INFO,
"[xds_cluster_impl_lb %p] child connectivity state update: "
"state=%s (%s) "
"picker=%p",
xds_cluster_impl_policy_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
// Save the state and picker.
xds_cluster_impl_policy_->state_ = state;
xds_cluster_impl_policy_->status_ = status;
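Every hunk in this file follows the same shape: a guarded block of the form `if (GRPC_TRACE_FLAG_ENABLED(flag)) { gpr_log(...); }` collapses into a single `flag.Log(...)` call. The definition of `TraceFlag::Log` itself is not part of the hunks shown here, so the sketch below only illustrates the contract these call sites appear to rely on: since `GPR_INFO`/`GPR_ERROR` are macros expanding to `__FILE__, __LINE__, <severity>`, `Log()` presumably accepts file/line/severity followed by a printf-style format and is a no-op when the flag is disabled. The class name, severity type, and output target below are assumptions for illustration, not the gRPC implementation.

// Minimal, self-contained sketch -- NOT the gRPC implementation.
#include <cstdarg>
#include <cstdio>

class TraceFlagSketch {
 public:
  explicit TraceFlagSketch(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }
  // Printf-style logging that does nothing when the flag is disabled.
  void Log(const char* file, int line, int severity, const char* format,
           ...) const {
    if (!enabled_) return;
    va_list args;
    va_start(args, format);
    std::fprintf(stderr, "%s:%d [sev=%d] ", file, line, severity);
    std::vfprintf(stderr, format, args);
    std::fputc('\n', stderr);
    va_end(args);
  }

 private:
  const bool enabled_;
};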

@ -224,18 +224,15 @@ XdsClusterManagerLb::XdsClusterManagerLb(Args args)
: LoadBalancingPolicy(std::move(args)) {}
XdsClusterManagerLb::~XdsClusterManagerLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(
GPR_INFO,
"[xds_cluster_manager_lb %p] destroying xds_cluster_manager LB policy",
this);
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] destroying xds_cluster_manager LB policy",
this);
}
void XdsClusterManagerLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_manager_lb %p] shutting down", this);
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO, "[xds_cluster_manager_lb %p] shutting down", this);
shutting_down_ = true;
children_.clear();
}
@ -250,9 +247,8 @@ void XdsClusterManagerLb::ResetBackoffLocked() {
void XdsClusterManagerLb::UpdateLocked(UpdateArgs args) {
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_manager_lb %p] Received update", this);
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO, "[xds_cluster_manager_lb %p] Received update", this);
// Update config.
config_ = std::move(args.config);
// Deactivate the children not in the new config.
@ -326,22 +322,20 @@ void XdsClusterManagerLb::UpdateStateLocked() {
} else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_manager_lb %p] connectivity changed to %s",
this, ConnectivityStateName(connectivity_state));
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO, "[xds_cluster_manager_lb %p] connectivity changed to %s", this,
ConnectivityStateName(connectivity_state));
ClusterPicker::ClusterMap cluster_map;
for (const auto& p : config_->cluster_map()) {
const std::string& cluster_name = p.first;
RefCountedPtr<ChildPickerWrapper>& child_picker = cluster_map[cluster_name];
child_picker = children_[cluster_name]->picker_wrapper();
if (child_picker == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] child %s has not yet returned a "
"picker; creating a QueuePicker.",
this, cluster_name.c_str());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] child %s has not yet returned a "
"picker; creating a QueuePicker.",
this, cluster_name.c_str());
child_picker = MakeRefCounted<ChildPickerWrapper>(
cluster_name,
absl::make_unique<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker")));
@ -367,32 +361,28 @@ XdsClusterManagerLb::ClusterChild::ClusterChild(
const std::string& name)
: xds_cluster_manager_policy_(std::move(xds_cluster_manager_policy)),
name_(name) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] created ClusterChild %p for %s",
xds_cluster_manager_policy_.get(), this, name_.c_str());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO, "[xds_cluster_manager_lb %p] created ClusterChild %p for %s",
xds_cluster_manager_policy_.get(), this, name_.c_str());
GRPC_CLOSURE_INIT(&on_delayed_removal_timer_, OnDelayedRemovalTimer, this,
grpc_schedule_on_exec_ctx);
}
XdsClusterManagerLb::ClusterChild::~ClusterChild() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p: destroying "
"child",
xds_cluster_manager_policy_.get(), this);
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p: destroying "
"child",
xds_cluster_manager_policy_.get(), this);
xds_cluster_manager_policy_.reset(DEBUG_LOCATION, "ClusterChild");
}
void XdsClusterManagerLb::ClusterChild::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: "
"shutting down child",
xds_cluster_manager_policy_.get(), this, name_.c_str());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: "
"shutting down child",
xds_cluster_manager_policy_.get(), this, name_.c_str());
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
grpc_pollset_set_del_pollset_set(
@ -421,14 +411,12 @@ XdsClusterManagerLb::ClusterChild::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_xds_cluster_manager_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: Created "
"new child "
"policy handler %p",
xds_cluster_manager_policy_.get(), this, name_.c_str(),
lb_policy.get());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: Created "
"new child "
"policy handler %p",
xds_cluster_manager_policy_.get(), this, name_.c_str(), lb_policy.get());
// Add the xDS's interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// xDS LB, which in turn is tied to the application's call.
@ -458,14 +446,13 @@ void XdsClusterManagerLb::ClusterChild::UpdateLocked(
update_args.addresses = addresses;
update_args.args = grpc_channel_args_copy(args);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: "
"Updating child "
"policy handler %p",
xds_cluster_manager_policy_.get(), this, name_.c_str(),
child_policy_.get());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] ClusterChild %p %s: "
"Updating child "
"policy handler %p",
xds_cluster_manager_policy_.get(), this, name_.c_str(),
child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
@ -527,15 +514,13 @@ XdsClusterManagerLb::ClusterChild::Helper::CreateSubchannel(
void XdsClusterManagerLb::ClusterChild::Helper::UpdateState(
grpc_connectivity_state state, const absl::Status& status,
std::unique_ptr<SubchannelPicker> picker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_cluster_manager_lb_trace)) {
gpr_log(
GPR_INFO,
"[xds_cluster_manager_lb %p] child %s: received update: state=%s (%s) "
"picker=%p",
xds_cluster_manager_child_->xds_cluster_manager_policy_.get(),
xds_cluster_manager_child_->name_.c_str(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
}
grpc_xds_cluster_manager_lb_trace.Log(
GPR_INFO,
"[xds_cluster_manager_lb %p] child %s: received update: state=%s (%s) "
"picker=%p",
xds_cluster_manager_child_->xds_cluster_manager_policy_.get(),
xds_cluster_manager_child_->name_.c_str(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
if (xds_cluster_manager_child_->xds_cluster_manager_policy_->shutting_down_) {
return;
}

@ -369,13 +369,12 @@ void XdsClusterResolverLb::Helper::UpdateState(
xds_cluster_resolver_policy_->child_policy_ == nullptr) {
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] child policy updated state=%s (%s) "
"picker=%p",
xds_cluster_resolver_policy_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] child policy updated state=%s (%s) "
"picker=%p",
xds_cluster_resolver_policy_.get(), ConnectivityStateName(state),
status.ToString().c_str(), picker.get());
xds_cluster_resolver_policy_->channel_control_helper()->UpdateState(
state, status, std::move(picker));
}
@ -392,12 +391,11 @@ void XdsClusterResolverLb::Helper::AddTraceEvent(TraceSeverity severity,
//
void XdsClusterResolverLb::EdsDiscoveryMechanism::Start() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] eds discovery mechanism %" PRIuPTR
":%p starting xds watch for %s",
parent(), index(), this, std::string(GetEdsResourceName()).c_str());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] eds discovery mechanism %" PRIuPTR
":%p starting xds watch for %s",
parent(), index(), this, std::string(GetEdsResourceName()).c_str());
auto watcher = absl::make_unique<EndpointWatcher>(
Ref(DEBUG_LOCATION, "EdsDiscoveryMechanism"));
watcher_ = watcher.get();
@ -406,12 +404,11 @@ void XdsClusterResolverLb::EdsDiscoveryMechanism::Start() {
}
void XdsClusterResolverLb::EdsDiscoveryMechanism::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] eds discovery mechanism %" PRIuPTR
":%p cancelling xds watch for %s",
parent(), index(), this, std::string(GetEdsResourceName()).c_str());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] eds discovery mechanism %" PRIuPTR
":%p cancelling xds watch for %s",
parent(), index(), this, std::string(GetEdsResourceName()).c_str());
parent()->xds_client_->CancelEndpointDataWatch(GetEdsResourceName(),
watcher_);
Unref();
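The format strings in these hunks splice width-portable conversion macros such as `PRIuPTR` and `PRId64` into the string literal (for example `"... mechanism %" PRIuPTR ":%p ..."`). Those macros come from `<inttypes.h>` and rely on adjacent string-literal concatenation. The standalone snippet below shows the same idiom in isolation; the names and values are placeholders, not gRPC code.

// Standalone illustration of the PRIuPTR / PRId64 idiom.
#include <cinttypes>
#include <cstdio>

int main() {
  uintptr_t index = 3;
  int64_t delay_ms = 250;
  // Adjacent literals concatenate at compile time, so "%" PRIuPTR " ..."
  // becomes a single format string (e.g. "%lu ..." on 64-bit Linux).
  std::printf("mechanism %" PRIuPTR " retrying in %" PRId64 " ms\n", index,
              delay_ms);
  return 0;
}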
@ -509,22 +506,19 @@ void XdsClusterResolverLb::LogicalDNSDiscoveryMechanism::Start() {
return;
}
resolver_->StartLocked();
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] logical DNS discovery mechanism "
"%" PRIuPTR ":%p starting dns resolver %p",
parent(), index(), this, resolver_.get());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] logical DNS discovery mechanism "
"%" PRIuPTR ":%p starting dns resolver %p",
parent(), index(), this, resolver_.get());
}
void XdsClusterResolverLb::LogicalDNSDiscoveryMechanism::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] logical DNS discovery mechanism %" PRIuPTR
":%p shutting down dns resolver %p",
parent(), index(), this, resolver_.get());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] logical DNS discovery mechanism %" PRIuPTR
":%p shutting down dns resolver %p",
parent(), index(), this, resolver_.get());
resolver_.reset();
Unref();
}
@ -564,12 +558,11 @@ XdsClusterResolverLb::XdsClusterResolverLb(RefCountedPtr<XdsClient> xds_client,
xds_client_(std::move(xds_client)),
server_name_(std::move(server_name)),
is_xds_uri_(is_xds_uri) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] created -- xds_client=%p, "
"server_name=%s, is_xds_uri=%d",
this, xds_client_.get(), server_name_.c_str(), is_xds_uri_);
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] created -- xds_client=%p, "
"server_name=%s, is_xds_uri=%d",
this, xds_client_.get(), server_name_.c_str(), is_xds_uri_);
// EDS-only flow.
if (!is_xds_uri_) {
// Setup channelz linkage.
@ -586,18 +579,16 @@ XdsClusterResolverLb::XdsClusterResolverLb(RefCountedPtr<XdsClient> xds_client,
}
XdsClusterResolverLb::~XdsClusterResolverLb() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] destroying xds_cluster_resolver LB "
"policy",
this);
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] destroying xds_cluster_resolver LB "
"policy",
this);
}
void XdsClusterResolverLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_resolver_lb %p] shutting down", this);
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO, "[xds_cluster_resolver_lb %p] shutting down", this);
shutting_down_ = true;
MaybeDestroyChildPolicyLocked();
discovery_mechanisms_.clear();
@ -628,9 +619,8 @@ void XdsClusterResolverLb::MaybeDestroyChildPolicyLocked() {
}
void XdsClusterResolverLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_resolver_lb %p] Received update", this);
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO, "[xds_cluster_resolver_lb %p] Received update", this);
const bool is_initial_update = args_ == nullptr;
// Update config.
auto old_config = std::move(config_);
@ -685,12 +675,11 @@ void XdsClusterResolverLb::ExitIdleLocked() {
void XdsClusterResolverLb::OnEndpointChanged(size_t index,
XdsApi::EdsUpdate update) {
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p] Received update from xds client"
" for discovery mechanism %" PRIuPTR "",
this, index);
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO,
"[xds_cluster_resolver_lb %p] Received update from xds client"
" for discovery mechanism %" PRIuPTR "",
this, index);
// We need at least one priority for each discovery mechanism, just so that we
// have a child in which to create the xds_cluster_impl policy. This ensures
// that we properly handle the case of a discovery mechanism dropping 100% of
@ -1048,10 +1037,9 @@ void XdsClusterResolverLb::UpdateChildPolicyLocked() {
if (child_policy_ == nullptr) {
child_policy_ = CreateChildPolicyLocked(update_args.args);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_cluster_resolver_lb %p] Updating child policy %p",
this, child_policy_.get());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO, "[xds_cluster_resolver_lb %p] Updating child policy %p", this,
child_policy_.get());
child_policy_->UpdateLocked(std::move(update_args));
}
@ -1082,11 +1070,9 @@ XdsClusterResolverLb::CreateChildPolicyLocked(const grpc_channel_args* args) {
"[xds_cluster_resolver_lb %p] failure creating child policy", this);
return nullptr;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_xds_cluster_resolver_trace)) {
gpr_log(GPR_INFO,
"[xds_cluster_resolver_lb %p]: Created new child policy %p", this,
lb_policy.get());
}
grpc_lb_xds_cluster_resolver_trace.Log(
GPR_INFO, "[xds_cluster_resolver_lb %p]: Created new child policy %p",
this, lb_policy.get());
// Add our interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// this policy, which in turn is tied to the application's call.

@ -56,17 +56,14 @@ class XdsResolver : public Resolver {
server_name_(absl::StripPrefix(args.uri.path(), "/")),
args_(grpc_channel_args_copy(args.args)),
interested_parties_(args.pollset_set) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] created for server name %s", this,
server_name_.c_str());
}
grpc_xds_resolver_trace.Log(GPR_INFO,
"[xds_resolver %p] created for server name %s",
this, server_name_.c_str());
}
~XdsResolver() override {
grpc_channel_args_destroy(args_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] destroyed", this);
}
grpc_xds_resolver_trace.Log(GPR_INFO, "[xds_resolver %p] destroyed", this);
}
void StartLocked() override;
@ -370,10 +367,9 @@ bool XdsResolver::XdsConfigSelector::Route::operator==(
XdsResolver::XdsConfigSelector::XdsConfigSelector(
RefCountedPtr<XdsResolver> resolver, grpc_error_handle* error)
: resolver_(std::move(resolver)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] creating XdsConfigSelector %p",
resolver_.get(), this);
}
grpc_xds_resolver_trace.Log(GPR_INFO,
"[xds_resolver %p] creating XdsConfigSelector %p",
resolver_.get(), this);
// 1. Construct the route table
// 2. Update resolver's cluster state map
// 3. Construct cluster list to hold on to entries in the cluster state
@ -385,10 +381,9 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
// invalid data.
route_table_.reserve(resolver_->current_virtual_host_.routes.size());
for (auto& route : resolver_->current_virtual_host_.routes) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] XdsConfigSelector %p: route: %s",
resolver_.get(), this, route.ToString().c_str());
}
grpc_xds_resolver_trace.Log(
GPR_INFO, "[xds_resolver %p] XdsConfigSelector %p: route: %s",
resolver_.get(), this, route.ToString().c_str());
route_table_.emplace_back();
auto& route_entry = route_table_.back();
route_entry.route = route;
@ -436,10 +431,9 @@ XdsResolver::XdsConfigSelector::XdsConfigSelector(
}
XdsResolver::XdsConfigSelector::~XdsConfigSelector() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] destroying XdsConfigSelector %p",
resolver_.get(), this);
}
grpc_xds_resolver_trace.Log(
GPR_INFO, "[xds_resolver %p] destroying XdsConfigSelector %p",
resolver_.get(), this);
clusters_.clear();
resolver_->MaybeRemoveUnusedClusters();
}
@ -781,9 +775,8 @@ void XdsResolver::StartLocked() {
}
void XdsResolver::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] shutting down", this);
}
grpc_xds_resolver_trace.Log(GPR_INFO, "[xds_resolver %p] shutting down",
this);
if (xds_client_ != nullptr) {
if (listener_watcher_ != nullptr) {
xds_client_->CancelListenerDataWatch(server_name_, listener_watcher_,
@ -806,9 +799,8 @@ void XdsResolver::ShutdownLocked() {
}
void XdsResolver::OnListenerUpdate(XdsApi::LdsUpdate listener) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] received updated listener data", this);
}
grpc_xds_resolver_trace.Log(
GPR_INFO, "[xds_resolver %p] received updated listener data", this);
if (listener.http_connection_manager.route_config_name !=
route_config_name_) {
if (route_config_watcher_ != nullptr) {
@ -841,9 +833,8 @@ void XdsResolver::OnListenerUpdate(XdsApi::LdsUpdate listener) {
}
void XdsResolver::OnRouteConfigUpdate(XdsApi::RdsUpdate rds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] received updated route config", this);
}
grpc_xds_resolver_trace.Log(
GPR_INFO, "[xds_resolver %p] received updated route config", this);
// Find the relevant VirtualHost from the RouteConfiguration.
XdsApi::RdsUpdate::VirtualHost* vhost =
rds_update.FindVirtualHostForDomain(server_name_);
@ -932,10 +923,9 @@ void XdsResolver::GenerateResult() {
OnError(error);
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_resolver_trace)) {
gpr_log(GPR_INFO, "[xds_resolver %p] generated service config: %s", this,
result.service_config->json_string().c_str());
}
grpc_xds_resolver_trace.Log(
GPR_INFO, "[xds_resolver %p] generated service config: %s", this,
result.service_config->json_string().c_str());
grpc_arg new_args[] = {
xds_client_->MakeChannelArg(),
config_selector->MakeChannelArg(),

@ -676,23 +676,20 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld)
seen_recv_trailing_metadata_from_surface_(false),
abandoned_(false) {
lb_call_ = calld->CreateLoadBalancedCall(&attempt_dispatch_controller_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: create lb_call=%p",
calld->chand_, calld, this, lb_call_.get());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: create lb_call=%p",
calld->chand_, calld, this, lb_call_.get());
// If per_attempt_recv_timeout is set, start a timer.
if (calld->retry_policy_ != nullptr &&
calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
grpc_millis per_attempt_recv_deadline =
ExecCtx::Get()->Now() +
*calld->retry_policy_->per_attempt_recv_timeout();
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
" ms",
calld->chand_, calld, this,
*calld->retry_policy_->per_attempt_recv_timeout());
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64 " ms",
calld->chand_, calld, this,
*calld->retry_policy_->per_attempt_recv_timeout());
// Schedule retry after computed delay.
GRPC_CLOSURE_INIT(&on_per_attempt_recv_timer_, OnPerAttemptRecvTimer, this,
nullptr);
@ -705,10 +702,9 @@ RetryFilter::CallData::CallAttempt::CallAttempt(CallData* calld)
}
RetryFilter::CallData::CallAttempt::~CallAttempt() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying call attempt",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: destroying call attempt",
calld_->chand_, calld_, this);
}
void RetryFilter::CallData::CallAttempt::FreeCachedSendOpDataAfterCommit() {
@ -771,12 +767,11 @@ void RetryFilter::CallData::CallAttempt::MaybeSwitchToFastPath() {
// yet seen that op from the surface, we can't switch yet.
if (recv_trailing_metadata_internal_batch_ != nullptr) return;
// Switch to fast path.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt",
calld_->chand_, calld_, this);
calld_->committed_call_ = std::move(lb_call_);
calld_->call_attempt_.reset(DEBUG_LOCATION, "MaybeSwitchToFastPath");
}
@ -790,12 +785,11 @@ RetryFilter::CallData::CallAttempt::MaybeCreateBatchForReplay() {
// send_initial_metadata.
if (calld_->seen_send_initial_metadata_ && !started_send_initial_metadata_ &&
!calld_->pending_send_initial_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_initial_metadata op",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_initial_metadata op",
calld_->chand_, calld_, this);
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
replay_batch_data->AddRetriableSendInitialMetadataOp();
}
@ -804,12 +798,11 @@ RetryFilter::CallData::CallAttempt::MaybeCreateBatchForReplay() {
if (started_send_message_count_ < calld_->send_messages_.size() &&
started_send_message_count_ == completed_send_message_count_ &&
!calld_->pending_send_message_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_message op",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_message op",
calld_->chand_, calld_, this);
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
}
@ -823,12 +816,11 @@ RetryFilter::CallData::CallAttempt::MaybeCreateBatchForReplay() {
started_send_message_count_ == calld_->send_messages_.size() &&
!started_send_trailing_metadata_ &&
!calld_->pending_send_trailing_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_trailing_metadata op",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_trailing_metadata op",
calld_->chand_, calld_, this);
if (replay_batch_data == nullptr) {
replay_batch_data = CreateBatch(1, true /* set_on_complete */);
}
@ -853,11 +845,10 @@ void StartBatchInCallCombiner(void* arg, grpc_error_handle /*ignored*/) {
void RetryFilter::CallData::CallAttempt::AddClosureForBatch(
grpc_transport_stream_op_batch* batch, const char* reason,
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: adding batch (%s): %s",
calld_->chand_, calld_, this, reason,
grpc_transport_stream_op_batch_string(batch).c_str());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: adding batch (%s): %s",
calld_->chand_, calld_, this, reason,
grpc_transport_stream_op_batch_string(batch).c_str());
batch->handler_private.extra_arg = lb_call_.get();
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
batch, grpc_schedule_on_exec_ctx);
@ -866,12 +857,11 @@ void RetryFilter::CallData::CallAttempt::AddClosureForBatch(
void RetryFilter::CallData::CallAttempt::
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call failed but "
"recv_trailing_metadata not started; starting it internally",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: call failed but "
"recv_trailing_metadata not started; starting it internally",
calld_->chand_, calld_, this);
// Create batch_data with 2 refs, since this batch will be unreffed twice:
// once for the recv_trailing_metadata_ready callback when the batch
// completes, and again when we actually get a recv_trailing_metadata
@ -1043,22 +1033,19 @@ void RetryFilter::CallData::CallAttempt::AddRetriableBatches(
}
void RetryFilter::CallData::CallAttempt::StartRetriableBatches() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: constructing retriable batches",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p attempt=%p: constructing retriable batches",
calld_->chand_, calld_, this);
// Construct list of closures to execute, one for each pending batch.
CallCombinerClosureList closures;
AddRetriableBatches(&closures);
// Note: This will yield the call combiner.
// Start batches on LB call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting %" PRIuPTR
" retriable batches on lb_call=%p",
calld_->chand_, calld_, this, closures.size(), lb_call_.get());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting %" PRIuPTR
" retriable batches on lb_call=%p",
calld_->chand_, calld_, this, closures.size(),
lb_call_.get());
closures.RunClosures(calld_->call_combiner_);
}
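One behavioral detail of the single-call form used throughout these hunks: format arguments such as `grpc_transport_stream_op_batch_string(batch).c_str()` or `status.ToString().c_str()` are evaluated at the call site before `Log()` runs, whereas the old guarded form only evaluated them when the flag was enabled. The snippet below is a self-contained illustration of that C++ evaluation-order difference; `g_trace_enabled`, `TraceLog`, and `ExpensiveDescription` are placeholders, not gRPC identifiers.

#include <cstdio>
#include <string>

static bool g_trace_enabled = false;  // tracing off

static std::string ExpensiveDescription() {
  std::puts("(building description)");  // marks when the argument is built
  return "state=READY";
}

static void TraceLog(const char* format, const char* arg) {
  if (g_trace_enabled) std::printf(format, arg);  // dropped when disabled
}

int main() {
  // Guarded form: ExpensiveDescription() never runs while tracing is off.
  if (g_trace_enabled) {
    std::printf("guarded: %s\n", ExpensiveDescription().c_str());
  }
  // Single-call form: the argument is built first, then TraceLog() discards
  // the message, so "(building description)" still prints once.
  TraceLog("single-call: %s\n", ExpensiveDescription().c_str());
  return 0;
}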
@ -1083,21 +1070,18 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
if (calld_->retry_throttle_data_ != nullptr) {
calld_->retry_throttle_data_->RecordSuccess();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: call succeeded",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call succeeded",
calld_->chand_, calld_, this);
return false;
}
// Status is not OK. Check whether the status is retryable.
if (!calld_->retry_policy_->retryable_status_codes().Contains(*status)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: status %s not configured as "
"retryable",
calld_->chand_, calld_, this,
grpc_status_code_to_string(*status));
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: status %s not configured as "
"retryable",
calld_->chand_, calld_, this, grpc_status_code_to_string(*status));
return false;
}
}
@ -1110,30 +1094,25 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
// checks, so that we don't fail to record failures due to other factors.
if (calld_->retry_throttle_data_ != nullptr &&
!calld_->retry_throttle_data_->RecordFailure()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: retries throttled",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retries throttled",
calld_->chand_, calld_, this);
return false;
}
// Check whether the call is committed.
if (calld_->retry_committed_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retries already committed",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p attempt=%p: retries already committed",
calld_->chand_, calld_, this);
return false;
}
// Check whether we have retries remaining.
++calld_->num_attempts_completed_;
if (calld_->num_attempts_completed_ >=
calld_->retry_policy_->max_attempts()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(
GPR_INFO, "chand=%p calld=%p attempt=%p: exceeded %d retry attempts",
calld_->chand_, calld_, this, calld_->retry_policy_->max_attempts());
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p attempt=%p: exceeded %d retry attempts",
calld_->chand_, calld_, this, calld_->retry_policy_->max_attempts());
return false;
}
// Check server push-back.
@ -1141,20 +1120,17 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
// If the value is "-1" or any other unparseable string, we do not retry.
uint32_t ms;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: not retrying due to server "
"push-back",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: not retrying due to server "
"push-back",
calld_->chand_, calld_, this);
return false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: server push-back: retry in %u ms",
calld_->chand_, calld_, this, ms);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: server push-back: retry in %u ms",
calld_->chand_, calld_, this, ms);
*server_pushback_ms = static_cast<grpc_millis>(ms);
}
}
@ -1162,12 +1138,10 @@ bool RetryFilter::CallData::CallAttempt::ShouldRetry(
auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
calld_->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
if (!service_config_call_data->call_dispatch_controller()->ShouldRetry()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: call dispatch controller denied retry",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: call dispatch controller denied retry",
calld_->chand_, calld_, this);
return false;
}
// We should retry.
@ -1218,14 +1192,12 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void* arg, grpc_error_handle error) {
auto* call_attempt = static_cast<CallAttempt*>(arg);
auto* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: perAttemptRecvTimeout timer fired: "
"error=%s, per_attempt_recv_timer_pending_=%d",
calld->chand_, calld, call_attempt,
grpc_error_std_string(error).c_str(),
call_attempt->per_attempt_recv_timer_pending_);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: perAttemptRecvTimeout timer fired: "
"error=%s, per_attempt_recv_timer_pending_=%d",
calld->chand_, calld, call_attempt, grpc_error_std_string(error).c_str(),
call_attempt->per_attempt_recv_timer_pending_);
CallCombinerClosureList closures;
if (error == GRPC_ERROR_NONE &&
call_attempt->per_attempt_recv_timer_pending_) {
@ -1261,12 +1233,10 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void RetryFilter::CallData::CallAttempt::MaybeCancelPerAttemptRecvTimer() {
if (per_attempt_recv_timer_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: cancelling "
"perAttemptRecvTimeout timer",
calld_->chand_, calld_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: cancelling "
"perAttemptRecvTimeout timer",
calld_->chand_, calld_, this);
per_attempt_recv_timer_pending_ = false;
grpc_timer_cancel(&per_attempt_recv_timer_);
}
@ -1282,11 +1252,10 @@ RetryFilter::CallData::CallAttempt::BatchData::BatchData(
GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace) ? "BatchData" : nullptr,
refcount),
call_attempt_(std::move(attempt)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: creating batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_,
call_attempt_.get(), this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: creating batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_,
call_attempt_.get(), this);
// We hold a ref to the call stack for every batch sent on a call attempt.
// This is because some batches on the call attempt may not complete
// until after all of the batches are completed at the surface (because
@ -1303,11 +1272,10 @@ RetryFilter::CallData::CallAttempt::BatchData::BatchData(
}
RetryFilter::CallData::CallAttempt::BatchData::~BatchData() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_,
call_attempt_.get(), this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: destroying batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_,
call_attempt_.get(), this);
if (batch_.send_initial_metadata) {
grpc_metadata_batch_destroy(&call_attempt_->send_initial_metadata_);
}
@ -1388,13 +1356,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_.get();
CallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_initial_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_initial_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
call_attempt->completed_recv_initial_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_initial_metadata op, so do nothing.
@ -1415,12 +1381,10 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
if (GPR_UNLIKELY((call_attempt->trailing_metadata_available_ ||
error != GRPC_ERROR_NONE) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring "
"recv_initial_metadata_ready (Trailers-Only)",
calld->chand_, calld, call_attempt);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring "
"recv_initial_metadata_ready (Trailers-Only)",
calld->chand_, calld, call_attempt);
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
std::move(batch_data);
call_attempt->recv_initial_metadata_error_ = GRPC_ERROR_REF(error);
@ -1487,13 +1451,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_.get();
CallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_message_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_message_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
++call_attempt->completed_recv_message_count_;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_message op, so do nothing.
@ -1513,12 +1475,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
if (GPR_UNLIKELY((call_attempt->recv_message_ == nullptr ||
error != GRPC_ERROR_NONE) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
"(nullptr message and recv_trailing_metadata pending)",
calld->chand_, calld, call_attempt);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
"(nullptr message and recv_trailing_metadata pending)",
calld->chand_, calld, call_attempt);
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
call_attempt->recv_message_error_ = GRPC_ERROR_REF(error);
CallCombinerClosureList closures;
@ -1683,13 +1644,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_.get();
CallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_trailing_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_trailing_metadata_ready, error=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str());
call_attempt->completed_recv_trailing_metadata_ = true;
// If this attempt has been abandoned, then we're not going to use the
// result of this recv_trailing_metadata op, so do nothing.
@ -1709,13 +1668,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
bool is_lb_drop = false;
GetCallStatus(calld->deadline_, md_batch, GRPC_ERROR_REF(error), &status,
&server_pushback_md, &is_lb_drop);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: call finished, status=%s is_lb_drop=%d",
calld->chand_, calld, call_attempt, grpc_status_code_to_string(status),
is_lb_drop);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: call finished, status=%s is_lb_drop=%d",
calld->chand_, calld, call_attempt, grpc_status_code_to_string(status),
is_lb_drop);
// Check if we should retry.
grpc_millis server_pushback_ms = -1;
if (call_attempt->ShouldRetry(status, is_lb_drop, server_pushback_md,
@ -1800,12 +1757,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::
}
}
if (have_pending_send_ops) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting next batch for pending "
"send op(s)",
calld->chand_, calld, call_attempt_.get());
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: starting next batch for pending "
"send op(s)",
calld->chand_, calld, call_attempt_.get());
call_attempt_->AddRetriableBatches(closures);
}
}
@ -1815,14 +1771,13 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_.get();
CallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
// If this attempt has been abandoned, then we're not going to propagate
// the completion of this batch, so do nothing.
if (call_attempt->abandoned_) {
@ -1836,10 +1791,9 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY(!calld->retry_committed_ && error != GRPC_ERROR_NONE &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
calld->chand_, calld, call_attempt);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring on_complete",
calld->chand_, calld, call_attempt);
call_attempt->on_complete_deferred_batches_.emplace_back(
std::move(batch_data), GRPC_ERROR_REF(error));
CallCombinerClosureList closures;
@ -1891,14 +1845,13 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnCompleteForCancelOp(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_.get();
CallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete for cancel_stream batch, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete for cancel_stream batch, error=%s, batch=%s",
calld->chand_, calld, call_attempt, batch_data.get(),
grpc_error_std_string(error).c_str(),
grpc_transport_stream_op_batch_string(&batch_data->batch_).c_str());
GRPC_CALL_COMBINER_STOP(
calld->call_combiner_,
"on_complete for internally generated cancel_stream op");
@ -1960,14 +1913,12 @@ void RetryFilter::CallData::CallAttempt::BatchData::
void RetryFilter::CallData::CallAttempt::BatchData::
AddRetriableSendMessageOp() {
auto* calld = call_attempt_->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: starting calld->send_messages[%" PRIuPTR
"]",
calld->chand_, calld, call_attempt_.get(),
call_attempt_->started_send_message_count_);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: starting calld->send_messages[%" PRIuPTR
"]",
calld->chand_, calld, call_attempt_.get(),
call_attempt_->started_send_message_count_);
ByteStreamCache* cache =
calld->send_messages_[call_attempt_->started_send_message_count_];
++call_attempt_->started_send_message_count_;
@ -2054,10 +2005,8 @@ grpc_error_handle RetryFilter::CallData::Init(
grpc_call_element* elem, const grpc_call_element_args* args) {
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
new (elem->call_data) CallData(chand, *args);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", chand,
elem->call_data);
}
grpc_retry_trace.Log(GPR_INFO, "chand=%p calld=%p: created call", chand,
elem->call_data);
return GRPC_ERROR_NONE;
}
@ -2154,10 +2103,9 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
// Handle cancellation.
if (GPR_UNLIKELY(batch->cancel_stream)) {
grpc_error_handle cancel_error = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_,
this, grpc_error_std_string(cancel_error).c_str());
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_, this,
grpc_error_std_string(cancel_error).c_str());
// If we have a current call attempt, commit the call, then send
// the cancellation down to that attempt. When the call fails, it
// will not be retried, because we have committed it here.
@ -2175,10 +2123,8 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
}
// Cancel retry timer.
if (retry_timer_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling retry timer", chand_,
this);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: cancelling retry timer", chand_, this);
retry_timer_pending_ = false; // Lame timer callback.
grpc_timer_cancel(&retry_timer_);
FreeAllCachedSendOpData();
@ -2224,12 +2170,11 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
if (num_attempts_completed_ == 0 && retry_committed_ &&
(retry_policy_ == nullptr ||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retry committed before first attempt; "
"creating LB call",
chand_, this);
}
grpc_retry_trace.Log(
GPR_INFO,
"chand=%p calld=%p: retry committed before first attempt; "
"creating LB call",
chand_, this);
PendingBatchClear(pending);
auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
@ -2241,18 +2186,15 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
// Otherwise, create a call attempt.
// The attempt will automatically start any necessary replays or
// pending batches.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: creating call attempt", chand_,
this);
}
grpc_retry_trace.Log(GPR_INFO, "chand=%p calld=%p: creating call attempt",
chand_, this);
CreateCallAttempt();
return;
}
// Send batches to call attempt.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on attempt=%p", chand_,
this, call_attempt_.get());
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p: starting batch on attempt=%p",
chand_, this, call_attempt_.get());
call_attempt_->StartRetriableBatches();
}
@ -2321,27 +2263,23 @@ void RetryFilter::CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
}
void RetryFilter::CallData::FreeCachedSendInitialMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_initial_metadata",
chand_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p: destroying send_initial_metadata",
chand_, this);
grpc_metadata_batch_destroy(&send_initial_metadata_);
}
void RetryFilter::CallData::FreeCachedSendMessage(size_t idx) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying send_messages[%" PRIuPTR "]", chand_,
this, idx);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: destroying send_messages[%" PRIuPTR "]",
chand_, this, idx);
send_messages_[idx]->Destroy();
}
void RetryFilter::CallData::FreeCachedSendTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_trailing_metadata",
chand_, this);
}
grpc_retry_trace.Log(GPR_INFO,
"chand=%p calld=%p: destroying send_trailing_metadata",
chand_, this);
grpc_metadata_batch_destroy(&send_trailing_metadata_);
}
@ -2376,11 +2314,9 @@ size_t RetryFilter::CallData::GetBatchIndex(
RetryFilter::CallData::PendingBatch* RetryFilter::CallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
PendingBatch* pending = &pending_batches_[idx];
GPR_ASSERT(pending->batch == nullptr);
pending->batch = batch;
@ -2407,11 +2343,9 @@ RetryFilter::CallData::PendingBatch* RetryFilter::CallData::PendingBatchesAdd(
// ops have already been sent, and we commit to that attempt.
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
chand_->per_rpc_retry_buffer_size_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand_, this);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: exceeded retry buffer size, committing",
chand_, this);
RetryCommit(call_attempt_.get());
}
return pending;
@ -2443,10 +2377,8 @@ void RetryFilter::CallData::MaybeClearPendingBatch(PendingBatch* pending) {
(!batch->recv_trailing_metadata ||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand_,
this);
}
grpc_retry_trace.Log(GPR_INFO, "chand=%p calld=%p: clearing pending batch",
chand_, this);
PendingBatchClear(pending);
}
}
@ -2499,11 +2431,9 @@ RetryFilter::CallData::PendingBatch* RetryFilter::CallData::PendingBatchFind(
PendingBatch* pending = &pending_batches_[i];
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: %s pending batch at index %" PRIuPTR,
chand_, this, log_message, i);
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: %s pending batch at index %" PRIuPTR,
chand_, this, log_message, i);
return pending;
}
}
@ -2517,9 +2447,8 @@ RetryFilter::CallData::PendingBatch* RetryFilter::CallData::PendingBatchFind(
void RetryFilter::CallData::RetryCommit(CallAttempt* call_attempt) {
if (retry_committed_) return;
retry_committed_ = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand_, this);
}
grpc_retry_trace.Log(GPR_INFO, "chand=%p calld=%p: committing retries",
chand_, this);
if (call_attempt != nullptr) {
// If the call attempt's LB call has been committed, inform the call
// dispatch controller that the call has been committed.
@ -2548,11 +2477,9 @@ void RetryFilter::CallData::StartRetryTimer(grpc_millis server_pushback_ms) {
} else {
next_attempt_time = retry_backoff_.NextAttemptTime();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
this, next_attempt_time - ExecCtx::Get()->Now());
}
grpc_retry_trace.Log(
GPR_INFO, "chand=%p calld=%p: retrying failed call in %" PRId64 " ms",
chand_, this, next_attempt_time - ExecCtx::Get()->Now());
// Schedule retry after computed delay.
GRPC_CLOSURE_INIT(&retry_closure_, OnRetryTimer, this, nullptr);
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");

@ -319,13 +319,12 @@ class Subchannel::ConnectedSubchannelStateWatcher
case GRPC_CHANNEL_TRANSIENT_FAILURE:
case GRPC_CHANNEL_SHUTDOWN: {
if (!c->disconnected_ && c->connected_subchannel_ != nullptr) {
if (grpc_trace_subchannel.enabled()) {
gpr_log(GPR_INFO,
"Connected subchannel %p of subchannel %p has gone into "
"%s. Attempting to reconnect.",
c->connected_subchannel_.get(), c,
ConnectivityStateName(new_state));
}
grpc_trace_subchannel.Log(
GPR_INFO,
"Connected subchannel %p of subchannel %p has gone into "
"%s. Attempting to reconnect.",
c->connected_subchannel_.get(), c,
ConnectivityStateName(new_state));
c->connected_subchannel_.reset();
if (c->channelz_node() != nullptr) {
c->channelz_node()->SetChildSocket(nullptr);
@ -736,10 +735,9 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
// Only update the value if the new keepalive time is larger.
if (new_keepalive_time > keepalive_time_) {
keepalive_time_ = new_keepalive_time;
if (grpc_trace_subchannel.enabled()) {
gpr_log(GPR_INFO, "Subchannel=%p: Throttling keepalive time to %d", this,
new_keepalive_time);
}
grpc_trace_subchannel.Log(GPR_INFO,
"Subchannel=%p: Throttling keepalive time to %d",
this, new_keepalive_time);
const grpc_arg arg_to_add = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), new_keepalive_time);
const char* arg_to_remove = GRPC_ARG_KEEPALIVE_TIME_MS;

@ -211,13 +211,12 @@ class CallData::ResumeBatchCanceller {
auto* calld = static_cast<CallData*>(self->elem_->call_data);
{
MutexLock lock(&calld->delay_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_fault_injection_filter_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling schdueled pick: "
"error=%s self=%p calld->resume_batch_canceller_=%p",
chand, calld, grpc_error_std_string(error).c_str(), self,
calld->resume_batch_canceller_);
}
grpc_fault_injection_filter_trace.Log(
GPR_INFO,
"chand=%p calld=%p: cancelling schdueled pick: "
"error=%s self=%p calld->resume_batch_canceller_=%p",
chand, calld, grpc_error_std_string(error).c_str(), self,
calld->resume_batch_canceller_);
if (error != GRPC_ERROR_NONE && calld->resume_batch_canceller_ == self) {
// Cancel the delayed pick.
calld->CancelDelayTimer();
@ -263,12 +262,11 @@ void CallData::StartTransportStreamOpBatch(
if (batch->send_initial_metadata) {
calld->DecideWhetherToInjectFaults(
batch->payload->send_initial_metadata.send_initial_metadata);
if (GRPC_TRACE_FLAG_ENABLED(grpc_fault_injection_filter_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: Fault injection triggered delay=%d abort=%d",
elem->channel_data, calld, calld->delay_request_,
calld->abort_request_);
}
grpc_fault_injection_filter_trace.Log(
GPR_INFO,
"chand=%p calld=%p: Fault injection triggered delay=%d abort=%d",
elem->channel_data, calld, calld->delay_request_,
calld->abort_request_);
if (calld->MaybeDelay()) {
// Delay the batch, and pass down the batch in the scheduled closure.
calld->DelayBatch(elem, batch);
@ -445,10 +443,9 @@ void CallData::ResumeBatch(void* arg, grpc_error_handle error) {
calld->resume_batch_canceller_ == nullptr) {
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_fault_injection_filter_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: Resuming delayed stream op batch %p",
elem->channel_data, calld, calld->delayed_batch_);
}
grpc_fault_injection_filter_trace.Log(
GPR_INFO, "chand=%p calld=%p: Resuming delayed stream op batch %p",
elem->channel_data, calld, calld->delayed_batch_);
// Lame the canceller
calld->resume_batch_canceller_ = nullptr;
// Finish fault injection.

@ -1216,19 +1216,17 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_error_std_string(error).c_str(),
write_state_name(t->write_state));
}
grpc_http_trace.Log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
static_cast<int>(closure->next_data.scratch /
CLOSURE_BARRIER_FIRST_REF_BIT),
static_cast<int>(closure->next_data.scratch %
CLOSURE_BARRIER_FIRST_REF_BIT),
desc, grpc_error_std_string(error).c_str(),
write_state_name(t->write_state));
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
closure->error_data.error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@ -1637,10 +1635,8 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s,
grpc_transport_stream_op_batch_string(op).c_str());
}
grpc_http_trace.Log(GPR_INFO, "perform_stream_op[s=%p]: %s", s,
grpc_transport_stream_op_batch_string(op).c_str());
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
op->handler_private.extra_arg = gs;
@ -1833,10 +1829,8 @@ static void perform_transport_op_locked(void* stream_op,
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", t,
grpc_transport_op_string(op).c_str());
}
grpc_http_trace.Log(GPR_INFO, "perform_transport_op[t=%p]: %s", t,
grpc_transport_op_string(op).c_str());
op->handler_private.extra_arg = gt;
GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
t->combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
@ -2626,10 +2620,9 @@ static void start_bdp_ping(void* tp, grpc_error_handle error) {
static void start_bdp_ping_locked(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
}
grpc_http_trace.Log(GPR_INFO, "%s: Start BDP ping err=%s",
t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
if (error != GRPC_ERROR_NONE || t->closed_with_error != GRPC_ERROR_NONE) {
return;
}
@ -2650,10 +2643,9 @@ static void finish_bdp_ping(void* tp, grpc_error_handle error) {
static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
}
grpc_http_trace.Log(GPR_INFO, "%s: Complete BDP ping err=%s",
t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
if (error != GRPC_ERROR_NONE || t->closed_with_error != GRPC_ERROR_NONE) {
GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
return;
@ -3189,21 +3181,19 @@ static void benign_reclaimer_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
t->peer_string.c_str());
}
grpc_resource_quota_trace.Log(GPR_INFO,
"HTTP2: %s - send goaway to free memory",
t->peer_string.c_str());
send_goaway(t,
grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE &&
GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
t->peer_string.c_str(),
grpc_chttp2_stream_map_size(&t->stream_map));
} else if (error == GRPC_ERROR_NONE) {
grpc_resource_quota_trace.Log(
GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
t->peer_string.c_str(), grpc_chttp2_stream_map_size(&t->stream_map));
}
t->benign_reclaimer_registered = false;
if (error != GRPC_ERROR_CANCELLED) {
@ -3226,10 +3216,8 @@ static void destructive_reclaimer_locked(void* arg, grpc_error_handle error) {
if (error == GRPC_ERROR_NONE && n > 0) {
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
grpc_chttp2_stream_map_rand(&t->stream_map));
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
t->peer_string.c_str(), s->id);
}
grpc_resource_quota_trace.Log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
t->peer_string.c_str(), s->id);
grpc_chttp2_cancel_stream(
t, s,
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),

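Aside on the refs/flags values in the complete_closure_step log above: both come from a single integer, with the reference count packed into the high part and the flag bits in the low part. A worked example, assuming CLOSURE_BARRIER_FIRST_REF_BIT is a power of two such as 1 << 16 (the actual constant is defined elsewhere in this file and is not shown here):

    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uintptr_t kFirstRefBit = uintptr_t{1} << 16;  // assumed value
      // Three outstanding barrier refs plus flag bits 0x0004 packed together.
      uintptr_t scratch = 3 * kFirstRefBit + 0x0004;
      int refs = static_cast<int>(scratch / kFirstRefBit);   // 3
      int flags = static_cast<int>(scratch % kFirstRefBit);  // 0x0004
      std::printf("refs=%d flags=0x%04x\n", refs, flags);
      return 0;
    }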
@ -260,14 +260,13 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
}
}
parser->incoming_settings[id] = parser->value;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR", t->peer_string.c_str(),
sp->name, parser->value);
}
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_ERROR, "CHTTP2: Ignoring unknown setting %d (value %d)",
parser->id, parser->value);
grpc_http_trace.Log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR",
t->peer_string.c_str(), sp->name, parser->value);
} else {
grpc_http_trace.Log(GPR_ERROR,
"CHTTP2: Ignoring unknown setting %d (value %d)",
parser->id, parser->value);
}
break;
}

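For reference on the "Ignoring unknown setting" branch above: RFC 7540 §6.5.2 defines six SETTINGS identifiers, and any other identifier takes the ignore-and-log path, as the spec requires. The wire-level values are:

    #include <cstdint>

    // SETTINGS identifiers defined by RFC 7540 §6.5.2; unknown ids must be
    // ignored, which is exactly what the parser above does.
    enum Http2SettingId : uint16_t {
      kHeaderTableSize = 0x1,
      kEnablePush = 0x2,
      kMaxConcurrentStreams = 0x3,
      kInitialWindowSize = 0x4,
      kMaxFrameSize = 0x5,
      kMaxHeaderListSize = 0x6,
    };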
@ -633,9 +633,8 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
}
}
c->advertise_table_size_change = 1;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
}
grpc_http_trace.Log(GPR_INFO, "set max table size from encoder to %d",
max_table_size);
}
void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,

@ -1184,9 +1184,7 @@ class HPackParser::Parser {
false);
}
(*dynamic_table_updates_allowed_)--;
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_hpack_parser)) {
gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", *size);
}
grpc_trace_chttp2_hpack_parser.Log(GPR_INFO, "MAX TABLE SIZE: %d", *size);
grpc_error_handle err = table_->SetCurrentTableSize(*size);
if (err != GRPC_ERROR_NONE) {
input_->SetError(err);

@ -76,9 +76,8 @@ void HPackTable::SetMaxBytes(uint32_t max_bytes) {
if (max_bytes_ == max_bytes) {
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes);
}
grpc_http_trace.Log(GPR_INFO, "Update hpack parser max size to %d",
max_bytes);
while (mem_used_ > max_bytes) {
EvictOne();
}
@ -96,9 +95,7 @@ grpc_error_handle HPackTable::SetCurrentTableSize(uint32_t bytes) {
max_bytes_)
.c_str());
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes);
}
grpc_http_trace.Log(GPR_INFO, "Update hpack parser table size to %d", bytes);
while (mem_used_ > bytes) {
EvictOne();
}

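What the eviction loops above enforce is the HPACK size rule from RFC 7541 §4.1: every dynamic-table entry is charged its name length plus its value length plus 32 bytes of overhead, and mem_used_ is the running sum of those charges. A tiny illustration (the helper name is made up):

    #include <cstddef>
    #include <string>

    // RFC 7541 §4.1: entry size = name bytes + value bytes + 32.
    size_t HpackEntrySize(const std::string& name, const std::string& value) {
      return name.size() + value.size() + 32;
    }

    // (":authority", "example.com") costs 10 + 11 + 32 = 53 bytes, so lowering
    // the limit below the running total evicts oldest entries until it fits.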
@ -300,9 +300,8 @@ static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t) {
case GRPC_CHTTP2_FRAME_GOAWAY:
return init_goaway_parser(t);
default:
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
grpc_http_trace.Log(GPR_ERROR, "Unknown frame type %02x",
t->incoming_frame_type);
return init_non_header_skip_frame_parser(t);
}
}
@ -786,9 +785,7 @@ static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
if (GPR_LIKELY(err == GRPC_ERROR_NONE)) {
return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, &unused)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_ERROR, "%s", grpc_error_std_string(err).c_str());
}
grpc_http_trace.Log(GPR_ERROR, "%s", grpc_error_std_string(err).c_str());
grpc_chttp2_parsing_become_skip_parser(t);
if (s) {
s->forced_close_error = err;

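For the "Unknown frame type" branch above, these are the frame types RFC 7540 defines; any other type byte is routed to the skip-frame parser, since unknown frame types must be ignored and discarded:

    #include <cstdint>

    // Frame type bytes from RFC 7540; everything else is skipped.
    enum Http2FrameType : uint8_t {
      kData = 0x0,
      kHeaders = 0x1,
      kPriority = 0x2,
      kRstStream = 0x3,
      kSettings = 0x4,
      kPushPromise = 0x5,
      kPing = 0x6,
      kGoaway = 0x7,
      kWindowUpdate = 0x8,
      kContinuation = 0x9,
    };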
@ -89,10 +89,9 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
} else {
t->lists[id].tail = s->links[id].prev;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_http2_stream_state)) {
gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
grpc_trace_http2_stream_state.Log(GPR_INFO, "%p[%d][%s]: remove from %s", t,
s->id, t->is_client ? "cli" : "svr",
stream_list_id_string(id));
}
static bool stream_list_maybe_remove(grpc_chttp2_transport* t,
@ -121,10 +120,9 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
}
t->lists[id].tail = s;
s->included[id] = 1;
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_http2_stream_state)) {
gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
grpc_trace_http2_stream_state.Log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr",
stream_list_id_string(id));
}
static bool stream_list_add(grpc_chttp2_transport* t, grpc_chttp2_stream* s,

@ -159,32 +159,29 @@ static bool update_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_flowctl_trace)) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d moved to stalled list by %s. This is FULLY expected "
"to happen in a healthy program that is not seeing flow control stalls."
" However, if you know that there are unwanted stalls, here is some "
"helpful data: [fc:pending=%" PRIdPTR ":pending-compressed=%" PRIdPTR
":flowed=%" PRId64 ":peer_initwin=%d:t_win=%" PRId64
":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string.c_str(), t, s->id, staller,
s->flow_controlled_buffer.length,
s->stream_compression_method ==
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
? 0
: s->compressed_data_buffer.length,
s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),
static_cast<uint32_t> GPR_MAX(
0,
s->flow_control->remote_window_delta() +
(int64_t)t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
s->flow_control->remote_window_delta());
}
grpc_flowctl_trace.Log(
GPR_DEBUG,
"%s:%p stream %d moved to stalled list by %s. This is FULLY expected "
"to happen in a healthy program that is not seeing flow control stalls."
" However, if you know that there are unwanted stalls, here is some "
"helpful data: [fc:pending=%" PRIdPTR ":pending-compressed=%" PRIdPTR
":flowed=%" PRId64 ":peer_initwin=%d:t_win=%" PRId64
":s_win=%d:s_delta=%" PRId64 "]",
t->peer_string.c_str(), t, s->id, staller,
s->flow_controlled_buffer.length,
s->stream_compression_method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS
? 0
: s->compressed_data_buffer.length,
s->flow_controlled_bytes_flowed,
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
t->flow_control->remote_window(),
static_cast<uint32_t> GPR_MAX(
0,
s->flow_control->remote_window_delta() +
(int64_t)t->settings[GRPC_PEER_SETTINGS]
[GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]),
s->flow_control->remote_window_delta());
}
/* How many bytes would we like to put on the wire during a single syscall */

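The s_win value in the report_stall message above is derived on the fly rather than stored: it is the peer's advertised initial window plus the per-stream delta, clamped at zero. A worked example with made-up numbers:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t peer_initial_window = 65535;  // peer's SETTINGS_INITIAL_WINDOW_SIZE
      int64_t remote_window_delta = -1024;  // per-stream adjustment
      // Mirrors GPR_MAX(0, remote_window_delta + peer_initial_window) above.
      int64_t s_win =
          std::max<int64_t>(0, remote_window_delta + peer_initial_window);
      std::printf("stream send window = %lld bytes\n",
                  static_cast<long long>(s_win));  // 64511
      return 0;
    }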
@ -54,10 +54,6 @@
#define GRPC_FLUSH_READ_SIZE 4096
grpc_core::TraceFlag grpc_cronet_trace(false, "cronet");
#define CRONET_LOG(...) \
do { \
if (grpc_cronet_trace.enabled()) gpr_log(__VA_ARGS__); \
} while (0)
enum e_op_result {
ACTION_TAKEN_WITH_CALLBACK,
@ -301,7 +297,7 @@ static void read_grpc_header(stream_obj* s) {
s->state.rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
s->state.rs.received_bytes = 0;
s->state.rs.compressed = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
s->state.rs.remaining_bytes);
}
@ -341,8 +337,8 @@ static void add_to_storage(struct stream_obj* s,
if (op->recv_trailing_metadata) {
s->state.pending_recv_trailing_metadata = true;
}
CRONET_LOG(GPR_DEBUG, "adding new op %p. %d in the queue.", new_op,
storage->num_pending_ops);
grpc_cronet_trace.Log(GPR_DEBUG, "adding new op %p. %d in the queue.", new_op,
storage->num_pending_ops);
gpr_mu_unlock(&s->mu);
}
@ -359,19 +355,20 @@ static void remove_from_storage(struct stream_obj* s,
s->storage.head = oas->next;
delete oas;
s->storage.num_pending_ops--;
CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
grpc_cronet_trace.Log(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
} else {
for (curr = s->storage.head; curr != nullptr; curr = curr->next) {
if (curr->next == oas) {
curr->next = oas->next;
s->storage.num_pending_ops--;
CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
grpc_cronet_trace.Log(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
s->storage.num_pending_ops);
delete oas;
break;
} else if (GPR_UNLIKELY(curr->next == nullptr)) {
CRONET_LOG(GPR_ERROR, "Reached end of LL and did not find op to free");
grpc_cronet_trace.Log(GPR_ERROR,
"Reached end of LL and did not find op to free");
}
}
}
@ -386,11 +383,12 @@ static void remove_from_storage(struct stream_obj* s,
static void execute_from_storage(stream_obj* s) {
gpr_mu_lock(&s->mu);
for (struct op_and_state* curr = s->storage.head; curr != nullptr;) {
CRONET_LOG(GPR_DEBUG, "calling op at %p. done = %d", curr, curr->done);
grpc_cronet_trace.Log(GPR_DEBUG, "calling op at %p. done = %d", curr,
curr->done);
GPR_ASSERT(!curr->done);
enum e_op_result result = execute_stream_op(curr);
CRONET_LOG(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr,
op_result_string(result));
grpc_cronet_trace.Log(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr,
op_result_string(result));
/* if this op is done, then remove it and free memory */
if (curr->done) {
struct op_and_state* next = curr->next;
@ -410,8 +408,9 @@ static void convert_cronet_array_to_metadata(
const bidirectional_stream_header_array* header_array,
grpc_chttp2_incoming_metadata_buffer* mds) {
for (size_t i = 0; i < header_array->count; i++) {
CRONET_LOG(GPR_DEBUG, "header key=%s, value=%s",
header_array->headers[i].key, header_array->headers[i].value);
grpc_cronet_trace.Log(GPR_DEBUG, "header key=%s, value=%s",
header_array->headers[i].key,
header_array->headers[i].value);
grpc_slice key = grpc_slice_intern(
grpc_slice_from_static_string(header_array->headers[i].key));
grpc_slice value;
@ -461,7 +460,7 @@ static void on_failed(bidirectional_stream* stream, int net_error) {
Cronet callback
*/
static void on_canceled(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
grpc_cronet_trace.Log(GPR_DEBUG, "on_canceled(%p)", stream);
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
@ -488,7 +487,7 @@ static void on_canceled(bidirectional_stream* stream) {
Cronet callback
*/
static void on_succeeded(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
grpc_cronet_trace.Log(GPR_DEBUG, "on_succeeded(%p)", stream);
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
@ -507,7 +506,7 @@ static void on_succeeded(bidirectional_stream* stream) {
Cronet callback
*/
static void on_stream_ready(bidirectional_stream* stream) {
CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
grpc_cronet_trace.Log(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
@ -524,7 +523,8 @@ static void on_stream_ready(bidirectional_stream* stream) {
* SEND_TRAILING_METADATA ops pending */
if (t->use_packet_coalescing) {
if (s->state.flush_cronet_when_ready) {
CRONET_LOG(GPR_DEBUG, "cronet_bidirectional_stream_flush (%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "cronet_bidirectional_stream_flush (%p)",
s->cbs);
bidirectional_stream_flush(stream);
}
}
@ -541,8 +541,9 @@ static void on_response_headers_received(
const char* negotiated_protocol) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
headers, negotiated_protocol);
grpc_cronet_trace.Log(GPR_DEBUG,
"R: on_response_headers_received(%p, %p, %s)", stream,
headers, negotiated_protocol);
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
/* Identify if this is a header or a trailer (in a trailer-only response case)
@ -579,7 +580,8 @@ static void on_write_completed(bidirectional_stream* stream, const char* data) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
grpc_cronet_trace.Log(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream,
data);
gpr_mu_lock(&s->mu);
if (s->state.ws.write_buffer) {
gpr_free(s->state.ws.write_buffer);
@ -598,12 +600,12 @@ static void on_read_completed(bidirectional_stream* stream, char* data,
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
count);
grpc_cronet_trace.Log(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream,
data, count);
gpr_mu_lock(&s->mu);
s->state.state_callback_received[OP_RECV_MESSAGE] = true;
if (count > 0 && s->state.flush_read) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
bidirectional_stream_read(s->cbs, s->state.rs.read_buffer,
GRPC_FLUSH_READ_SIZE);
gpr_mu_unlock(&s->mu);
@ -611,7 +613,7 @@ static void on_read_completed(bidirectional_stream* stream, char* data,
s->state.rs.received_bytes += count;
s->state.rs.remaining_bytes -= count;
if (s->state.rs.remaining_bytes > 0) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
s->state.state_op_done[OP_READ_REQ_MADE] = true;
bidirectional_stream_read(
s->cbs, s->state.rs.read_buffer + s->state.rs.received_bytes,
@ -637,8 +639,8 @@ static void on_response_trailers_received(
const bidirectional_stream_header_array* trailers) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
CRONET_LOG(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)", stream,
trailers);
grpc_cronet_trace.Log(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)",
stream, trailers);
stream_obj* s = static_cast<stream_obj*>(stream->annotation);
grpc_cronet_transport* t = s->curr_ct;
gpr_mu_lock(&s->mu);
@ -653,11 +655,13 @@ static void on_response_trailers_received(
if (!s->state.state_op_done[OP_SEND_TRAILING_METADATA] &&
!(s->state.state_op_done[OP_CANCEL_ERROR] ||
s->state.state_callback_received[OP_FAILED])) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_write (%p, 0)",
s->cbs);
s->state.state_callback_received[OP_SEND_MESSAGE] = false;
bidirectional_stream_write(s->cbs, "", 0, true);
if (t->use_packet_coalescing) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_flush (%p)",
s->cbs);
bidirectional_stream_flush(s->cbs);
}
s->state.state_op_done[OP_SEND_TRAILING_METADATA] = true;
@ -770,7 +774,7 @@ static void convert_metadata_to_cronet_headers(
gpr_free(value);
continue;
}
CRONET_LOG(GPR_DEBUG, "header %s = %s", key, value);
grpc_cronet_trace.Log(GPR_DEBUG, "header %s = %s", key, value);
headers[num_headers].key = key;
headers[num_headers].value = value;
num_headers++;
@ -833,34 +837,34 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
stream_state->state_callback_received[OP_FAILED];
if (is_canceled_or_failed) {
if (op_id == OP_SEND_INITIAL_METADATA) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
if (op_id == OP_SEND_MESSAGE) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
if (op_id == OP_SEND_TRAILING_METADATA) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
if (op_id == OP_CANCEL_ERROR) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
/* already executed */
if (op_id == OP_RECV_INITIAL_METADATA &&
stream_state->state_op_done[OP_RECV_INITIAL_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
if (op_id == OP_RECV_MESSAGE && op_state->state_op_done[OP_RECV_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
if (op_id == OP_RECV_TRAILING_METADATA &&
stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
/* ON_COMPLETE can be processed if one of the following conditions is met:
@ -873,7 +877,7 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
stream_state->state_callback_received[OP_CANCELED] ||
stream_state->state_callback_received[OP_SUCCEEDED] ||
!stream_state->state_op_done[OP_SEND_INITIAL_METADATA])) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
} else if (op_id == OP_SEND_INITIAL_METADATA) {
@ -955,7 +959,7 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
if (op_state->state_op_done[OP_ON_COMPLETE]) {
/* already executed (note we're checking op specific state, not stream
state) */
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
/* Check if every op that was asked for is done. */
@ -965,36 +969,36 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
* there are still recv ops pending. */
else if (curr_op->send_initial_metadata &&
!stream_state->state_callback_received[OP_SEND_INITIAL_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->send_message &&
!op_state->state_op_done[OP_SEND_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->send_message &&
!stream_state->state_callback_received[OP_SEND_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->send_trailing_metadata &&
!stream_state->state_op_done[OP_SEND_TRAILING_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_initial_metadata &&
!stream_state->state_op_done[OP_RECV_INITIAL_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_message &&
!op_state->state_op_done[OP_RECV_MESSAGE]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->cancel_stream &&
!stream_state->state_callback_received[OP_CANCELED]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
} else if (curr_op->recv_trailing_metadata) {
/* We aren't done with trailing metadata yet */
if (!stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
/* We've asked for actual message in an earlier op, and it hasn't been
@ -1004,7 +1008,7 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
op has asked), and the read hasn't been delivered. */
if (!curr_op->recv_message &&
!stream_state->state_callback_received[OP_SUCCEEDED]) {
CRONET_LOG(GPR_DEBUG, "Because");
grpc_cronet_trace.Log(GPR_DEBUG, "Because");
result = false;
}
}
@ -1016,8 +1020,8 @@ static bool op_can_be_run(grpc_transport_stream_op_batch* curr_op,
result = false;
}
}
CRONET_LOG(GPR_DEBUG, "op_can_be_run %s : %s", op_id_string(op_id),
result ? "YES" : "NO");
grpc_cronet_trace.Log(GPR_DEBUG, "op_can_be_run %s : %s", op_id_string(op_id),
result ? "YES" : "NO");
return result;
}
@ -1032,14 +1036,16 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
enum e_op_result result = NO_ACTION_POSSIBLE;
if (stream_op->send_initial_metadata &&
op_can_be_run(stream_op, s, &oas->state, OP_SEND_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_SEND_INITIAL_METADATA",
oas);
/* Start new cronet stream. It is destroyed in on_succeeded, on_canceled,
* on_failed */
GPR_ASSERT(s->cbs == nullptr);
GPR_ASSERT(!stream_state->state_op_done[OP_SEND_INITIAL_METADATA]);
s->cbs =
bidirectional_stream_create(t->engine, s->curr_gs, &cronet_callbacks);
CRONET_LOG(GPR_DEBUG, "%p = bidirectional_stream_create()", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "%p = bidirectional_stream_create()",
s->cbs);
if (t->use_packet_coalescing) {
bidirectional_stream_disable_auto_flush(s->cbs, true);
bidirectional_stream_delay_request_headers_until_flush(s->cbs, true);
@ -1052,8 +1058,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
t->host, &url, &s->header_array.headers, &s->header_array.count,
&method);
s->header_array.capacity = s->header_array.count;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_start(%p, %s)", s->cbs,
url.c_str());
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_start(%p, %s)",
s->cbs, url.c_str());
bidirectional_stream_start(s->cbs, url.c_str(), 0, method, &s->header_array,
false);
unsigned int header_index;
@ -1071,13 +1077,14 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_WITH_CALLBACK;
} else if (stream_op->send_message &&
op_can_be_run(stream_op, s, &oas->state, OP_SEND_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_SEND_MESSAGE", oas);
stream_state->pending_send_message = false;
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
grpc_cronet_trace.Log(GPR_DEBUG,
"Stream is either cancelled, failed or finished");
} else {
grpc_slice_buffer write_slice_buffer;
grpc_slice slice;
@ -1105,15 +1112,16 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
&write_buffer_size,
stream_op->payload->send_message.send_message->flags());
if (write_buffer_size > 0) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, %p)", s->cbs,
stream_state->ws.write_buffer);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_write (%p, %p)",
s->cbs, stream_state->ws.write_buffer);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
bidirectional_stream_write(s->cbs, stream_state->ws.write_buffer,
static_cast<int>(write_buffer_size), false);
grpc_slice_buffer_destroy_internal(&write_slice_buffer);
if (t->use_packet_coalescing) {
if (!stream_op->send_trailing_metadata) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_flush (%p)",
s->cbs);
bidirectional_stream_flush(s->cbs);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
@ -1134,18 +1142,22 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
} else if (stream_op->send_trailing_metadata &&
op_can_be_run(stream_op, s, &oas->state,
OP_SEND_TRAILING_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_SEND_TRAILING_METADATA",
oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR] ||
stream_state->state_callback_received[OP_FAILED] ||
stream_state->state_callback_received[OP_SUCCEEDED]) {
result = NO_ACTION_POSSIBLE;
CRONET_LOG(GPR_DEBUG, "Stream is either cancelled, failed or finished");
grpc_cronet_trace.Log(GPR_DEBUG,
"Stream is either cancelled, failed or finished");
} else {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, 0)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_write (%p, 0)",
s->cbs);
stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
bidirectional_stream_write(s->cbs, "", 0, true);
if (t->use_packet_coalescing) {
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_flush (%p)",
s->cbs);
bidirectional_stream_flush(s->cbs);
}
result = ACTION_TAKEN_WITH_CALLBACK;
@ -1154,7 +1166,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
} else if (stream_op->recv_initial_metadata &&
op_can_be_run(stream_op, s, &oas->state,
OP_RECV_INITIAL_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_RECV_INITIAL_METADATA",
oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
@ -1183,9 +1196,9 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_op->recv_message &&
op_can_be_run(stream_op, s, &oas->state, OP_RECV_MESSAGE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_RECV_MESSAGE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
grpc_cronet_trace.Log(GPR_DEBUG, "Stream is cancelled.");
grpc_core::ExecCtx::Run(
DEBUG_LOCATION, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -1193,7 +1206,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->state_callback_received[OP_FAILED]) {
CRONET_LOG(GPR_DEBUG, "Stream failed.");
grpc_cronet_trace.Log(GPR_DEBUG, "Stream failed.");
grpc_core::ExecCtx::Run(
DEBUG_LOCATION, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -1202,7 +1215,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->rs.read_stream_closed) {
/* No more data will be received */
CRONET_LOG(GPR_DEBUG, "read stream closed");
grpc_cronet_trace.Log(GPR_DEBUG, "read stream closed");
grpc_core::ExecCtx::Run(
DEBUG_LOCATION, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -1210,7 +1223,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
oas->state.state_op_done[OP_RECV_MESSAGE] = true;
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_state->flush_read) {
CRONET_LOG(GPR_DEBUG, "flush read");
grpc_cronet_trace.Log(GPR_DEBUG, "flush read");
grpc_core::ExecCtx::Run(
DEBUG_LOCATION, stream_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE);
@ -1225,15 +1238,16 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
parse_grpc_header(
reinterpret_cast<const uint8_t*>(stream_state->rs.read_buffer),
&stream_state->rs.length_field, &stream_state->rs.compressed);
CRONET_LOG(GPR_DEBUG, "length field = %d",
stream_state->rs.length_field);
grpc_cronet_trace.Log(GPR_DEBUG, "length field = %d",
stream_state->rs.length_field);
if (stream_state->rs.length_field > 0) {
stream_state->rs.read_buffer = static_cast<char*>(
gpr_malloc(static_cast<size_t>(stream_state->rs.length_field)));
GPR_ASSERT(stream_state->rs.read_buffer);
stream_state->rs.remaining_bytes = stream_state->rs.length_field;
stream_state->rs.received_bytes = 0;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_read(%p)",
s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
@ -1241,7 +1255,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
stream_state->rs.remaining_bytes = 0;
CRONET_LOG(GPR_DEBUG, "read operation complete. Empty response.");
grpc_cronet_trace.Log(GPR_DEBUG,
"read operation complete. Empty response.");
/* Clean up read_slice_buffer in case there is unread data. */
grpc_slice_buffer_destroy_internal(
&stream_state->rs.read_slice_buffer);
@ -1273,7 +1288,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
stream_state->rs.remaining_bytes = GRPC_HEADER_SIZE_IN_BYTES;
stream_state->rs.received_bytes = 0;
stream_state->rs.compressed = false;
CRONET_LOG(GPR_DEBUG, "bidirectional_stream_read(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "bidirectional_stream_read(%p)",
s->cbs);
stream_state->state_op_done[OP_READ_REQ_MADE] =
true; /* Indicates that at least one read request has been made */
bidirectional_stream_read(s->cbs, stream_state->rs.read_buffer,
@ -1283,7 +1299,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = NO_ACTION_POSSIBLE;
}
} else if (stream_state->rs.remaining_bytes == 0) {
CRONET_LOG(GPR_DEBUG, "read operation complete");
grpc_cronet_trace.Log(GPR_DEBUG, "read operation complete");
grpc_slice read_data_slice =
GRPC_SLICE_MALLOC((uint32_t)stream_state->rs.length_field);
uint8_t* dst_p = GRPC_SLICE_START_PTR(read_data_slice);
@ -1316,7 +1332,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
} else if (stream_op->recv_trailing_metadata &&
op_can_be_run(stream_op, s, &oas->state,
OP_RECV_TRAILING_METADATA)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_TRAILING_METADATA", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_RECV_TRAILING_METADATA",
oas);
grpc_error_handle error = GRPC_ERROR_NONE;
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
error = GRPC_ERROR_REF(stream_state->cancel_error);
@ -1340,9 +1357,10 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_NO_CALLBACK;
} else if (stream_op->cancel_stream &&
op_can_be_run(stream_op, s, &oas->state, OP_CANCEL_ERROR)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_CANCEL_ERROR", oas);
if (s->cbs) {
CRONET_LOG(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)", s->cbs);
grpc_cronet_trace.Log(GPR_DEBUG, "W: bidirectional_stream_cancel(%p)",
s->cbs);
bidirectional_stream_cancel(s->cbs);
result = ACTION_TAKEN_WITH_CALLBACK;
} else {
@ -1354,7 +1372,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
GRPC_ERROR_REF(stream_op->payload->cancel_stream.cancel_error);
}
} else if (op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
grpc_cronet_trace.Log(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
if (stream_op->on_complete) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, stream_op->on_complete,
@ -1439,7 +1457,7 @@ static void set_pollset_set_do_nothing(grpc_transport* /*gt*/,
static void perform_stream_op(grpc_transport* /*gt*/, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
CRONET_LOG(GPR_DEBUG, "perform_stream_op");
grpc_cronet_trace.Log(GPR_DEBUG, "perform_stream_op");
if (op->send_initial_metadata &&
header_has_authority(op->payload->send_initial_metadata
.send_initial_metadata->list.head)) {

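The cronet read path above (read a fixed-size header, pull out length_field and compressed, then read exactly that many payload bytes) follows the standard gRPC length-prefixed message framing: a 1-byte compressed flag followed by a 4-byte big-endian length. A minimal parser sketch for that 5-byte prefix (struct and function names are made up):

    #include <cstdint>

    struct GrpcFrameHeader {
      bool compressed;
      uint32_t length;  // payload bytes that follow the 5-byte prefix
    };

    GrpcFrameHeader ParseGrpcFrameHeader(const uint8_t header[5]) {
      GrpcFrameHeader h;
      h.compressed = header[0] != 0;
      h.length = (static_cast<uint32_t>(header[1]) << 24) |
                 (static_cast<uint32_t>(header[2]) << 16) |
                 (static_cast<uint32_t>(header[3]) << 8) |
                 static_cast<uint32_t>(header[4]);
      return h;
    }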
@ -1587,10 +1587,8 @@ grpc_error_handle RetryPolicyParse(
} else if (code == "unavailable") {
retry_to_return.retry_on.Add(GRPC_STATUS_UNAVAILABLE);
} else {
if (GRPC_TRACE_FLAG_ENABLED(*context.tracer)) {
gpr_log(GPR_INFO, "Unsupported retry_on policy %s.",
std::string(code).c_str());
}
context.tracer->Log(GPR_INFO, "Unsupported retry_on policy %s.",
std::string(code).c_str());
}
}
// TODO(donnadionne): when we add support for per_try_timeout, we will need to

@ -200,10 +200,9 @@ class XdsClient::ChannelState::AdsCallState
.c_str());
watcher_error = grpc_error_set_int(
watcher_error, GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] %s", ads_calld_->xds_client(),
grpc_error_std_string(watcher_error).c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] %s",
ads_calld_->xds_client(),
grpc_error_std_string(watcher_error).c_str());
if (type_url_ == XdsApi::kLdsTypeUrl) {
ListenerState& state = ads_calld_->xds_client()->listener_map_[name_];
state.meta.client_status = XdsApi::ResourceMetadata::DOES_NOT_EXIST;
@ -487,20 +486,17 @@ XdsClient::ChannelState::ChannelState(WeakRefCountedPtr<XdsClient> xds_client,
: nullptr),
xds_client_(std::move(xds_client)),
server_(server) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] creating channel to %s",
xds_client_.get(), server.server_uri.c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] creating channel to %s",
xds_client_.get(), server.server_uri.c_str());
channel_ = CreateXdsChannel(xds_client_->args_, server);
GPR_ASSERT(channel_ != nullptr);
StartConnectivityWatchLocked();
}
XdsClient::ChannelState::~ChannelState() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] Destroying xds channel %p", xds_client(),
this);
}
grpc_xds_client_trace.Log(GPR_INFO,
"[xds_client %p] Destroying xds channel %p",
xds_client(), this);
grpc_channel_destroy(channel_);
xds_client_.reset(DEBUG_LOCATION, "ChannelState");
}
@ -629,12 +625,11 @@ void XdsClient::ChannelState::RetryableCall<T>::StartNewCallLocked() {
if (shutting_down_) return;
GPR_ASSERT(chand_->channel_ != nullptr);
GPR_ASSERT(calld_ == nullptr);
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] Start new call from retryable call (chand: %p, "
"retryable call: %p)",
chand()->xds_client(), chand(), this);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Start new call from retryable call (chand: %p, "
"retryable call: %p)",
chand()->xds_client(), chand(), this);
calld_ = MakeOrphanable<T>(
this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
}
@ -643,13 +638,12 @@ template <typename T>
void XdsClient::ChannelState::RetryableCall<T>::StartRetryTimerLocked() {
if (shutting_down_) return;
const grpc_millis next_attempt_time = backoff_.NextAttemptTime();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
grpc_millis timeout = GPR_MAX(next_attempt_time - ExecCtx::Get()->Now(), 0);
gpr_log(GPR_INFO,
"[xds_client %p] Failed to connect to xds server (chand: %p) "
"retry timer will fire in %" PRId64 "ms.",
chand()->xds_client(), chand(), timeout);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Failed to connect to xds server (chand: %p) "
"retry timer will fire in %" PRId64 "ms.",
chand()->xds_client(), chand(),
GPR_MAX(next_attempt_time - ExecCtx::Get()->Now(), 0));
this->Ref(DEBUG_LOCATION, "RetryableCall+retry_timer_start").release();
grpc_timer_init(&retry_timer_, next_attempt_time, &on_retry_timer_);
retry_timer_callback_pending_ = true;
@ -671,12 +665,10 @@ void XdsClient::ChannelState::RetryableCall<T>::OnRetryTimerLocked(
grpc_error_handle error) {
retry_timer_callback_pending_ = false;
if (!shutting_down_ && error == GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(
GPR_INFO,
"[xds_client %p] Retry timer fires (chand: %p, retryable call: %p)",
chand()->xds_client(), chand(), this);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Retry timer fires (chand: %p, retryable call: %p)",
chand()->xds_client(), chand(), this);
StartNewCallLocked();
}
GRPC_ERROR_UNREF(error);
@ -711,12 +703,11 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
grpc_metadata_array_init(&initial_metadata_recv_);
grpc_metadata_array_init(&trailing_metadata_recv_);
// Start the call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] Starting ADS call (chand: %p, calld: %p, "
"call: %p)",
xds_client(), chand(), this, call_);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Starting ADS call (chand: %p, calld: %p, "
"call: %p)",
xds_client(), chand(), this, call_);
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@ -829,15 +820,14 @@ void XdsClient::ChannelState::AdsCallState::SendMessageLocked(
state_map_.erase(type_url);
}
sent_initial_message_ = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] sending ADS request: type=%s version=%s nonce=%s "
"error=%s resources=%s",
xds_client(), type_url.c_str(),
xds_client()->resource_version_map_[type_url].c_str(),
state.nonce.c_str(), grpc_error_std_string(state.error).c_str(),
absl::StrJoin(resource_names, " ").c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] sending ADS request: type=%s version=%s nonce=%s "
"error=%s resources=%s",
xds_client(), type_url.c_str(),
xds_client()->resource_version_map_[type_url].c_str(),
state.nonce.c_str(), grpc_error_std_string(state.error).c_str(),
absl::StrJoin(resource_names, " ").c_str());
GRPC_ERROR_UNREF(state.error);
state.error = GRPC_ERROR_NONE;
// Create message payload.
@ -905,12 +895,10 @@ XdsApi::ResourceMetadata CreateResourceMetadataAcked(
void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdateLocked(
std::string version, grpc_millis update_time,
XdsApi::LdsUpdateMap lds_update_map) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] LDS update received containing %" PRIuPTR
" resources",
xds_client(), lds_update_map.size());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] LDS update received containing %" PRIuPTR " resources",
xds_client(), lds_update_map.size());
auto& lds_state = state_map_[XdsApi::kLdsTypeUrl];
std::set<std::string> rds_resource_names_seen;
for (auto& p : lds_update_map) {
@ -918,10 +906,9 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdateLocked(
XdsApi::LdsUpdate& lds_update = p.second.resource;
auto& state = lds_state.subscribed_resources[listener_name];
if (state != nullptr) state->Finish();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] LDS resource %s: %s", xds_client(),
listener_name.c_str(), lds_update.ToString().c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] LDS resource %s: %s",
xds_client(), listener_name.c_str(),
lds_update.ToString().c_str());
// Record the RDS resource names seen.
if (!lds_update.http_connection_manager.route_config_name.empty()) {
rds_resource_names_seen.insert(
@ -931,12 +918,11 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdateLocked(
ListenerState& listener_state = xds_client()->listener_map_[listener_name];
if (listener_state.update.has_value() &&
*listener_state.update == lds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] LDS update for %s identical to current, "
"ignoring.",
xds_client(), listener_name.c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] LDS update for %s identical to current, "
"ignoring.",
xds_client(), listener_name.c_str());
continue;
}
// Update the listener state.
@ -989,32 +975,27 @@ void XdsClient::ChannelState::AdsCallState::AcceptLdsUpdateLocked(
void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdateLocked(
std::string version, grpc_millis update_time,
XdsApi::RdsUpdateMap rds_update_map) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] RDS update received containing %" PRIuPTR
" resources",
xds_client(), rds_update_map.size());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] RDS update received containing %" PRIuPTR " resources",
xds_client(), rds_update_map.size());
auto& rds_state = state_map_[XdsApi::kRdsTypeUrl];
for (auto& p : rds_update_map) {
const std::string& route_config_name = p.first;
XdsApi::RdsUpdate& rds_update = p.second.resource;
auto& state = rds_state.subscribed_resources[route_config_name];
if (state != nullptr) state->Finish();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] RDS resource:\n%s", xds_client(),
rds_update.ToString().c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] RDS resource:\n%s",
xds_client(), rds_update.ToString().c_str());
RouteConfigState& route_config_state =
xds_client()->route_config_map_[route_config_name];
// Ignore identical update.
if (route_config_state.update.has_value() &&
*route_config_state.update == rds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] RDS resource identical to current, ignoring",
xds_client());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] RDS resource identical to current, ignoring",
xds_client());
continue;
}
// Update the cache.
@ -1031,12 +1012,10 @@ void XdsClient::ChannelState::AdsCallState::AcceptRdsUpdateLocked(
void XdsClient::ChannelState::AdsCallState::AcceptCdsUpdateLocked(
std::string version, grpc_millis update_time,
XdsApi::CdsUpdateMap cds_update_map) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] CDS update received containing %" PRIuPTR
" resources",
xds_client(), cds_update_map.size());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] CDS update received containing %" PRIuPTR " resources",
xds_client(), cds_update_map.size());
auto& cds_state = state_map_[XdsApi::kCdsTypeUrl];
std::set<std::string> eds_resource_names_seen;
for (auto& p : cds_update_map) {
@ -1044,10 +1023,9 @@ void XdsClient::ChannelState::AdsCallState::AcceptCdsUpdateLocked(
XdsApi::CdsUpdate& cds_update = p.second.resource;
auto& state = cds_state.subscribed_resources[cluster_name];
if (state != nullptr) state->Finish();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] cluster=%s: %s", xds_client(),
cluster_name, cds_update.ToString().c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] cluster=%s: %s",
xds_client(), cluster_name,
cds_update.ToString().c_str());
// Record the EDS resource names seen.
eds_resource_names_seen.insert(cds_update.eds_service_name.empty()
? cluster_name
@ -1056,11 +1034,10 @@ void XdsClient::ChannelState::AdsCallState::AcceptCdsUpdateLocked(
ClusterState& cluster_state = xds_client()->cluster_map_[cluster_name];
if (cluster_state.update.has_value() &&
*cluster_state.update == cds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] CDS update identical to current, ignoring.",
xds_client());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] CDS update identical to current, ignoring.",
xds_client());
continue;
}
// Update the cluster state.
@ -1112,32 +1089,28 @@ void XdsClient::ChannelState::AdsCallState::AcceptCdsUpdateLocked(
void XdsClient::ChannelState::AdsCallState::AcceptEdsUpdateLocked(
std::string version, grpc_millis update_time,
XdsApi::EdsUpdateMap eds_update_map) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] EDS update received containing %" PRIuPTR
" resources",
xds_client(), eds_update_map.size());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] EDS update received containing %" PRIuPTR " resources",
xds_client(), eds_update_map.size());
auto& eds_state = state_map_[XdsApi::kEdsTypeUrl];
for (auto& p : eds_update_map) {
const char* eds_service_name = p.first.c_str();
XdsApi::EdsUpdate& eds_update = p.second.resource;
auto& state = eds_state.subscribed_resources[eds_service_name];
if (state != nullptr) state->Finish();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] EDS resource %s: %s", xds_client(),
eds_service_name, eds_update.ToString().c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] EDS resource %s: %s",
xds_client(), eds_service_name,
eds_update.ToString().c_str());
EndpointState& endpoint_state =
xds_client()->endpoint_map_[eds_service_name];
// Ignore identical update.
if (endpoint_state.update.has_value() &&
*endpoint_state.update == eds_update) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] EDS update identical to current, ignoring.",
xds_client());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] EDS update identical to current, ignoring.",
xds_client());
continue;
}
// Update the cluster state.
@ -1170,13 +1143,11 @@ template <typename StateMap>
void XdsClient::ChannelState::AdsCallState::RejectAdsUpdateLocked(
grpc_millis update_time, const XdsApi::AdsParseResult& result,
StateMap* state_map) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] %s update NACKed containing %" PRIuPTR
" resources",
xds_client(), result.type_url.c_str(),
result.resource_names_failed.size());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] %s update NACKed containing %" PRIuPTR " resources",
xds_client(), result.type_url.c_str(),
result.resource_names_failed.size());
std::string details = grpc_error_std_string(result.parse_error);
for (auto& name : result.resource_names_failed) {
auto it = state_map->find(name);
@ -1555,12 +1526,11 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
grpc_metadata_array_init(&initial_metadata_recv_);
grpc_metadata_array_init(&trailing_metadata_recv_);
// Start the call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] Starting LRS call (chand: %p, calld: %p, "
"call: %p)",
xds_client(), chand(), this, call_);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Starting LRS call (chand: %p, calld: %p, "
"call: %p)",
xds_client(), chand(), this, call_);
// Create the ops.
grpc_call_error call_error;
grpc_op ops[3];
@ -1743,23 +1713,21 @@ bool XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked() {
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS) {
new_load_reporting_interval =
GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] Increased load_report_interval to minimum "
"value %dms",
xds_client(), GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Increased load_report_interval to minimum "
"value %dms",
xds_client(), GRPC_XDS_MIN_CLIENT_LOAD_REPORTING_INTERVAL_MS);
}
// Ignore identical update.
if (send_all_clusters == send_all_clusters_ &&
cluster_names_ == new_cluster_names &&
load_reporting_interval_ == new_load_reporting_interval) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] Incoming LRS response identical to current, "
"ignoring.",
xds_client());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] Incoming LRS response identical to current, "
"ignoring.",
xds_client());
return;
}
// Stop current load reporting (if any) to adopt the new config.
@ -1865,18 +1833,16 @@ XdsClient::XdsClient(std::unique_ptr<XdsBootstrap> bootstrap,
bootstrap_->certificate_providers())),
api_(this, &grpc_xds_client_trace, bootstrap_->node(),
&bootstrap_->certificate_providers()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] creating xds client", this);
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] creating xds client",
this);
// Create ChannelState object.
chand_ = MakeOrphanable<ChannelState>(
WeakRef(DEBUG_LOCATION, "XdsClient+ChannelState"), bootstrap_->server());
}
XdsClient::~XdsClient() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] destroying xds client", this);
}
grpc_xds_client_trace.Log(GPR_INFO, "[xds_client %p] destroying xds client",
this);
grpc_channel_args_destroy(args_);
grpc_pollset_set_destroy(interested_parties_);
}
@ -1902,9 +1868,8 @@ void XdsClient::RemoveChannelzLinkage(
}
void XdsClient::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] shutting down xds client", this);
}
grpc_xds_client_trace.Log(GPR_INFO,
"[xds_client %p] shutting down xds client", this);
{
MutexLock lock(g_mu);
if (g_xds_client == this) g_xds_client = nullptr;
@ -1938,10 +1903,9 @@ void XdsClient::WatchListenerData(
// If we've already received an LDS update, notify the new watcher
// immediately.
if (listener_state.update.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] returning cached listener data for %s",
this, listener_name_str.c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] returning cached listener data for %s", this,
listener_name_str.c_str());
w->OnListenerChanged(*listener_state.update);
}
chand_->SubscribeLocked(XdsApi::kLdsTypeUrl, listener_name_str);
@ -1977,11 +1941,9 @@ void XdsClient::WatchRouteConfigData(
// If we've already received an RDS update, notify the new watcher
// immediately.
if (route_config_state.update.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] returning cached route config data for %s", this,
route_config_name_str.c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] returning cached route config data for %s",
this, route_config_name_str.c_str());
w->OnRouteConfigChanged(*route_config_state.update);
}
chand_->SubscribeLocked(XdsApi::kRdsTypeUrl, route_config_name_str);
@ -2017,10 +1979,9 @@ void XdsClient::WatchClusterData(
// If we've already received a CDS update, notify the new watcher
// immediately.
if (cluster_state.update.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] returning cached cluster data for %s",
this, cluster_name_str.c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] returning cached cluster data for %s", this,
cluster_name_str.c_str());
w->OnClusterChanged(cluster_state.update.value());
}
chand_->SubscribeLocked(XdsApi::kCdsTypeUrl, cluster_name_str);
@ -2055,10 +2016,9 @@ void XdsClient::WatchEndpointData(
// If we've already received an EDS update, notify the new watcher
// immediately.
if (endpoint_state.update.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] returning cached endpoint data for %s",
this, eds_service_name_str.c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] returning cached endpoint data for %s", this,
eds_service_name_str.c_str());
w->OnEndpointChanged(endpoint_state.update.value());
}
chand_->SubscribeLocked(XdsApi::kEdsTypeUrl, eds_service_name_str);
@ -2234,9 +2194,8 @@ void XdsClient::NotifyOnErrorLocked(grpc_error_handle error) {
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
bool send_all_clusters, const std::set<std::string>& clusters) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] start building load report", this);
}
grpc_xds_client_trace.Log(GPR_INFO,
"[xds_client %p] start building load report", this);
XdsApi::ClusterLoadReportMap snapshot_map;
for (auto load_report_it = load_report_map_.begin();
load_report_it != load_report_map_.end();) {
@ -2258,12 +2217,11 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
if (load_report.drop_stats != nullptr) {
snapshot.dropped_requests +=
load_report.drop_stats->GetSnapshotAndReset();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] cluster=%s eds_service_name=%s drop_stats=%p",
this, cluster_key.first.c_str(), cluster_key.second.c_str(),
load_report.drop_stats);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] cluster=%s eds_service_name=%s drop_stats=%p", this,
cluster_key.first.c_str(), cluster_key.second.c_str(),
load_report.drop_stats);
}
// Aggregate locality stats.
for (auto it = load_report.locality_stats.begin();
@ -2276,14 +2234,13 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
if (locality_state.locality_stats != nullptr) {
locality_snapshot +=
locality_state.locality_stats->GetSnapshotAndReset();
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] cluster=%s eds_service_name=%s "
"locality=%s locality_stats=%p",
this, cluster_key.first.c_str(), cluster_key.second.c_str(),
locality_name->AsHumanReadableString().c_str(),
locality_state.locality_stats);
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] cluster=%s eds_service_name=%s "
"locality=%s locality_stats=%p",
this, cluster_key.first.c_str(), cluster_key.second.c_str(),
locality_name->AsHumanReadableString().c_str(),
locality_state.locality_stats);
}
// If the only thing left in this entry was final snapshots from
// deleted locality stats objects, remove the entry.
@ -2375,12 +2332,11 @@ std::string GetBootstrapContents(const char* fallback_config,
// First, try GRPC_XDS_BOOTSTRAP env var.
grpc_core::UniquePtr<char> path(gpr_getenv("GRPC_XDS_BOOTSTRAP"));
if (path != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: %s",
path.get());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: %s",
path.get());
grpc_slice contents;
*error =
grpc_load_file(path.get(), /*add_null_terminator=*/true, &contents);
@ -2393,18 +2349,16 @@ std::string GetBootstrapContents(const char* fallback_config,
grpc_core::UniquePtr<char> env_config(
gpr_getenv("GRPC_XDS_BOOTSTRAP_CONFIG"));
if (env_config != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"Got bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG "
"environment variable");
}
grpc_xds_client_trace.Log(
GPR_INFO,
"Got bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG "
"environment variable");
return env_config.get();
}
// Finally, try fallback config.
if (fallback_config != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "Got bootstrap contents from fallback config");
}
grpc_xds_client_trace.Log(GPR_INFO,
"Got bootstrap contents from fallback config");
return fallback_config;
}
// No bootstrap config found.
@ -2446,10 +2400,8 @@ RefCountedPtr<XdsClient> XdsClient::GetOrCreate(const grpc_channel_args* args,
std::string bootstrap_contents =
GetBootstrapContents(g_fallback_bootstrap_config, error);
if (*error != GRPC_ERROR_NONE) return nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "xDS bootstrap contents: %s",
bootstrap_contents.c_str());
}
grpc_xds_client_trace.Log(GPR_INFO, "xDS bootstrap contents: %s",
bootstrap_contents.c_str());
// Parse bootstrap.
std::unique_ptr<XdsBootstrap> bootstrap =
XdsBootstrap::Create(bootstrap_contents, error);

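A minimal sketch of the call-site pattern repeated throughout the xds_client.cc hunks above: the explicit GRPC_TRACE_FLAG_ENABLED guard disappears and the flag's new Log method performs the check itself. The call sites still look like they pass only a severity plus format arguments because, as with gpr_log, the GPR_INFO / GPR_DEBUG macros expand to __FILE__, __LINE__ and a severity constant, lining up with the Log(file, line, severity, format, ...) signature added to trace.h later in this diff. TinyTraceFlag and SKETCH_INFO below are illustrative stand-ins, not the real gRPC types; the severity argument is dropped for brevity.

#include <cstdarg>
#include <cstdio>

// SKETCH_INFO plays the role of GPR_INFO: it bundles file and line into the call.
#define SKETCH_INFO __FILE__, __LINE__

class TinyTraceFlag {
 public:
  explicit TinyTraceFlag(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

  void Log(const char* file, int line, const char* format, ...) {
    if (!enabled_) return;  // a disabled flag turns the whole call into a no-op
    va_list args;
    va_start(args, format);
    std::fprintf(stderr, "%s:%d: ", file, line);
    std::vfprintf(stderr, format, args);
    std::fputc('\n', stderr);
    va_end(args);
  }

 private:
  bool enabled_;
};

TinyTraceFlag example_trace(true);

void BuildLoadReport(void* self) {
  // Before: every call site guarded the log itself.
  if (example_trace.enabled()) {
    std::fprintf(stderr, "[xds_client %p] start building load report\n", self);
  }
  // After: the guard lives inside Log(), as in the hunks above.
  example_trace.Log(SKETCH_INFO, "[xds_client %p] start building load report",
                    self);
}

int main() { BuildLoadReport(nullptr); }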
@ -52,22 +52,19 @@ XdsClusterDropStats::XdsClusterDropStats(RefCountedPtr<XdsClient> xds_client,
lrs_server_name_(lrs_server_name),
cluster_name_(cluster_name),
eds_service_name_(eds_service_name) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO, "[xds_client %p] created drop stats %p for {%s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] created drop stats %p for {%s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str());
}
XdsClusterDropStats::~XdsClusterDropStats() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] destroying drop stats %p for {%s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO, "[xds_client %p] destroying drop stats %p for {%s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str());
xds_client_->RemoveClusterDropStats(lrs_server_name_, cluster_name_,
eds_service_name_, this);
xds_client_.reset(DEBUG_LOCATION, "DropStats");
@ -106,25 +103,23 @@ XdsClusterLocalityStats::XdsClusterLocalityStats(
cluster_name_(cluster_name),
eds_service_name_(eds_service_name),
name_(std::move(name)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] created locality stats %p for {%s, %s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str(),
name_->AsHumanReadableString().c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] created locality stats %p for {%s, %s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str(),
name_->AsHumanReadableString().c_str());
}
XdsClusterLocalityStats::~XdsClusterLocalityStats() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_client_trace)) {
gpr_log(GPR_INFO,
"[xds_client %p] destroying locality stats %p for {%s, %s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str(),
name_->AsHumanReadableString().c_str());
}
grpc_xds_client_trace.Log(
GPR_INFO,
"[xds_client %p] destroying locality stats %p for {%s, %s, %s, %s}",
xds_client_.get(), this, std::string(lrs_server_name_).c_str(),
std::string(cluster_name_).c_str(),
std::string(eds_service_name_).c_str(),
name_->AsHumanReadableString().c_str());
xds_client_->RemoveClusterLocalityStats(lrs_server_name_, cluster_name_,
eds_service_name_, name_, this);
xds_client_.reset(DEBUG_LOCATION, "LocalityStats");

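One detail worth noting in the drop-stats and locality-stats constructors above: the string_view-style members are wrapped in std::string(...) before .c_str() so that %s receives a NUL-terminated buffer, and those std::string temporaries live until the end of the full expression, that is, for the whole Log call. A small, self-contained illustration of the same idiom; the names and values here are made up.

#include <cstdio>
#include <string>
#include <string_view>

// Illustrative sink: printf-style %s needs a NUL-terminated C string.
void log_info(const char* format, const char* a, const char* b) {
  std::printf(format, a, b);
}

int main() {
  std::string_view cluster = "cluster_a";      // views are not guaranteed NUL-terminated
  std::string_view eds_service = "eds_svc_1";  // e.g. a view into a larger buffer
  // std::string(view).c_str() materializes a NUL-terminated copy; the
  // temporaries stay alive until the end of this full expression, so the
  // pointers remain valid for the duration of the call.
  log_info("cluster=%s eds_service_name=%s\n",
           std::string(cluster).c_str(), std::string(eds_service).c_str());
}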
@ -413,12 +413,10 @@ class XdsServerConfigFetcher : public grpc_server_config_fetcher {
ListenerWatcher& operator=(const ListenerWatcher&) = delete;
void OnListenerChanged(XdsApi::LdsUpdate listener) override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_xds_server_config_fetcher_trace)) {
gpr_log(
GPR_INFO,
"[ListenerWatcher %p] Received LDS update from xds client %p: %s",
this, xds_client_.get(), listener.ToString().c_str());
}
grpc_xds_server_config_fetcher_trace.Log(
GPR_INFO,
"[ListenerWatcher %p] Received LDS update from xds client %p: %s",
this, xds_client_.get(), listener.ToString().c_str());
if (listener.address != listening_address_) {
OnFatalError(absl::FailedPreconditionError(
"Address in LDS update does not match listening address"));

@ -56,12 +56,10 @@ std::string HandshakerArgsString(HandshakerArgs* args) {
HandshakeManager::HandshakeManager() {}
void HandshakeManager::Add(RefCountedPtr<Handshaker> handshaker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), handshakers_.size());
}
grpc_handshaker_trace.Log(
GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), handshakers_.size());
MutexLock lock(&mu_);
handshakers_.push_back(std::move(handshaker));
}
@ -84,13 +82,11 @@ void HandshakeManager::Shutdown(grpc_error_handle why) {
// on_handshake_done callback.
// Returns true if we've scheduled the on_handshake_done callback.
bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
gpr_log(GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
", args=%s",
this, grpc_error_std_string(error).c_str(), is_shutdown_, index_,
HandshakerArgsString(&args_).c_str());
}
grpc_handshaker_trace.Log(
GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR ", args=%s",
this, grpc_error_std_string(error).c_str(), is_shutdown_, index_,
HandshakerArgsString(&args_).c_str());
GPR_ASSERT(index_ <= handshakers_.size());
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
@ -117,12 +113,11 @@ bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
args_.read_buffer = nullptr;
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
gpr_log(GPR_INFO,
"handshake_manager %p: handshaking complete -- scheduling "
"on_handshake_done with error=%s",
this, grpc_error_std_string(error).c_str());
}
grpc_handshaker_trace.Log(
GPR_INFO,
"handshake_manager %p: handshaking complete -- scheduling "
"on_handshake_done with error=%s",
this, grpc_error_std_string(error).c_str());
// Cancel deadline timer, since we're invoking the on_handshake_done
// callback now.
grpc_timer_cancel(&deadline_timer_);
@ -130,12 +125,10 @@ bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
is_shutdown_ = true;
} else {
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), index_);
}
grpc_handshaker_trace.Log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
this, handshaker->name(), handshaker.get(), index_);
handshaker->DoHandshake(acceptor_, &call_next_handshaker_, &args_);
}
++index_;

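A behavioral nuance of the handshaker hunks above: under the old guard, helpers such as HandshakerArgsString(&args_) and grpc_error_std_string(error) ran only when the trace flag was on; as plain arguments to Log they are now evaluated on every call, even though a disabled flag discards the result. Where that formatting is expensive, a caller can keep it lazy. The sketch below shows one way, using a hypothetical lambda-taking flag that is not part of this patch.

#include <cstdio>
#include <string>

// Hypothetical flag with a lazy overload; not the API added in this patch.
class LazyTraceFlag {
 public:
  explicit LazyTraceFlag(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

  // The message is built only if the flag is enabled.
  template <typename MessageFn>
  void Log(MessageFn&& make_message) {
    if (!enabled_) return;
    std::string msg = make_message();
    std::fprintf(stderr, "%s\n", msg.c_str());
  }

 private:
  bool enabled_;
};

std::string ExpensiveArgsString() {  // stand-in for HandshakerArgsString()
  return "endpoint=..., read_buffer=...";
}

int main() {
  LazyTraceFlag trace(false);
  // The lambda body, and ExpensiveArgsString(), never run while disabled.
  trace.Log([] { return "handshake args: " + ExpensiveArgsString(); });
}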
@ -24,6 +24,7 @@
#include <grpc/support/atm.h>
#include <stdbool.h>
#include "src/core/lib/gpr/log_internal.h"
#include "src/core/lib/gprpp/global_config.h"
GPR_GLOBAL_CONFIG_DECLARE_STRING(grpc_trace);
@ -88,6 +89,16 @@ class TraceFlag {
bool enabled() { return false; }
#endif /* defined(GRPC_USE_TRACERS) || !defined(NDEBUG) */
void Log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) GPR_PRINT_FORMAT_CHECK(5, 6) {
if (GPR_UNLIKELY(enabled())) {
va_list args;
va_start(args, format);
gpr_vlog(file, line, severity, format, args);
va_end(args);
}
}
private:
friend void grpc_core::testing::grpc_tracer_enable_flag(TraceFlag* flag);
friend class TraceFlagList;
@ -120,6 +131,7 @@ class DebugOnlyTraceFlag {
}
constexpr bool enabled() const { return false; }
constexpr const char* name() const { return "DebugOnlyTraceFlag"; }
void Log(const char*, int, gpr_log_severity, const char*, ...) {}
private:
void set_enabled(bool /*enabled*/) {}

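The GPR_PRINT_FORMAT_CHECK(5, 6) annotation on the new Log method above maps to the GCC/Clang format(printf, ...) attribute, which lets the compiler check the variadic arguments against the format string; the indices are 5 and 6 rather than 4 and 5 because a non-static member function's implicit this counts as parameter 1. Below is a free-function sketch of the same idea, with PRINTF_CHECK as an assumed stand-in for the real macro.

#include <cstdarg>
#include <cstdio>

// Assumed stand-in for GPR_PRINT_FORMAT_CHECK.
#if defined(__GNUC__)
#define PRINTF_CHECK(fmt_idx, first_arg_idx) \
  __attribute__((format(printf, fmt_idx, first_arg_idx)))
#else
#define PRINTF_CHECK(fmt_idx, first_arg_idx)
#endif

// Free-function version: format is parameter 3, varargs start at 4.
static void checked_log(const char* file, int line, const char* format, ...)
    PRINTF_CHECK(3, 4);

static void checked_log(const char* file, int line, const char* format, ...) {
  va_list args;
  va_start(args, format);
  std::fprintf(stderr, "%s:%d: ", file, line);
  std::vfprintf(stderr, format, args);
  std::fputc('\n', stderr);
  va_end(args);
}

int main() {
  checked_log(__FILE__, __LINE__, "parsed %d settings frames", 2);
  // checked_log(__FILE__, __LINE__, "parsed %d settings frames", "two");
  //   ^ with the attribute in place, the compiler flags this mismatch.
}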
@ -27,6 +27,8 @@
#include <stdio.h>
#include <string.h>
#include "src/core/lib/gpr/log_internal.h"
static android_LogPriority severity_to_log_priority(gpr_log_severity severity) {
switch (severity) {
case GPR_LOG_SEVERITY_DEBUG:
@ -45,11 +47,16 @@ void gpr_log(const char* file, int line, gpr_log_severity severity,
if (gpr_should_log(severity) == 0) {
return;
}
char* message = NULL;
va_list args;
va_start(args, format);
vasprintf(&message, format, args);
gpr_vlog(file, line, severity, format, args);
va_end(args);
}
void gpr_vlog(const char* file, int line, gpr_log_severity severity,
const char* format, va_list args) {
char* message = NULL;
vasprintf(&message, format, args);
gpr_log_message(file, line, severity, message);
free(message);
}

@ -0,0 +1,25 @@
// Copyright 2021 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#ifndef GRPC_CORE_LIB_GPR_LOG_INTERNAL_H
#define GRPC_CORE_LIB_GPR_LOG_INTERNAL_H
#include "grpc/support/log.h"
/// Log a message, accepting a variadic argument list. See also \a gpr_log.
void gpr_vlog(const char* file, int line, gpr_log_severity severity,
const char* format, va_list args);
#endif // GRPC_CORE_LIB_GPR_LOG_INTERNAL_H

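log_internal.h above declares the va_list entry point that each platform's gpr_log now delegates to. The split follows the usual printf/vprintf pairing: the variadic front-end owns va_start/va_end, and the back-end only consumes the list it is handed. The platform files use vasprintf or _vscprintf; the sketch below sticks to standard vsnprintf with a fixed buffer, and my_log/my_vlog are illustrative names, not gRPC APIs.

#include <cstdarg>
#include <cstdio>

// Back-end: consumes a va_list it does not own.
static void my_vlog(const char* file, int line, const char* format,
                    va_list args) {
  char message[256];
  std::vsnprintf(message, sizeof(message), format, args);  // truncates if long
  std::fprintf(stderr, "%s:%d: %s\n", file, line, message);
}

// Front-end: owns the va_list lifecycle, mirroring gpr_log's shape.
static void my_log(const char* file, int line, const char* format, ...) {
  va_list args;
  va_start(args, format);
  my_vlog(file, line, format, args);
  va_end(args);
}

int main() { my_log(__FILE__, __LINE__, "calling handshaker at index %d", 0); }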
@ -42,6 +42,7 @@
#include <string>
#include "absl/strings/str_format.h"
#include "src/core/lib/gpr/log_internal.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gprpp/examine_stack.h"
@ -55,14 +56,19 @@ void gpr_log(const char* file, int line, gpr_log_severity severity,
if (gpr_should_log(severity) == 0) {
return;
}
char* message = nullptr;
va_list args;
va_start(args, format);
gpr_vlog(file, line, severity, format, args);
va_end(args);
}
void gpr_vlog(const char* file, int line, gpr_log_severity severity,
const char* format, va_list args) {
char* message = nullptr;
if (vasprintf(&message, format, args) == -1) {
va_end(args);
return;
}
va_end(args);
gpr_log_message(file, line, severity, message);
/* message has been allocated by vasprintf above, and needs free */
free(message);

@ -32,6 +32,7 @@
#include <string>
#include "absl/strings/str_format.h"
#include "src/core/lib/gpr/log_internal.h"
#include "src/core/lib/gprpp/examine_stack.h"
int gpr_should_log_stacktrace(gpr_log_severity severity);
@ -44,23 +45,28 @@ void gpr_log(const char* file, int line, gpr_log_severity severity,
if (gpr_should_log(severity) == 0) {
return;
}
char buf[64];
char* allocated = nullptr;
char* message = nullptr;
int ret;
va_list args;
va_start(args, format);
ret = vsnprintf(buf, sizeof(buf), format, args);
gpr_vlog(file, line, severity, format, args);
va_end(args);
}
void gpr_vlog(const char* file, int line, gpr_log_severity severity,
const char* format, va_list args) {
char buf[64];
char* allocated = nullptr;
char* message = nullptr;
// a va_list cannot be used twice.
va_list args_copy;
va_copy(args_copy, args);
int ret = vsnprintf(buf, sizeof(buf), format, args_copy);
if (ret < 0) {
message = nullptr;
} else if ((size_t)ret <= sizeof(buf) - 1) {
message = buf;
} else {
message = allocated = (char*)gpr_malloc((size_t)ret + 1);
va_start(args, format);
vsnprintf(message, (size_t)(ret + 1), format, args);
va_end(args);
}
gpr_log_message(file, line, severity, message);
gpr_free(allocated);

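The log_posix.cc version above sizes the message with one vsnprintf pass and formats with a second, which is why it va_copy's the incoming list: a va_list that has already been walked cannot be reused. A self-contained sketch of the measure-then-format idiom follows; the real code also tries a small stack buffer first, omitted here for brevity.

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

static void format_twice(const char* format, ...) {
  va_list args;
  va_start(args, format);

  // Pass 1: measure, using a copy so `args` is still usable afterwards.
  va_list measure;
  va_copy(measure, args);
  int needed = std::vsnprintf(nullptr, 0, format, measure);
  va_end(measure);

  if (needed >= 0) {
    // Pass 2: format into an exactly-sized heap buffer.
    char* message =
        static_cast<char*>(std::malloc(static_cast<size_t>(needed) + 1));
    if (message != nullptr) {
      std::vsnprintf(message, static_cast<size_t>(needed) + 1, format, args);
      std::fprintf(stderr, "%s\n", message);
      std::free(message);
    }
  }
  va_end(args);
}

int main() {
  format_twice("PS:%p BEGIN_WAIT kick_state=%s", static_cast<void*>(nullptr),
               "UNKICKED");
}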
@ -29,6 +29,7 @@
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/lib/gpr/log_internal.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/string_windows.h"
#include "src/core/lib/gprpp/examine_stack.h"
@ -41,15 +42,16 @@ void gpr_log(const char* file, int line, gpr_log_severity severity,
if (gpr_should_log(severity) == 0) {
return;
}
char* message = NULL;
va_list args;
int ret;
/* Determine the length. */
va_start(args, format);
ret = _vscprintf(format, args);
gpr_vlog(file, line, severity, format, args);
va_end(args);
}
void gpr_vlog(const char* file, int line, gpr_log_severity severity,
const char* format, va_list args) {
char* message;
int ret = _vscprintf(format, args);
if (ret < 0) {
message = NULL;
} else {
@ -58,10 +60,8 @@ void gpr_log(const char* file, int line, gpr_log_severity severity,
message = (char*)gpr_malloc(strp_buflen);
/* Print to the buffer. */
va_start(args, format);
ret = vsnprintf_s(message, strp_buflen, _TRUNCATE, format, args);
va_end(args);
if ((size_t)ret != strp_buflen - 1) {
if (GPR_UNLIKELY((size_t)ret != strp_buflen - 1)) {
/* This should never happen. */
gpr_free(message);
message = NULL;

@ -319,10 +319,9 @@ static grpc_error_handle addbyte(grpc_http_parser* parser, uint8_t byte,
case GRPC_HTTP_FIRST_LINE:
case GRPC_HTTP_HEADERS:
if (parser->cur_line_length >= GRPC_HTTP_PARSER_MAX_HEADER_LENGTH) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http1_trace)) {
gpr_log(GPR_ERROR, "HTTP header max line length (%d) exceeded",
GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
}
grpc_http1_trace.Log(GPR_ERROR,
"HTTP header max line length (%d) exceeded",
GRPC_HTTP_PARSER_MAX_HEADER_LENGTH);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"HTTP header max line length exceeded");
}

@ -114,32 +114,25 @@ void CallCombiner::ScheduleClosure(grpc_closure* closure,
void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Start", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"==> CallCombiner::Start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
this, closure DEBUG_FMT_ARGS, reason,
grpc_error_std_string(error).c_str());
}
grpc_call_combiner_trace.Log(
GPR_INFO,
"==> CallCombiner::Start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s",
this, closure DEBUG_FMT_ARGS, reason,
grpc_error_std_string(error).c_str());
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)1));
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
grpc_call_combiner_trace.Log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR,
prev_size, prev_size + 1);
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
if (prev_size == 0) {
GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED();
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
grpc_call_combiner_trace.Log(GPR_INFO, " EXECUTING IMMEDIATELY");
// Queue was empty, so execute this closure immediately.
ScheduleClosure(closure, error);
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " QUEUING");
}
grpc_call_combiner_trace.Log(GPR_INFO, " QUEUING");
// Queue was not empty, so add closure to queue.
closure->error_data.error = error;
queue_.Push(
@ -149,43 +142,35 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
void CallCombiner::Stop(DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Stop", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, "==> CallCombiner::Stop() [%p] [" DEBUG_FMT_STR "%s]",
this DEBUG_FMT_ARGS, reason);
}
grpc_call_combiner_trace.Log(
GPR_INFO, "==> CallCombiner::Stop() [%p] [" DEBUG_FMT_STR "%s]",
this DEBUG_FMT_ARGS, reason);
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)-1));
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
}
grpc_call_combiner_trace.Log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR,
prev_size, prev_size - 1);
GPR_ASSERT(prev_size >= 1);
if (prev_size > 1) {
while (true) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " checking queue");
}
grpc_call_combiner_trace.Log(GPR_INFO, " checking queue");
bool empty;
grpc_closure* closure =
reinterpret_cast<grpc_closure*>(queue_.PopAndCheckEnd(&empty));
if (closure == nullptr) {
// This can happen either due to a race condition within the mpscq
// code or because of a race with Start().
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " queue returned no result; checking again");
}
grpc_call_combiner_trace.Log(
GPR_INFO, " queue returned no result; checking again");
continue;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure,
grpc_error_std_string(closure->error_data.error).c_str());
}
grpc_call_combiner_trace.Log(
GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s", closure,
grpc_error_std_string(closure->error_data.error).c_str());
ScheduleClosure(closure, closure->error_data.error);
break;
}
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " queue empty");
} else {
grpc_call_combiner_trace.Log(GPR_INFO, " queue empty");
}
}
@ -198,31 +183,27 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
// If error is set, invoke the cancellation closure immediately.
// Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
this, closure);
}
grpc_call_combiner_trace.Log(
GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
this, closure);
ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_REF(original_error));
break;
} else {
if (gpr_atm_full_cas(&cancel_state_, original_state,
reinterpret_cast<gpr_atm>(closure))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
this, closure);
}
grpc_call_combiner_trace.Log(
GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p", this,
closure);
// If we replaced an earlier closure, invoke the original
// closure with GRPC_ERROR_NONE. This allows callers to clean
// up any resources they may be holding for the callback.
if (original_state != 0) {
closure = reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p", this,
closure);
}
grpc_call_combiner_trace.Log(
GPR_INFO, "call_combiner=%p: scheduling old cancel callback=%p",
this, closure);
ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_NONE);
}
break;
@ -246,11 +227,10 @@ void CallCombiner::Cancel(grpc_error_handle error) {
if (original_state != 0) {
grpc_closure* notify_on_cancel =
reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
this, notify_on_cancel);
}
grpc_call_combiner_trace.Log(
GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p", this,
notify_on_cancel);
ExecCtx::Run(DEBUG_LOCATION, notify_on_cancel, GRPC_ERROR_REF(error));
}
break;

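The call-combiner format strings above splice DEBUG_FMT_STR into the middle of a literal and DEBUG_FMT_ARGS into the argument list; adjacent string literals are concatenated at compile time, so the format string and its arguments grow or shrink together between debug and release builds. Below is a sketch with macros of roughly that shape; the real definitions live near the top of call_combiner.cc and are not shown in this diff.

#include <cstdio>

#ifndef NDEBUG
#define DEBUG_ARGS const char* file, int line,
#define DEBUG_FMT_STR "%s:%d "
#define DEBUG_FMT_ARGS , file, line
#else
#define DEBUG_ARGS
#define DEBUG_FMT_STR
#define DEBUG_FMT_ARGS
#endif

// In debug builds this formats "closure=%p [%s:%d %s]"; in release builds
// the file/line pieces vanish from both the format and the argument list.
static void start_trace(DEBUG_ARGS const char* reason, void* closure) {
  std::fprintf(stderr, "==> Start() closure=%p [" DEBUG_FMT_STR "%s]\n",
               closure DEBUG_FMT_ARGS, reason);
}

int main() {
#ifndef NDEBUG
  start_trace(__FILE__, __LINE__, "example", nullptr);
#else
  start_trace("example", nullptr);
#endif
}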
@ -168,14 +168,12 @@ class CallCombinerClosureList {
GRPC_CALL_COMBINER_START(call_combiner, closure.closure, closure.error,
closure.reason);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"CallCombinerClosureList executing closure while already "
"holding call_combiner %p: closure=%p error=%s reason=%s",
call_combiner, closures_[0].closure,
grpc_error_std_string(closures_[0].error).c_str(),
closures_[0].reason);
}
grpc_call_combiner_trace.Log(
GPR_INFO,
"CallCombinerClosureList executing closure while already "
"holding call_combiner %p: closure=%p error=%s reason=%s",
call_combiner, closures_[0].closure,
grpc_error_std_string(closures_[0].error).c_str(), closures_[0].reason);
// This will release the call combiner.
ExecCtx::Run(DEBUG_LOCATION, closures_[0].closure, closures_[0].error);
closures_.clear();

@ -65,10 +65,8 @@ void CFStreamHandle::ReadCallback(CFReadStreamRef stream,
grpc_error_handle error;
CFErrorRef stream_error;
CFStreamHandle* handle = static_cast<CFStreamHandle*>(client_callback_info);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream ReadCallback (%p, %p, %lu, %p)", handle,
stream, type, client_callback_info);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream ReadCallback (%p, %p, %lu, %p)",
handle, stream, type, client_callback_info);
switch (type) {
case kCFStreamEventOpenCompleted:
handle->open_event_.SetReady();
@ -100,10 +98,8 @@ void CFStreamHandle::WriteCallback(CFWriteStreamRef stream,
grpc_error_handle error;
CFErrorRef stream_error;
CFStreamHandle* handle = static_cast<CFStreamHandle*>(clientCallBackInfo);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream WriteCallback (%p, %p, %lu, %p)", handle,
stream, type, clientCallBackInfo);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream WriteCallback (%p, %p, %lu, %p)",
handle, stream, type, clientCallBackInfo);
switch (type) {
case kCFStreamEventOpenCompleted:
handle->open_event_.SetReady();

@ -235,18 +235,15 @@ class Closure {
return;
}
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: run [%s:%d]",
closure, closure->file_created, closure->line_created,
location.file(), location.line());
}
grpc_trace_closure.Log(
GPR_DEBUG, "running closure %p: created [%s:%d]: run [%s:%d]", closure,
closure->file_created, closure->line_created, location.file(),
location.line());
GPR_ASSERT(closure->cb != nullptr);
#endif
closure->cb(closure->cb_arg, error);
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
}
grpc_trace_closure.Log(GPR_DEBUG, "closure %p finished", closure);
#endif
GRPC_ERROR_UNREF(error);
}

@ -35,13 +35,6 @@
grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");
#define GRPC_COMBINER_TRACE(fn) \
do { \
if (grpc_combiner_trace.enabled()) { \
fn; \
} \
} while (0)
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2
@ -59,33 +52,34 @@ grpc_core::Combiner* grpc_combiner_create(void) {
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock, nullptr);
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
grpc_combiner_trace.Log(GPR_INFO, "C:%p create", lock);
return lock;
}
static void really_destroy(grpc_core::Combiner* lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
grpc_combiner_trace.Log(GPR_INFO, "C:%p really_destroy", lock);
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
delete lock;
}
static void start_destroy(grpc_core::Combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log(
GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
grpc_combiner_trace.Log(GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR,
lock, old_state);
if (old_state == 1) {
really_destroy(lock);
}
}
#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
if (grpc_combiner_trace.enabled()) { \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
"C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
gpr_atm_no_barrier_load(&lock->refs.count), \
gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
}
#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
do { \
grpc_combiner_trace.Log( \
file, line, GPR_LOG_SEVERITY_DEBUG, \
"C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
gpr_atm_no_barrier_load(&lock->refs.count), \
gpr_atm_no_barrier_load(&lock->refs.count) + (delta), reason); \
} while (0)
#else
#define GRPC_COMBINER_DEBUG_SPAM(op, delta)
#endif
@ -131,9 +125,9 @@ static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
GPR_TIMER_SCOPE("combiner.execute", 0);
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last));
grpc_combiner_trace.Log(GPR_INFO,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last);
if (last == 1) {
GRPC_STATS_INC_COMBINER_LOCKS_INITIATED();
GPR_TIMER_MARK("combiner.initiated", 0);
@ -177,7 +171,7 @@ static void offload(void* arg, grpc_error_handle /*error*/) {
static void queue_offload(grpc_core::Combiner* lock) {
GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
move_next();
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
grpc_combiner_trace.Log(GPR_INFO, "C:%p queue_offload", lock);
grpc_core::Executor::Run(&lock->offload, GRPC_ERROR_NONE);
}
@ -192,14 +186,14 @@ bool grpc_combiner_continue_exec_ctx() {
bool contended =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_continue_exec_ctx "
"contended=%d "
"exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, contended,
grpc_core::ExecCtx::Get()->IsReadyToFinish(),
lock->time_to_execute_final_list));
grpc_combiner_trace.Log(GPR_INFO,
"C:%p grpc_combiner_continue_exec_ctx "
"contended=%d "
"exec_ctx_ready_to_finish=%d "
"time_to_execute_final_list=%d",
lock, contended,
grpc_core::ExecCtx::Get()->IsReadyToFinish(),
lock->time_to_execute_final_list);
// offload only if all the following conditions are true:
// 1. the combiner is contended and has more than one closure to execute
@ -221,8 +215,7 @@ bool grpc_combiner_continue_exec_ctx() {
// priority
(gpr_atm_acq_load(&lock->state) >> 1) > 1) {
grpc_core::MultiProducerSingleConsumerQueue::Node* n = lock->queue.Pop();
GRPC_COMBINER_TRACE(
gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
grpc_combiner_trace.Log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n);
if (n == nullptr) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
@ -245,8 +238,8 @@ bool grpc_combiner_continue_exec_ctx() {
int loops = 0;
while (c != nullptr) {
GPR_TIMER_SCOPE("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE(
gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_combiner_trace.Log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock,
loops, c);
grpc_closure* next = c->next_data.next;
grpc_error_handle error = c->error_data.error;
#ifndef NDEBUG
@ -263,8 +256,8 @@ bool grpc_combiner_continue_exec_ctx() {
lock->time_to_execute_final_list = false;
gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(
gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
grpc_combiner_trace.Log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock,
old_state);
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \
@ -307,9 +300,9 @@ static void combiner_finally_exec(grpc_core::Combiner* lock,
GPR_ASSERT(lock != nullptr);
GPR_TIMER_SCOPE("combiner.execute_finally", 0);
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
GRPC_COMBINER_TRACE(gpr_log(
grpc_combiner_trace.Log(
GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
grpc_core::ExecCtx::Get()->combiner_data()->active_combiner);
if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0);
// Using error_data.scratch to store the combiner so that it can be accessed

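The combiner.cc hunk above drops the GRPC_COMBINER_TRACE wrapper and rewrites GRPC_COMBINER_DEBUG_SPAM with a do { ... } while (0) body, the standard way to make a multi-statement macro behave as a single statement so that a trailing semicolon and an unbraced if/else both work as expected. A small illustration with a made-up macro:

#include <cstdio>

// Without the do/while wrapper, LOG_TWICE(...); inside an unbraced if/else
// would splice two statements into the branch and detach the else.
#define LOG_TWICE(msg)                           \
  do {                                           \
    std::fprintf(stderr, "first: %s\n", msg);    \
    std::fprintf(stderr, "second: %s\n", msg);   \
  } while (0)

int main() {
  bool verbose = true;
  if (verbose)
    LOG_TWICE("combiner created");  // expands to exactly one statement
  else
    std::fprintf(stderr, "quiet\n");
}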
@ -136,11 +136,10 @@ static void CallReadCb(CFStreamEndpoint* ep, grpc_error_handle error) {
}
static void CallWriteCb(CFStreamEndpoint* ep, grpc_error_handle error) {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p call_write_cb %p %p:%p", ep,
ep->write_cb, ep->write_cb->cb, ep->write_cb->cb_arg);
gpr_log(GPR_DEBUG, "write: error=%s", grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream endpoint:%p call_write_cb %p %p:%p",
ep, ep->write_cb, ep->write_cb->cb, ep->write_cb->cb_arg);
grpc_tcp_trace.Log(GPR_DEBUG, "write: error=%s",
grpc_error_std_string(error).c_str());
grpc_closure* cb = ep->write_cb;
ep->write_cb = nullptr;
ep->write_slices = nullptr;
@ -253,10 +252,8 @@ static void CFStreamReadAllocationDone(void* arg, grpc_error_handle error) {
static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, bool urgent) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p read (%p, %p) length:%zu", ep_impl,
slices, cb, slices->length);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream endpoint:%p read (%p, %p) length:%zu",
ep_impl, slices, cb, slices->length);
GPR_ASSERT(ep_impl->read_cb == nullptr);
ep_impl->read_cb = cb;
ep_impl->read_slices = slices;
@ -273,10 +270,9 @@ static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
static void CFStreamWrite(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, void* arg) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p write (%p, %p) length:%zu",
ep_impl, slices, cb, slices->length);
}
grpc_tcp_trace.Log(GPR_DEBUG,
"CFStream endpoint:%p write (%p, %p) length:%zu", ep_impl,
slices, cb, slices->length);
GPR_ASSERT(ep_impl->write_cb == nullptr);
ep_impl->write_cb = cb;
ep_impl->write_slices = slices;
@ -286,22 +282,18 @@ static void CFStreamWrite(grpc_endpoint* ep, grpc_slice_buffer* slices,
void CFStreamShutdown(grpc_endpoint* ep, grpc_error_handle why) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p shutdown (%p)", ep_impl, why);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream endpoint:%p shutdown (%p)", ep_impl,
why);
CFReadStreamClose(ep_impl->read_stream);
CFWriteStreamClose(ep_impl->write_stream);
ep_impl->stream_sync->Shutdown(why);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p shutdown DONE (%p)", ep_impl, why);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream endpoint:%p shutdown DONE (%p)",
ep_impl, why);
}
void CFStreamDestroy(grpc_endpoint* ep) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p destroy", ep_impl);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CFStream endpoint:%p destroy", ep_impl);
EP_UNREF(ep_impl, "destroy");
}
@ -341,11 +333,9 @@ grpc_endpoint* grpc_cfstream_endpoint_create(
const char* peer_string, grpc_slice_allocator* slice_allocator,
CFStreamHandle* stream_sync) {
CFStreamEndpoint* ep_impl = new CFStreamEndpoint;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG,
"CFStream endpoint:%p create readStream:%p writeStream: %p",
ep_impl, read_stream, write_stream);
}
grpc_tcp_trace.Log(
GPR_DEBUG, "CFStream endpoint:%p create readStream:%p writeStream: %p",
ep_impl, read_stream, write_stream);
ep_impl->base.vtable = &vtable;
gpr_ref_init(&ep_impl->refcount, 1);
ep_impl->read_stream = read_stream;

@ -231,11 +231,10 @@ static const char* error_time_name(grpc_error_times key) {
#ifndef NDEBUG
grpc_error_handle grpc_error_do_ref(grpc_error_handle err, const char* file,
int line) {
if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line);
}
grpc_trace_error_refcount.Log(
GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) + 1, file, line);
gpr_ref(&err->atomics.refs);
return err;
}
@ -279,11 +278,10 @@ static void error_destroy(grpc_error_handle err) {
#ifndef NDEBUG
void grpc_error_do_unref(grpc_error_handle err, const char* file, int line) {
if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line);
}
grpc_trace_error_refcount.Log(
GPR_DEBUG, "%p: %" PRIdPTR " -> %" PRIdPTR " [%s:%d]", err,
gpr_atm_no_barrier_load(&err->atomics.refs.count),
gpr_atm_no_barrier_load(&err->atomics.refs.count) - 1, file, line);
if (gpr_unref(&err->atomics.refs)) {
error_destroy(err);
}
@ -305,18 +303,12 @@ static uint8_t get_placement(grpc_error_handle* err, size_t size) {
if ((*err)->arena_size + slots > (*err)->arena_capacity) {
return UINT8_MAX;
}
#ifndef NDEBUG
grpc_error_handle orig = *err;
#endif
*err = static_cast<grpc_error_handle>(gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t)));
#ifndef NDEBUG
if (grpc_trace_error_refcount.enabled()) {
if (*err != orig) {
gpr_log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
}
if (*err != orig) {
grpc_trace_error_refcount.Log(GPR_DEBUG, "realloc %p -> %p", orig, *err);
}
#endif
}
uint8_t placement = (*err)->arena_size;
(*err)->arena_size = static_cast<uint8_t>((*err)->arena_size + slots);
@ -434,9 +426,8 @@ grpc_error_handle grpc_error_create(const char* file, int line,
file, line);
abort();
}
if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p create [%s:%d]", err, file, line);
}
grpc_trace_error_refcount.Log(GPR_DEBUG, "%p create [%s:%d]", err, file,
line);
#endif
err->arena_size = 0;
@ -517,11 +508,7 @@ static grpc_error_handle copy_error_and_unref(grpc_error_handle in) {
}
out = static_cast<grpc_error_handle>(
gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t)));
#ifndef NDEBUG
if (grpc_trace_error_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
}
#endif
grpc_trace_error_refcount.Log(GPR_DEBUG, "%p create copying %p", out, in);
// bulk memcpy of the rest of the struct.
// NOLINTNEXTLINE(bugprone-sizeof-expression)
size_t skip = sizeof(&out->atomics);

@ -42,10 +42,8 @@
grpc_core::DebugOnlyTraceFlag grpc_apple_polling_trace(false, "apple_polling");
#ifndef NDEBUG
#define GRPC_POLLING_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_apple_polling_trace)) { \
gpr_log(GPR_DEBUG, "(polling) " format, __VA_ARGS__); \
}
#define GRPC_POLLING_TRACE(format, ...) \
grpc_apple_polling_trace.Log(GPR_DEBUG, "(polling) " format, __VA_ARGS__);
#else
#define GRPC_POLLING_TRACE(...)
#endif // NDEBUG

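The rewritten GRPC_POLLING_TRACE above glues a fixed "(polling) " prefix onto the caller's format via string-literal concatenation and forwards the remaining arguments with __VA_ARGS__; note that before C++20, __VA_ARGS__ preceded by a comma requires at least one trailing argument (compilers offer ##__VA_ARGS__ or __VA_OPT__ to relax that). A sketch of the same shape with an assumed stderr sink:

#include <cstdio>

// The macro parameter expands to a string literal at the call site, so the
// "(polling) " prefix and the caller's format concatenate at compile time.
#define POLLING_TRACE(format, ...) \
  std::fprintf(stderr, "(polling) " format "\n", __VA_ARGS__)

int main() {
  int events = 3;
  POLLING_TRACE("kqueue returned %d events", events);
}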
@ -355,11 +355,8 @@ static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
std::string fd_name = absl::StrCat(name, " fd=", fd);
grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name.c_str());
fork_fd_list_add_grpc_fd(new_fd);
#ifndef NDEBUG
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name.c_str());
}
#endif
grpc_trace_fd_refcount.Log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd,
fd_name.c_str());
struct epoll_event ev;
ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
@ -732,9 +729,7 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
}
grpc_polling_trace.Log(GPR_INFO, "ps: %p poll got %d events", ps, r);
gpr_atm_rel_store(&g_epoll_set.num_events, r);
gpr_atm_rel_store(&g_epoll_set.cursor, 0);
@ -752,9 +747,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
if (pollset->seen_inactive) {
// pollset has been observed to be inactive, we need to move back to the
@ -771,11 +764,9 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state),
is_reassigning);
}
grpc_polling_trace.Log(
GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state), is_reassigning);
if (pollset->seen_inactive) {
if (neighborhood != pollset->neighborhood) {
gpr_mu_unlock(&neighborhood->mu);
@ -824,11 +815,9 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down);
}
grpc_polling_trace.Log(
GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d", pollset,
worker, kick_state_string(worker->state), pollset->shutting_down);
if (gpr_cv_wait(&worker->cv, &pollset->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
@ -841,13 +830,12 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_core::ExecCtx::Get()->InvalidateNow();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down, pollset->kicked_without_poller);
}
grpc_polling_trace.Log(GPR_INFO,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->state),
pollset->shutting_down,
pollset->kicked_without_poller);
/* We release pollset lock in this function at a couple of places:
* 1. Briefly when assigning pollset to a neighborhood
@ -885,10 +873,8 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(
&g_active_poller, 0,
reinterpret_cast<gpr_atm>(inspect_worker))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker);
}
grpc_polling_trace.Log(
GPR_INFO, " .. choose next poller to be %p", inspect_worker);
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
@ -896,9 +882,8 @@ static bool check_neighborhood_for_available_poller(
gpr_cv_signal(&inspect_worker->cv);
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. beaten to choose next poller");
}
grpc_polling_trace.Log(GPR_INFO,
" .. beaten to choose next poller");
}
// even if we didn't win the cas, there's a worker, we can stop
found_worker = true;
@ -914,9 +899,7 @@ static bool check_neighborhood_for_available_poller(
} while (!found_worker && inspect_worker != inspect->root_worker);
}
if (!found_worker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
}
grpc_polling_trace.Log(GPR_INFO, " .. mark pollset %p inactive", inspect);
inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) {
neighborhood->active_root =
@ -934,9 +917,7 @@ static bool check_neighborhood_for_available_poller(
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
GPR_TIMER_SCOPE("end_worker", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
if (worker_hdl != nullptr) *worker_hdl = nullptr;
/* Make sure we appear kicked */
SET_KICK_STATE(worker, KICKED);
@ -945,9 +926,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) ==
reinterpret_cast<gpr_atm>(worker)) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. choose next poller to be peer %p",
worker);
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
@ -997,9 +977,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. remove worker");
}
grpc_polling_trace.Log(GPR_INFO, " .. remove worker");
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset);
}
@ -1092,24 +1070,18 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
if (root_worker == nullptr) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked_without_poller");
}
grpc_polling_trace.Log(GPR_INFO, " .. kicked_without_poller");
goto done;
}
grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. already kicked %p", root_worker);
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. already kicked %p", next_worker);
SET_KICK_STATE(next_worker, KICKED);
goto done;
} else if (root_worker == next_worker && // only try and wake up a poller
@ -1118,29 +1090,23 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", root_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. kicked %p", root_worker);
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", next_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. kicked %p", next_worker);
GPR_ASSERT(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
} else if (next_worker->state == DESIGNATED_POLLER) {
if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(
GPR_INFO,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
}
grpc_polling_trace.Log(
GPR_INFO,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
@ -1149,10 +1115,8 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. non-root poller %p (root=%p)",
next_worker, root_worker);
SET_KICK_STATE(next_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
@ -1165,9 +1129,7 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked while waking up");
}
grpc_polling_trace.Log(GPR_INFO, " .. kicked while waking up");
goto done;
}
@ -1175,40 +1137,30 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
if (specific_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. specific worker already kicked");
}
grpc_polling_trace.Log(GPR_INFO, " .. specific worker already kicked");
goto done;
} else if (g_current_thread_worker == specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
}
grpc_polling_trace.Log(GPR_INFO, " .. mark %p kicked", specific_worker);
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick active poller");
}
grpc_polling_trace.Log(GPR_INFO, " .. kick active poller");
SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick waiting worker");
}
grpc_polling_trace.Log(GPR_INFO, " .. kick waiting worker");
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick non-waiting worker");
}
grpc_polling_trace.Log(GPR_INFO, " .. kick non-waiting worker");
SET_KICK_STATE(specific_worker, KICKED);
goto done;
}

@ -171,11 +171,8 @@ struct grpc_fd {
std::string fd_name = absl::StrCat(name, " fd=", fd);
grpc_iomgr_register_object(&iomgr_object, fd_name.c_str());
#ifndef NDEBUG
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, this, fd_name.c_str());
}
#endif
grpc_trace_fd_refcount.Log(GPR_DEBUG, "FD %d %p create %s", fd, this,
fd_name.c_str());
}
// This is really the dtor, but the poller threads waking up from
@ -349,12 +346,10 @@ static gpr_mu fd_freelist_mu;
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
"FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
}
grpc_trace_fd_refcount.Log(
GPR_DEBUG, "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) \
do { \
@ -386,12 +381,10 @@ static void fd_destroy(void* arg, grpc_error_handle /*error*/) {
#ifndef NDEBUG
static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
grpc_trace_fd_refcount.Log(
GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd* fd, int n) {
#endif
@ -613,9 +606,8 @@ static grpc_error_handle pollable_add_fd(pollable* p, grpc_fd* fd) {
grpc_error_handle error = GRPC_ERROR_NONE;
static const char* err_desc = "pollable_add_fd";
const int epfd = p->epfd;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
}
grpc_polling_trace.Log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd,
p);
struct epoll_event ev_fd;
ev_fd.events =
@ -657,13 +649,12 @@ static void pollset_global_shutdown(void) {
/* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)",
pollset, pollset->active_pollable, pollset->shutdown_closure,
pollset->root_worker, pollset->containing_pollset_set_count);
}
grpc_polling_trace.Log(
GPR_INFO,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)",
pollset, pollset->active_pollable, pollset->shutdown_closure,
pollset->root_worker, pollset->containing_pollset_set_count);
if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
pollset->containing_pollset_set_count == 0) {
GPR_TIMER_MARK("pollset_finish_shutdown", 0);
@ -683,34 +674,27 @@ static grpc_error_handle kick_one_worker(grpc_pollset_worker* specific_worker) {
grpc_core::MutexLockForGprMu lock(&p->mu);
GPR_ASSERT(specific_worker != nullptr);
if (specific_worker->kicked) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked",
p);
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE;
}
if (g_current_thread_worker == specific_worker) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
specific_worker->kicked = true;
return GRPC_ERROR_NONE;
}
if (specific_worker == p->root_worker) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
specific_worker->kicked = true;
grpc_error_handle error = grpc_wakeup_fd_wakeup(&p->wakeup);
return error;
}
if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
specific_worker->kicked = true;
gpr_cv_signal(&specific_worker->cv);
return GRPC_ERROR_NONE;
@ -724,19 +708,17 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GPR_TIMER_SCOPE("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
static_cast<void*>(g_current_thread_pollset),
static_cast<void*>(g_current_thread_worker), pollset->root_worker);
}
grpc_polling_trace.Log(
GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker, static_cast<void*>(g_current_thread_pollset),
static_cast<void*>(g_current_thread_worker), pollset->root_worker);
if (specific_worker == nullptr) {
if (g_current_thread_pollset != pollset) {
if (pollset->root_worker == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_any_without_poller",
pollset);
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
return GRPC_ERROR_NONE;
@ -760,9 +742,7 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
pollset->root_worker->links[PWLINK_POLLSET].next);
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
return GRPC_ERROR_NONE;
}
@ -880,9 +860,8 @@ static grpc_error_handle pollable_process_events(grpc_pollset* pollset,
struct epoll_event* ev = &pollable_obj->events[n];
void* data_ptr = ev->data.ptr;
if (1 & reinterpret_cast<intptr_t>(data_ptr)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset,
data_ptr);
append_error(
&error,
grpc_wakeup_fd_consume_wakeup(reinterpret_cast<grpc_wakeup_fd*>(
@ -899,12 +878,10 @@ static grpc_error_handle pollable_process_events(grpc_pollset* pollset,
bool write_ev = (ev->events & EPOLLOUT) != 0;
bool err_fallback = error && !track_err;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p got fd %p: cancel=%d read=%d "
"write=%d",
pollset, fd, cancel, read_ev, write_ev);
}
grpc_polling_trace.Log(GPR_INFO,
"PS:%p got fd %p: cancel=%d read=%d "
"write=%d",
pollset, fd, cancel, read_ev, write_ev);
if (error && !err_fallback) {
fd_has_errors(fd);
}
@ -931,10 +908,8 @@ static grpc_error_handle pollable_epoll(pollable* p, grpc_millis deadline) {
GPR_TIMER_SCOPE("pollable_epoll", 0);
int timeout = poll_deadline_to_millis_timeout(deadline);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p,
pollable_desc(p).c_str(), timeout);
}
grpc_polling_trace.Log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p,
pollable_desc(p).c_str(), timeout);
if (timeout != 0) {
GRPC_SCHEDULING_START_BLOCKING_REGION;
@ -950,9 +925,7 @@ static grpc_error_handle pollable_epoll(pollable* p, grpc_millis deadline) {
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
}
grpc_polling_trace.Log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
p->event_cursor = 0;
p->event_count = r;
@ -1029,21 +1002,16 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
while (do_poll && worker->pollable_obj->root_worker != worker) {
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker);
do_poll = false;
} else if (worker->kicked) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
}
grpc_polling_trace.Log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
do_poll = false;
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace) &&
worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable_obj, worker);
} else if (worker->pollable_obj->root_worker != worker) {
grpc_polling_trace.Log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p",
pollset, worker->pollable_obj, worker);
}
}
grpc_core::ExecCtx::Get()->InvalidateNow();
@ -1113,13 +1081,12 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
#ifndef NDEBUG
WORKER_PTR->originator = sys_gettid();
#endif
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
}
grpc_polling_trace.Log(
GPR_INFO,
"PS:%p work hdl=%p worker=%p now=%" PRId64 " deadline=%" PRId64
" kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
static const char* err_desc = "pollset_work";
grpc_error_handle error = GRPC_ERROR_NONE;
if (pollset->kicked_without_poller) {
@ -1154,11 +1121,9 @@ static grpc_error_handle pollset_transition_pollable_from_empty_to_fd_locked(
grpc_pollset* pollset, grpc_fd* fd) {
static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error_handle error = GRPC_ERROR_NONE;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
}
grpc_polling_trace.Log(
GPR_INFO, "PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd);
append_error(&error, pollset_kick_all(pollset), err_desc);
POLLABLE_UNREF(pollset->active_pollable, "pollset");
append_error(&error, get_fd_pollable(fd, &pollset->active_pollable),
@ -1170,13 +1135,11 @@ static grpc_error_handle pollset_transition_pollable_from_fd_to_multi_locked(
grpc_pollset* pollset, grpc_fd* and_add_fd) {
static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
grpc_error_handle error = GRPC_ERROR_NONE;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(
GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd);
}
grpc_polling_trace.Log(
GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd);
append_error(&error, pollset_kick_all(pollset), err_desc);
grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
POLLABLE_UNREF(pollset->active_pollable, "pollset");
@ -1243,11 +1206,9 @@ static grpc_error_handle pollset_as_multipollable_locked(
error = pollable_create(PO_MULTI, &pollset->active_pollable);
/* Any workers currently polling on this pollset must now be woken up so
* that they can pick up the new active_pollable */
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p active pollable transition from empty to multi",
pollset);
}
grpc_polling_trace.Log(
GPR_INFO, "PS:%p active pollable transition from empty to multi",
pollset);
static const char* err_desc =
"pollset_as_multipollable_locked: empty -> multi";
append_error(&error, pollset_kick_all(pollset), err_desc);
@ -1347,9 +1308,7 @@ static void pollset_set_unref(grpc_pollset_set* pss) {
static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
}
grpc_polling_trace.Log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
grpc_error_handle error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
pss = pss_lock_adam(pss);
@ -1371,9 +1330,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
}
grpc_polling_trace.Log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
pss = pss_lock_adam(pss);
size_t i;
for (i = 0; i < pss->fd_count; i++) {
@ -1392,9 +1349,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
}
grpc_polling_trace.Log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
pss = pss_lock_adam(pss);
size_t i;
for (i = 0; i < pss->pollset_count; i++) {
@ -1445,9 +1400,7 @@ static grpc_error_handle add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
}
grpc_polling_trace.Log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
grpc_error_handle error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_pollset";
pollable* pollable_obj = nullptr;
@ -1482,9 +1435,7 @@ static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
static void pollset_set_add_pollset_set(grpc_pollset_set* a,
grpc_pollset_set* b) {
GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
}
grpc_polling_trace.Log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
grpc_error_handle error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd";
for (;;) {
@ -1516,9 +1467,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
if (b_size > a_size) {
GPR_SWAP(grpc_pollset_set*, a, b);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
}
grpc_polling_trace.Log(GPR_INFO, "PSS: parent %p to %p", b, a);
a->refs.Ref();
b->parent = a;
if (a->fd_capacity < a->fd_count + b->fd_count) {
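Every hunk in this file follows one shape: an `if (GRPC_TRACE_FLAG_ENABLED(flag)) { gpr_log(...); }` block collapses into a single `flag.Log(...)` statement. As a hedged sketch (not the committed `grpc_core::TraceFlag` code in src/core/lib/debug/trace.h), the forwarding method only needs to mirror `gpr_log()`'s leading parameters, because `GPR_INFO`/`GPR_DEBUG` expand to `__FILE__, __LINE__, severity`:

// Illustrative sketch only; the real TraceFlag API may differ. The point is
// that the enabled() check moves inside Log(), so call sites no longer carry
// their own conditional.
#include <cstdarg>
#include <cstdio>

#include <grpc/support/log.h>

class TraceFlagSketch {
 public:
  explicit TraceFlagSketch(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

  // Same leading parameters as gpr_log(); GPR_INFO expands to
  // (__FILE__, __LINE__, GPR_LOG_SEVERITY_INFO).
  void Log(const char* file, int line, gpr_log_severity severity,
           const char* format, ...) const {
    if (!enabled()) return;  // cheap early-out when tracing is off
    char buf[512];           // arbitrary bound, adequate for an illustration
    va_list args;
    va_start(args, format);
    vsnprintf(buf, sizeof(buf), format, args);
    va_end(args);
    gpr_log_message(file, line, severity, buf);
  }

 private:
  bool enabled_ = false;
};

With that shape, a call such as `grpc_polling_trace.Log(GPR_INFO, "POLLABLE:%p got %d events", p, r);` costs only a boolean test whenever the polling tracer is disabled.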

@ -319,12 +319,10 @@ static void fork_fd_list_add_wakeup_fd(grpc_cached_wakeup_fd* fd) {
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
"FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
}
grpc_trace_fd_refcount.Log(
GPR_DEBUG, "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) \
do { \
@ -344,12 +342,10 @@ static void ref_by(grpc_fd* fd, int n) {
#ifndef NDEBUG
static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fd_refcount)) {
gpr_log(GPR_DEBUG,
"FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
}
grpc_trace_fd_refcount.Log(
GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd* fd, int n) {
#endif
@ -574,9 +570,8 @@ static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
}
static void fd_notify_on_error(grpc_fd* /*fd*/, grpc_closure* closure) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "Polling engine does not support tracking errors.");
}
grpc_polling_trace.Log(GPR_ERROR,
"Polling engine does not support tracking errors.");
grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_CANCELLED);
}
@ -593,9 +588,8 @@ static void fd_set_writable(grpc_fd* fd) {
}
static void fd_set_error(grpc_fd* /*fd*/) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "Polling engine does not support tracking errors.");
}
grpc_polling_trace.Log(GPR_ERROR,
"Polling engine does not support tracking errors.");
}
static uint32_t fd_begin_poll(grpc_fd* fd, grpc_pollset* pollset,
@ -1019,9 +1013,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "%p poll=%d", pollset, r);
}
grpc_polling_trace.Log(GPR_INFO, "%p poll=%d", pollset, r);
if (r < 0) {
if (errno != EINTR) {
@ -1043,9 +1035,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "%p: got_wakeup", pollset);
}
grpc_polling_trace.Log(GPR_INFO, "%p: got_wakeup", pollset);
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@ -1053,11 +1043,10 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
if (watchers[i].fd == nullptr) {
fd_end_poll(&watchers[i], 0, 0);
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
grpc_polling_trace.Log(
GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
/* This is a mitigation to prevent poll() from spinning on a
** POLLHUP https://github.com/grpc/grpc/pull/13665
*/

@ -54,10 +54,8 @@ grpc_core::DebugOnlyTraceFlag grpc_polling_api_trace(false, "polling_api");
// Polling API trace only enabled in debug builds
#ifndef NDEBUG
#define GRPC_POLLING_API_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_api_trace)) { \
gpr_log(GPR_INFO, "(polling-api) " format, __VA_ARGS__); \
}
#define GRPC_POLLING_API_TRACE(format, ...) \
grpc_polling_api_trace.Log(GPR_INFO, "(polling-api) " format, __VA_ARGS__);
#else
#define GRPC_POLLING_API_TRACE(...)
#endif // NDEBUG
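The call-site wrapper macros (`GRPC_POLLING_API_TRACE` here and `GRPC_FD_TRACE` below) now expand to a bare `.Log()` statement. As a general C/C++ aside, and not something this commit does, a `do { ... } while (0)` wrapper is the usual way to keep such a variadic forwarding macro a single statement so it composes safely with unbraced `if`/`else` at call sites:

// Hypothetical variant of the macro, shown only to illustrate the idiom.
#define GRPC_POLLING_API_TRACE_SKETCH(format, ...)                 \
  do {                                                             \
    grpc_polling_api_trace.Log(GPR_INFO, "(polling-api) " format,  \
                               __VA_ARGS__);                       \
  } while (0)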

@ -36,10 +36,8 @@ extern grpc_core::DebugOnlyTraceFlag grpc_fd_trace; /* Disabled by default */
extern grpc_core::DebugOnlyTraceFlag
grpc_polling_trace; /* Disabled by default */
#define GRPC_FD_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_fd_trace)) { \
gpr_log(GPR_INFO, "(fd-trace) " format, __VA_ARGS__); \
}
#define GRPC_FD_TRACE(format, ...) \
grpc_fd_trace.Log(GPR_INFO, "(fd-trace) " format, __VA_ARGS__);
typedef struct grpc_fd grpc_fd;

@ -30,19 +30,17 @@ EventEngine::Callback GrpcClosureToCallback(grpc_closure* closure,
grpc_error_add_child(error, absl_status_to_grpc_error(status));
#ifndef NDEBUG
closure->scheduled = false;
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG,
"EventEngine: running closure %p: created [%s:%d]: %s [%s:%d]",
closure, closure->file_created, closure->line_created,
closure->run ? "run" : "scheduled", closure->file_initiated,
closure->line_initiated);
}
grpc_trace_closure.Log(
GPR_DEBUG,
"EventEngine: running closure %p: created [%s:%d]: %s [%s:%d]", closure,
closure->file_created, closure->line_created,
closure->run ? "run" : "scheduled", closure->file_initiated,
closure->line_initiated);
#endif
closure->cb(closure->cb_arg, new_error);
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "EventEngine: closure %p finished", closure);
}
grpc_trace_closure.Log(GPR_DEBUG, "EventEngine: closure %p finished",
closure);
#endif
GRPC_ERROR_UNREF(error);
grpc_pollset_ee_broadcast_event();

@ -33,18 +33,15 @@
static void exec_ctx_run(grpc_closure* closure, grpc_error_handle error) {
#ifndef NDEBUG
closure->scheduled = false;
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: %s [%s:%d]",
closure, closure->file_created, closure->line_created,
closure->run ? "run" : "scheduled", closure->file_initiated,
closure->line_initiated);
}
grpc_trace_closure.Log(GPR_DEBUG,
"running closure %p: created [%s:%d]: %s [%s:%d]",
closure, closure->file_created, closure->line_created,
closure->run ? "run" : "scheduled",
closure->file_initiated, closure->line_initiated);
#endif
closure->cb(closure->cb_arg, error);
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
}
grpc_trace_closure.Log(GPR_DEBUG, "closure %p finished", closure);
#endif
GRPC_ERROR_UNREF(error);
}
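The closure-trace statements above sit inside `#ifndef NDEBUG`, and `grpc_trace_closure` is a `DebugOnlyTraceFlag`. A minimal, hypothetical sketch (not the committed trace.h) of how such a flag can make `Log()` vanish entirely in builds where the flag is hard-coded off, which is what lets the compiler stop seeing conditionally unreachable branches:

// Hypothetical sketch. GRPC_SKETCH_TRACERS_OFF is a placeholder for whatever
// build condition compiles the flag out; it is not a real gRPC macro.
#include <grpc/support/log.h>

class DebugOnlyTraceFlagSketch {
 public:
#ifdef GRPC_SKETCH_TRACERS_OFF
  static constexpr bool enabled() { return false; }
  template <typename... Args>
  static void Log(Args&&...) {}  // whole call is optimized away
#else
  bool enabled() const { return enabled_; }
  template <typename... Args>
  void Log(const char* file, int line, gpr_log_severity severity,
           const char* format, Args... args) const {
    if (enabled_) gpr_log(file, line, severity, format, args...);
  }

 private:
  bool enabled_ = false;
#endif
};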

@ -95,11 +95,9 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
* sure that the shutdown error has been initialized properly before us
* referencing it. */
gpr_atm curr = gpr_atm_acq_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"LockfreeEvent::NotifyOn: %p curr=%" PRIxPTR " closure=%p", this,
curr, closure);
}
grpc_polling_trace.Log(
GPR_DEBUG, "LockfreeEvent::NotifyOn: %p curr=%" PRIxPTR " closure=%p",
this, curr, closure);
switch (curr) {
case kClosureNotReady: {
/* kClosureNotReady -> <closure>.
@ -164,11 +162,9 @@ bool LockfreeEvent::SetShutdown(grpc_error_handle shutdown_error) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG,
"LockfreeEvent::SetShutdown: %p curr=%" PRIxPTR " err=%s",
&state_, curr, grpc_error_std_string(shutdown_error).c_str());
}
grpc_polling_trace.Log(
GPR_DEBUG, "LockfreeEvent::SetShutdown: %p curr=%" PRIxPTR " err=%s",
&state_, curr, grpc_error_std_string(shutdown_error).c_str());
switch (curr) {
case kClosureReady:
case kClosureNotReady:
@ -214,10 +210,8 @@ void LockfreeEvent::SetReady() {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "LockfreeEvent::SetReady: %p curr=%" PRIxPTR, &state_,
curr);
}
grpc_polling_trace.Log(
GPR_DEBUG, "LockfreeEvent::SetReady: %p curr=%" PRIxPTR, &state_, curr);
switch (curr) {
case kClosureReady: {

@ -321,13 +321,12 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64 " outstanding_allocations=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
resource_user->free_pool, resource_user->outstanding_allocations);
}
grpc_resource_quota_trace.Log(
GPR_INFO,
"RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64 " outstanding_allocations=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
resource_user->free_pool, resource_user->outstanding_allocations);
if (gpr_atm_no_barrier_load(&resource_user->shutdown)) {
resource_user->allocating = false;
grpc_closure_list_fail_all(
@ -349,13 +348,11 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
resource_user->free_pool = 0;
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(), amt,
resource_quota->free_pool);
}
grpc_resource_quota_trace.Log(
GPR_INFO,
"RQ %s %s: grant alloc %" PRId64 " bytes; rq_free_pool -> %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(), amt,
resource_quota->free_pool);
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace) &&
resource_user->free_pool >= 0) {
gpr_log(GPR_INFO, "RQ %s %s: discard already satisfied alloc request",
@ -388,23 +385,21 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_user->free_pool = 0;
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(), amt,
resource_quota->free_pool);
}
grpc_resource_quota_trace.Log(
GPR_INFO,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(), amt,
resource_quota->free_pool);
gpr_mu_unlock(&resource_user->mu);
return true;
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"RQ %s %s: failed to reclaim_from_per_user_free_pool; "
"free_pool = %" PRId64 "; rq_free_pool = %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(),
resource_user->free_pool, resource_quota->free_pool);
}
grpc_resource_quota_trace.Log(
GPR_INFO,
"RQ %s %s: failed to reclaim_from_per_user_free_pool; "
"free_pool = %" PRId64 "; rq_free_pool = %" PRId64,
resource_quota->name.c_str(), resource_user->name.c_str(),
resource_user->free_pool, resource_quota->free_pool);
gpr_mu_unlock(&resource_user->mu);
}
}
@ -418,11 +413,10 @@ static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
: GRPC_RULIST_RECLAIMER_BENIGN;
grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == nullptr) return false;
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RQ %s %s: initiate %s reclamation",
resource_quota->name.c_str(), resource_user->name.c_str(),
destructive ? "destructive" : "benign");
}
grpc_resource_quota_trace.Log(GPR_INFO, "RQ %s %s: initiate %s reclamation",
resource_quota->name.c_str(),
resource_user->name.c_str(),
destructive ? "destructive" : "benign");
resource_quota->reclaiming = true;
grpc_resource_quota_ref_internal(resource_quota);
grpc_closure* c = resource_user->reclaimers[destructive];
@ -552,9 +546,7 @@ static void ru_post_destructive_reclaimer(void* ru,
}
static void ru_shutdown(void* ru, grpc_error_handle /*error*/) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU shutdown %p", ru);
}
grpc_resource_quota_trace.Log(GPR_INFO, "RU shutdown %p", ru);
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
gpr_mu_lock(&resource_user->mu);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, resource_user->reclaimers[0],
@ -592,10 +584,8 @@ static void ru_destroy(void* ru, grpc_error_handle /*error*/) {
}
grpc_resource_quota_unref_internal(resource_user->resource_quota);
gpr_mu_destroy(&resource_user->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) destroyed", resource_user->name.c_str(),
resource_user);
}
grpc_resource_quota_trace.Log(GPR_INFO, "RU '%s' (%p) destroyed",
resource_user->name.c_str(), resource_user);
delete resource_user;
}
@ -810,10 +800,8 @@ grpc_resource_user* grpc_resource_user_create(
resource_user->name = absl::StrCat(
"anonymous_resource_user_", reinterpret_cast<intptr_t>(resource_user));
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) created", resource_user->name.c_str(),
resource_user);
}
grpc_resource_quota_trace.Log(GPR_INFO, "RU '%s' (%p) created",
resource_user->name.c_str(), resource_user);
return resource_user;
}
@ -825,10 +813,9 @@ grpc_resource_quota* grpc_resource_user_quota(
static void ru_ref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
gpr_atm prior = gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) reffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, prior, prior + amount);
}
grpc_resource_quota_trace.Log(
GPR_INFO, "RU '%s' (%p) reffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, prior, prior + amount);
GPR_ASSERT(prior != 0);
}
@ -836,10 +823,9 @@ static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) unreffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, old, old - amount);
}
grpc_resource_quota_trace.Log(
GPR_INFO, "RU '%s' (%p) unreffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, old, old - amount);
if (old == amount) {
resource_user->resource_quota->combiner->Run(
&resource_user->destroy_closure, GRPC_ERROR_NONE);
@ -903,11 +889,10 @@ static bool resource_user_alloc_locked(grpc_resource_user* resource_user,
grpc_closure* optional_on_done) {
ru_ref_by(resource_user, static_cast<gpr_atm>(size));
resource_user->free_pool -= static_cast<int64_t>(size);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name.c_str(),
resource_user->name.c_str(), size, resource_user->free_pool);
}
grpc_resource_quota_trace.Log(
GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name.c_str(), resource_user->name.c_str(),
size, resource_user->free_pool);
if (GPR_LIKELY(resource_user->free_pool >= 0)) return true;
// Slow path: We need to wait for the free pool to refill.
if (optional_on_done != nullptr) {
@ -964,11 +949,10 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
GPR_ASSERT(prior >= static_cast<long>(size));
bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name.c_str(),
resource_user->name.c_str(), size, resource_user->free_pool);
}
grpc_resource_quota_trace.Log(
GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name.c_str(), resource_user->name.c_str(),
size, resource_user->free_pool);
bool is_bigger_than_zero = resource_user->free_pool > 0;
if (is_bigger_than_zero && was_zero_or_negative &&
!resource_user->added_to_free_pool) {
@ -990,11 +974,9 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
}
void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name.c_str(),
resource_user->name.c_str());
}
grpc_resource_quota_trace.Log(GPR_INFO, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name.c_str(),
resource_user->name.c_str());
resource_user->resource_quota->combiner->Run(
&resource_user->resource_quota->rq_reclamation_done_closure,
GRPC_ERROR_NONE);
@ -1046,15 +1028,13 @@ static size_t grpc_slice_allocator_adjust_allocation_length(
if (target > rqmax / 16 && rqmax > 1024) {
target = rqmax / 16;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(
GPR_INFO,
"SliceAllocator(%p) requested %zu bytes for (%s) intent, adjusted "
"allocation size to %zu",
slice_allocator, requested_length,
intent == grpc_slice_allocator_intent::kDefault ? "default" : "read",
target);
}
grpc_resource_quota_trace.Log(
GPR_INFO,
"SliceAllocator(%p) requested %zu bytes for (%s) intent, adjusted "
"allocation size to %zu",
slice_allocator, requested_length,
intent == grpc_slice_allocator_intent::kDefault ? "default" : "read",
target);
return target;
}
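Not every site in this file collapses: in rq_alloc above, the `else if` that logs "discard already satisfied alloc request" fuses the flag check with `free_pool >= 0`, so the explicit `GRPC_TRACE_FLAG_ENABLED` test has to stay. A small hedged sketch of the two resulting shapes (the helper, its arguments, and `example_trace` are invented for illustration):

#include <cstdint>
#include <inttypes.h>

#include <grpc/support/log.h>

#include "src/core/lib/debug/trace.h"

extern grpc_core::TraceFlag example_trace;  // hypothetical flag

void LogAllocOutcome(bool granted, int64_t free_pool) {
  if (granted) {
    // Unconditional statement: the enabled() test now lives inside Log().
    example_trace.Log(GPR_INFO, "grant alloc; free_pool -> %" PRId64,
                      free_pool);
  } else if (GRPC_TRACE_FLAG_ENABLED(example_trace) && free_pool >= 0) {
    // Flag check fused with another condition: it cannot be folded into
    // Log() without changing control flow, so the old pattern remains.
    gpr_log(GPR_INFO, "discard already satisfied alloc request");
  }
}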

@ -357,10 +357,9 @@ grpc_error_handle grpc_set_socket_tcp_user_timeout(
}
}
if (g_socket_supports_tcp_user_timeout.load() > 0) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "Enabling TCP_USER_TIMEOUT with a timeout of %d ms",
timeout);
}
grpc_tcp_trace.Log(GPR_INFO,
"Enabling TCP_USER_TIMEOUT with a timeout of %d ms",
timeout);
if (0 != setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &timeout,
sizeof(timeout))) {
gpr_log(GPR_ERROR, "setsockopt(TCP_USER_TIMEOUT) %s",
@ -380,9 +379,8 @@ grpc_error_handle grpc_set_socket_tcp_user_timeout(
}
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP_USER_TIMEOUT not supported for this platform");
}
grpc_tcp_trace.Log(GPR_INFO,
"TCP_USER_TIMEOUT not supported for this platform");
}
return GRPC_ERROR_NONE;
}

@ -82,9 +82,8 @@ static void CFStreamConnectCleanup(CFStreamConnect* connect) {
static void OnAlarm(void* arg, grpc_error_handle error) {
CFStreamConnect* connect = static_cast<CFStreamConnect*>(arg);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT :%p OnAlarm, error:%p", connect, error);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CLIENT_CONNECT :%p OnAlarm, error:%p", connect,
error);
gpr_mu_lock(&connect->mu);
grpc_closure* closure = connect->closure;
connect->closure = nil;
@ -103,9 +102,8 @@ static void OnAlarm(void* arg, grpc_error_handle error) {
static void OnOpen(void* arg, grpc_error_handle error) {
CFStreamConnect* connect = static_cast<CFStreamConnect*>(arg);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT :%p OnOpen, error:%p", connect, error);
}
grpc_tcp_trace.Log(GPR_DEBUG, "CLIENT_CONNECT :%p OnOpen, error:%p", connect,
error);
gpr_mu_lock(&connect->mu);
grpc_timer_cancel(&connect->alarm);
grpc_closure* closure = connect->closure;
@ -169,10 +167,9 @@ static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep,
gpr_ref_init(&connect->refcount, 1);
gpr_mu_init(&connect->mu);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p, %s: asynchronously connecting",
connect, connect->addr_name.c_str());
}
grpc_tcp_trace.Log(GPR_DEBUG,
"CLIENT_CONNECT: %p, %s: asynchronously connecting",
connect, connect->addr_name.c_str());
connect->slice_allocator = slice_allocator;
CFReadStreamRef read_stream;

@ -65,10 +65,9 @@ static void on_alarm(void* acp, grpc_error_handle error) {
int done;
grpc_custom_socket* socket = static_cast<grpc_custom_socket*>(acp);
grpc_custom_tcp_connect* connect = socket->connector;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name.c_str(), grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name.c_str(),
grpc_error_std_string(error).c_str());
if (error == GRPC_ERROR_NONE) {
/* error == NONE implies that the timer ran out, and wasn't cancelled. If
it was cancelled, then the handler that cancelled it also should close
@ -137,10 +136,9 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
socket->listener = nullptr;
connect->refs = 2;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %p %s: asynchronously connecting",
socket, connect->addr_name.c_str());
}
grpc_tcp_trace.Log(GPR_INFO,
"CLIENT_CONNECT: %p %s: asynchronously connecting", socket,
connect->addr_name.c_str());
GRPC_CLOSURE_INIT(&connect->on_alarm, on_alarm, socket,
grpc_schedule_on_exec_ctx);

@ -106,10 +106,9 @@ done:
static void tc_on_alarm(void* acp, grpc_error_handle error) {
int done;
async_connect* ac = static_cast<async_connect*>(acp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
ac->addr_str.c_str(), grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
ac->addr_str.c_str(),
grpc_error_std_string(error).c_str());
gpr_mu_lock(&ac->mu);
if (ac->fd != nullptr) {
grpc_fd_shutdown(
@ -145,10 +144,9 @@ static void on_writable(void* acp, grpc_error_handle error) {
GRPC_ERROR_REF(error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str.c_str(), grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str.c_str(),
grpc_error_std_string(error).c_str());
gpr_mu_lock(&ac->mu);
GPR_ASSERT(ac->fd);
@ -333,10 +331,9 @@ void grpc_tcp_client_create_from_prepared_fd(
grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str.c_str(), fdobj);
}
grpc_tcp_trace.Log(GPR_INFO,
"CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str.c_str(), fdobj);
gpr_mu_lock(&ac->mu);
GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);

@ -170,10 +170,8 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
static void tcp_read_allocation_done(void* tcpp, grpc_error_handle error) {
custom_tcp_endpoint* tcp = static_cast<custom_tcp_endpoint*>(tcpp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_std_string(error).c_str());
if (error == GRPC_ERROR_NONE) {
/* Before calling read, we allocate a buffer with exactly one slice
* to tcp->read_slices and wait for the callback indicating that the
@ -187,10 +185,8 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error_handle error) {
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
call_read_cb(tcp, GRPC_ERROR_REF(error));
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket,
grpc_error_std_string(error).c_str());
}
static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
@ -218,10 +214,8 @@ static void custom_write_callback(grpc_custom_socket* socket,
reinterpret_cast<custom_tcp_endpoint*>(socket->endpoint);
grpc_closure* cb = tcp->write_cb;
tcp->write_cb = nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "write complete on %p: error=%s", tcp->socket,
grpc_error_std_string(error).c_str());
TCP_UNREF(tcp, "write");
grpc_core::ExecCtx::Run(DEBUG_LOCATION, cb, error);
}
@ -288,10 +282,8 @@ static void endpoint_delete_from_pollset_set(grpc_endpoint* ep,
static void endpoint_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
if (!tcp->shutting_down) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket,
grpc_error_std_string(why).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket,
grpc_error_std_string(why).c_str());
tcp->shutting_down = true;
// grpc_core::ExecCtx::Run(DEBUG_LOCATION,tcp->read_cb,
// GRPC_ERROR_REF(why));
@ -354,9 +346,7 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
}
grpc_tcp_trace.Log(GPR_INFO, "Creating TCP endpoint %p", socket);
socket->refs++;
socket->endpoint = reinterpret_cast<grpc_endpoint*>(tcp);
tcp->socket = socket;

@ -444,18 +444,14 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p run", p);
gpr_mu_lock(p->pollset_mu);
grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
@ -470,17 +466,13 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
g_backup_poller = nullptr;
g_uncovered_notifications_pending = 0;
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
grpc_core::Executor::Run(&p->run_poller, GRPC_ERROR_NONE,
grpc_core::ExecutorType::DEFAULT,
grpc_core::ExecutorJobType::LONG);
@ -495,10 +487,8 @@ static void drop_uncovered(grpc_tcp* /*tcp*/) {
old_count = g_uncovered_notifications_pending--;
g_backup_poller_mu->Unlock();
GPR_ASSERT(old_count > 1);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p, old_count,
old_count - 1);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
old_count, old_count - 1);
}
// gRPC API considers a Write operation to be done the moment it clears ‘flow
@ -520,9 +510,7 @@ static void cover_self(grpc_tcp* tcp) {
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p create", p);
grpc_core::Executor::Run(
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
@ -532,24 +520,18 @@ static void cover_self(grpc_tcp* tcp) {
p = g_backup_poller;
g_backup_poller_mu->Unlock();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
old_count - 1, old_count);
}
grpc_tcp_trace.Log(GPR_INFO, "BACKUP_POLLER:%p add %p cnt %d->%d", p, tcp,
old_count - 1, old_count);
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
}
static void notify_on_read(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p notify_on_read", tcp);
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p notify_on_write", tcp);
if (!grpc_event_engine_run_in_background()) {
cover_self(tcp);
}
@ -558,10 +540,8 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p got_write: %s", arg,
grpc_error_std_string(error).c_str());
drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error);
}
@ -830,10 +810,8 @@ static void tcp_do_read(grpc_tcp* tcp) {
static void tcp_read_allocation_done(void* tcpp, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_std_string(error).c_str());
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
@ -848,9 +826,7 @@ static void tcp_continue_read(grpc_tcp* tcp) {
/* Wait for allocation only when there is no buffer left. */
if (tcp->incoming_buffer->length == 0 &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p alloc_slices", tcp);
if (GPR_UNLIKELY(!grpc_slice_allocator_allocate(
tcp->slice_allocator, tcp->target_length, 1,
grpc_slice_allocator_intent::kReadBuffer, tcp->incoming_buffer,
@ -859,18 +835,14 @@ static void tcp_continue_read(grpc_tcp* tcp) {
return;
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p do_read", tcp);
tcp_do_read(tcp);
}
static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p got_read: %s", tcp,
grpc_error_std_string(error).c_str());
if (GPR_UNLIKELY(error != GRPC_ERROR_NONE)) {
grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
@ -983,9 +955,8 @@ static bool tcp_write_with_timestamps(grpc_tcp* tcp, struct msghdr* msg,
uint32_t opt = grpc_core::kTimestampingSocketOptions;
if (setsockopt(tcp->fd, SOL_SOCKET, SO_TIMESTAMPING,
static_cast<void*>(&opt), sizeof(opt)) != 0) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_ERROR, "Failed to set timestamping options on the socket.");
}
grpc_tcp_trace.Log(GPR_ERROR,
"Failed to set timestamping options on the socket.");
return false;
}
tcp->bytes_counter = -1;
@ -1069,9 +1040,7 @@ struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
auto next_cmsg = CMSG_NXTHDR(msg, cmsg);
cmsghdr* opt_stats = nullptr;
if (next_cmsg == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_ERROR, "Received timestamp without extended error");
}
grpc_tcp_trace.Log(GPR_ERROR, "Received timestamp without extended error");
return cmsg;
}
@ -1081,9 +1050,8 @@ struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
opt_stats = next_cmsg;
next_cmsg = CMSG_NXTHDR(msg, opt_stats);
if (next_cmsg == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_ERROR, "Received timestamp without extended error");
}
grpc_tcp_trace.Log(GPR_ERROR,
"Received timestamp without extended error");
return opt_stats;
}
}
@ -1091,9 +1059,7 @@ struct cmsghdr* process_timestamp(grpc_tcp* tcp, msghdr* msg,
if (!(next_cmsg->cmsg_level == SOL_IP || next_cmsg->cmsg_level == SOL_IPV6) ||
!(next_cmsg->cmsg_type == IP_RECVERR ||
next_cmsg->cmsg_type == IPV6_RECVERR)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_ERROR, "Unexpected control message");
}
grpc_tcp_trace.Log(GPR_ERROR, "Unexpected control message");
return cmsg;
}
@ -1178,11 +1144,9 @@ static bool process_errors(grpc_tcp* tcp) {
} else {
/* Got a control message that is not a timestamp or zerocopy. Don't know
* how to handle this. */
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO,
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
}
grpc_tcp_trace.Log(GPR_INFO,
"unknown control message cmsg_level:%d cmsg_type:%d",
cmsg->cmsg_level, cmsg->cmsg_type);
return processed_err;
}
}
@ -1195,10 +1159,8 @@ static bool process_errors(grpc_tcp* tcp) {
static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p got_error: %s", tcp,
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p got_error: %s", tcp,
grpc_error_std_string(error).c_str());
if (error != GRPC_ERROR_NONE ||
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
@ -1520,9 +1482,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
: tcp_flush(tcp, &error);
if (!flush_result) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "write: delayed");
}
grpc_tcp_trace.Log(GPR_INFO, "write: delayed");
notify_on_write(tcp);
// tcp_flush does not populate error if it has returned false.
GPR_DEBUG_ASSERT(error == GRPC_ERROR_NONE);
@ -1530,9 +1490,8 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
cb = tcp->write_cb;
tcp->write_cb = nullptr;
tcp->current_zerocopy_send = nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "write: %s", grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "write: %s",
grpc_error_std_string(error).c_str());
// No need to take a ref on error since tcp_flush provides a ref.
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
TCP_UNREF(tcp, "write");
@ -1593,14 +1552,11 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write");
tcp->write_cb = cb;
tcp->current_zerocopy_send = zerocopy_send_record;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "write: delayed");
}
grpc_tcp_trace.Log(GPR_INFO, "write: delayed");
notify_on_write(tcp);
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "write: %s", grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "write: %s",
grpc_error_std_string(error).c_str());
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
}
}

@ -215,10 +215,8 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
GRPC_LOG_IF_ERROR("getpeername error", err);
GRPC_ERROR_UNREF(err);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s", sp->server,
peer_name_string.c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string.c_str());
ep = custom_tcp_endpoint_create(
socket,
grpc_slice_allocator_factory_create_slice_allocator(
@ -376,11 +374,9 @@ static grpc_error_handle tcp_server_add_port(grpc_tcp_server* s,
addr = &wildcard;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s,
grpc_sockaddr_to_string(addr, false).c_str(),
grpc_error_std_string(error).c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "SERVER %p add_port %s error=%s", s,
grpc_sockaddr_to_string(addr, false).c_str(),
grpc_error_std_string(error).c_str());
family = grpc_sockaddr_get_family(addr);
socket =
@ -416,9 +412,7 @@ static void tcp_server_start(grpc_tcp_server* server,
grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
grpc_tcp_listener* sp;
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "SERVER_START %p", server);
}
grpc_tcp_trace.Log(GPR_INFO, "SERVER_START %p", server);
GPR_ASSERT(on_accept_cb);
GPR_ASSERT(!server->on_accept_cb);
server->on_accept_cb = on_accept_cb;

@ -245,10 +245,8 @@ static void on_read(void* arg, grpc_error_handle err) {
}
std::string addr_str = grpc_sockaddr_to_uri(&addr);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s",
addr_str.c_str());
}
grpc_tcp_trace.Log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s",
addr_str.c_str());
std::string name = absl::StrCat("tcp-server-connection:", addr_str);
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);
@ -597,10 +595,9 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
}
grpc_set_socket_no_sigpipe_if_possible(fd);
std::string addr_str = grpc_sockaddr_to_uri(&addr);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "SERVER_CONNECT: incoming external connection: %s",
addr_str.c_str());
}
grpc_tcp_trace.Log(GPR_INFO,
"SERVER_CONNECT: incoming external connection: %s",
addr_str.c_str());
std::string name = absl::StrCat("tcp-server-connection:", addr_str);
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);
read_notifier_pollset =

@ -183,9 +183,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
grpc_winsocket* socket = tcp->socket;
grpc_winsocket_callback_info* info = &socket->read_info;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p on_read", tcp);
GRPC_ERROR_REF(error);
@ -219,9 +217,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
}
}
} else {
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p unref read_slice", tcp);
grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
error = tcp->shutting_down
? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@ -249,9 +245,7 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
WSABUF buffers[MAX_WSABUF_COUNT];
size_t i;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p win_read", tcp);
if (tcp->shutting_down) {
grpc_core::ExecCtx::Run(
@ -319,9 +313,7 @@ static void on_write(void* tcpp, grpc_error_handle error) {
grpc_winsocket_callback_info* info = &handle->write_info;
grpc_closure* cb;
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
}
grpc_tcp_trace.Log(GPR_INFO, "TCP:%p on_write", tcp);
GRPC_ERROR_REF(error);

@ -363,11 +363,9 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
timer->hash_table_next = nullptr;
#endif
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
gpr_log(GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]",
timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
closure->cb);
}
grpc_timer_trace.Log(
GPR_INFO, "TIMER %p: SET %" PRId64 " now %" PRId64 " call %p[%p]", timer,
deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
if (!g_shared_mutables.initialized) {
timer->pending = false;
@ -400,13 +398,12 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
timer->heap_index = INVALID_HEAP_INDEX;
list_join(&shard->list, timer);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
gpr_log(GPR_INFO,
" .. add to shard %d with queue_deadline_cap=%" PRId64
" => is_first_timer=%s",
static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
}
grpc_timer_trace.Log(GPR_INFO,
" .. add to shard %d with queue_deadline_cap=%" PRId64
" => is_first_timer=%s",
static_cast<int>(shard - g_shards),
shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
gpr_mu_unlock(&shard->mu);
/* Deadline may have decreased, we need to adjust the main queue. Note
@ -422,10 +419,8 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
grpc_timer_check. */
if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRId64,
shard->min_deadline);
}
grpc_timer_trace.Log(GPR_INFO, " .. old shard min_deadline=%" PRId64,
shard->min_deadline);
if (deadline < shard->min_deadline) {
grpc_millis old_min_deadline = g_shard_queue[0]->min_deadline;
shard->min_deadline = deadline;
@ -466,10 +461,8 @@ static void timer_cancel(grpc_timer* timer) {
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
gpr_mu_lock(&shard->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
}
grpc_timer_trace.Log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false");
if (timer->pending) {
REMOVE_FROM_HASH_TABLE(timer);
@ -508,18 +501,16 @@ static bool refill_heap(timer_shard* shard, grpc_millis now) {
saturating_add(GPR_MAX(now, shard->queue_deadline_cap),
static_cast<grpc_millis>(deadline_delta * 1000.0));
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
}
grpc_timer_check_trace.Log(
GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRId64,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
for (timer = shard->list.next; timer != &shard->list; timer = next) {
next = timer->next;
if (timer->deadline < shard->queue_deadline_cap) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, " .. add timer with deadline %" PRId64 " to heap",
timer->deadline);
}
grpc_timer_check_trace.Log(
GPR_INFO, " .. add timer with deadline %" PRId64 " to heap",
timer->deadline);
list_remove(timer);
grpc_timer_heap_add(&shard->heap, timer);
}
@ -533,26 +524,21 @@ static bool refill_heap(timer_shard* shard, grpc_millis now) {
static grpc_timer* pop_one(timer_shard* shard, grpc_millis now) {
grpc_timer* timer;
for (;;) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
}
grpc_timer_check_trace.Log(
GPR_INFO, " .. shard[%d]: heap_empty=%s",
static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
if (grpc_timer_heap_is_empty(&shard->heap)) {
if (now < shard->queue_deadline_cap) return nullptr;
if (!refill_heap(shard, now)) return nullptr;
}
timer = grpc_timer_heap_top(&shard->heap);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO,
" .. check top timer deadline=%" PRId64 " now=%" PRId64,
timer->deadline, now);
}
grpc_timer_check_trace.Log(
GPR_INFO, " .. check top timer deadline=%" PRId64 " now=%" PRId64,
timer->deadline, now);
if (timer->deadline > now) return nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_trace)) {
gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late", timer,
now - timer->deadline);
}
grpc_timer_trace.Log(GPR_INFO, "TIMER %p: FIRE %" PRId64 "ms late", timer,
now - timer->deadline);
timer->pending = false;
grpc_timer_heap_pop(&shard->heap);
return timer;
@ -574,10 +560,8 @@ static size_t pop_timers(timer_shard* shard, grpc_millis now,
}
*new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
static_cast<int>(shard - g_shards), n);
}
grpc_timer_check_trace.Log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
static_cast<int>(shard - g_shards), n);
return n;
}
@ -610,11 +594,10 @@ static grpc_timer_check_result run_some_expired_timers(
gpr_mu_lock(&g_shared_mutables.mu);
result = GRPC_TIMERS_CHECKED_AND_EMPTY;
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRId64,
static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
}
grpc_timer_check_trace.Log(GPR_INFO,
" .. shard[%d]->min_deadline = %" PRId64,
static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline);
while (g_shard_queue[0]->min_deadline < now ||
(now != GRPC_MILLIS_INF_FUTURE &&
@ -628,14 +611,12 @@ static grpc_timer_check_result run_some_expired_timers(
result = GRPC_TIMERS_FIRED;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO,
" .. result --> %d"
", shard[%d]->min_deadline %" PRId64 " --> %" PRId64
", now=%" PRId64,
result, static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline, new_min_deadline, now);
}
grpc_timer_check_trace.Log(
GPR_INFO,
" .. result --> %d"
", shard[%d]->min_deadline %" PRId64 " --> %" PRId64 ", now=%" PRId64,
result, static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline, new_min_deadline, now);
/* A grpc_timer_init() on the shard could intervene here, adding a new
timer that is earlier than new_min_deadline. However,
@ -695,10 +676,9 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
if (next != nullptr) {
*next = GPR_MIN(*next, min_timer);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64,
now, min_timer);
}
grpc_timer_check_trace.Log(
GPR_INFO, "TIMER CHECK SKIP: now=%" PRId64 " min_timer=%" PRId64, now,
min_timer);
return GRPC_TIMERS_CHECKED_AND_EMPTY;
}

@ -84,9 +84,7 @@ static void start_timer_thread_and_unlock(void) {
++g_waiter_count;
++g_thread_count;
gpr_mu_unlock(&g_mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "Spawn timer thread");
}
grpc_timer_check_trace.Log(GPR_INFO, "Spawn timer thread");
completed_thread* ct =
static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
ct->thd = grpc_core::Thread("grpc_global_timer", timer_thread, ct);
@ -120,17 +118,13 @@ static void run_some_timers() {
// if there's no thread waiting with a timeout, kick an existing untimed
// waiter so that the next deadline is not missed
if (!g_has_timed_waiter) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "kick untimed waiter");
}
grpc_timer_check_trace.Log(GPR_INFO, "kick untimed waiter");
gpr_cv_signal(&g_cv_wait);
}
gpr_mu_unlock(&g_mu);
}
// without our lock, flush the exec_ctx
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "flush exec_ctx");
}
grpc_timer_check_trace.Log(GPR_INFO, "flush exec_ctx");
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(&g_mu);
// garbage collect any threads that are dead
@ -200,11 +194,9 @@ static bool wait_until(grpc_millis next) {
gpr_cv_wait(&g_cv_wait, &g_mu,
grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked);
}
grpc_timer_check_trace.Log(
GPR_INFO, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation, g_kicked);
// if this was the timed waiter, then we need to check timers, and flag
// that there's now no timed waiter... we'll look for a replacement if
// there's work to do after checking timers (code above)
@ -246,9 +238,8 @@ static void timer_main_loop() {
Consequently, we can just sleep forever here and be happy at some
saved wakeup cycles. */
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "timers not checked: expect another thread to");
}
grpc_timer_check_trace.Log(
GPR_INFO, "timers not checked: expect another thread to");
next = GRPC_MILLIS_INF_FUTURE;
ABSL_FALLTHROUGH_INTENDED;
case GRPC_TIMERS_CHECKED_AND_EMPTY:
@ -272,9 +263,7 @@ static void timer_thread_cleanup(completed_thread* ct) {
ct->next = g_completed_threads;
g_completed_threads = ct;
gpr_mu_unlock(&g_mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "End timer thread");
}
grpc_timer_check_trace.Log(GPR_INFO, "End timer thread");
}
static void timer_thread(void* completed_thread_ptr) {
@ -313,20 +302,17 @@ void grpc_timer_manager_init(void) {
static void stop_threads(void) {
gpr_mu_lock(&g_mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded);
}
grpc_timer_check_trace.Log(GPR_INFO, "stop timer threads: threaded=%d",
g_threaded);
if (g_threaded) {
g_threaded = false;
gpr_cv_broadcast(&g_cv_wait);
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
grpc_timer_check_trace.Log(GPR_INFO, "num timer threads: %d",
g_thread_count);
while (g_thread_count > 0) {
gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (GRPC_TRACE_FLAG_ENABLED(grpc_timer_check_trace)) {
gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
}
grpc_timer_check_trace.Log(GPR_INFO, "num timer threads: %d",
g_thread_count);
gc_completed_threads();
}
}
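For completeness, the timer traces above are still switched on exactly as before this refactor. A hedged usage example, assuming the public `grpc_tracer_set_enabled()` entry point and assuming the tracer names registered for these flags are "timer" and "timer_check" (equivalent to launching with `GRPC_TRACE=timer,timer_check`):

#include <grpc/grpc.h>

int main() {
  grpc_init();
  grpc_tracer_set_enabled("timer", 1);        // grpc_timer_trace
  grpc_tracer_set_enabled("timer_check", 1);  // grpc_timer_check_trace
  // ... run client/server work; the Log() calls above now reach gpr_log ...
  grpc_shutdown();
  return 0;
}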

@ -51,19 +51,16 @@ class WorkSerializer::WorkSerializerImpl : public Orphanable {
void WorkSerializer::WorkSerializerImpl::Run(
std::function<void()> callback, const grpc_core::DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, "WorkSerializer::Run() %p Scheduling callback [%s:%d]",
this, location.file(), location.line());
}
grpc_work_serializer_trace.Log(
GPR_INFO, "WorkSerializer::Run() %p Scheduling callback [%s:%d]", this,
location.file(), location.line());
const size_t prev_size = size_.fetch_add(1);
// The work serializer should not have been orphaned.
GPR_DEBUG_ASSERT(prev_size > 0);
if (prev_size == 1) {
// There is no other closure executing right now on this work serializer.
// Execute this closure immediately.
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Executing immediately");
}
grpc_work_serializer_trace.Log(GPR_INFO, " Executing immediately");
callback();
// Loan this thread to the work serializer thread and drain the queue.
DrainQueue();
@ -72,22 +69,17 @@ void WorkSerializer::WorkSerializerImpl::Run(
new CallbackWrapper(std::move(callback), location);
// There already are closures executing on this work serializer. Simply add
// this closure to the queue.
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Scheduling on queue : item %p", cb_wrapper);
}
grpc_work_serializer_trace.Log(GPR_INFO, " Scheduling on queue : item %p",
cb_wrapper);
queue_.Push(&cb_wrapper->mpscq_node);
}
}
void WorkSerializer::WorkSerializerImpl::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, "WorkSerializer::Orphan() %p", this);
}
grpc_work_serializer_trace.Log(GPR_INFO, "WorkSerializer::Orphan() %p", this);
size_t prev_size = size_.fetch_sub(1);
if (prev_size == 1) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Destroying");
}
grpc_work_serializer_trace.Log(GPR_INFO, " Destroying");
delete this;
}
}
@ -98,24 +90,19 @@ void WorkSerializer::WorkSerializerImpl::Orphan() {
// is at least 1.
void WorkSerializer::WorkSerializerImpl::DrainQueue() {
while (true) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, "WorkSerializer::DrainQueue() %p", this);
}
grpc_work_serializer_trace.Log(GPR_INFO, "WorkSerializer::DrainQueue() %p",
this);
size_t prev_size = size_.fetch_sub(1);
GPR_DEBUG_ASSERT(prev_size >= 1);
// It is possible that while draining the queue, one of the callbacks ended
// up orphaning the work serializer. In that case, delete the object.
if (prev_size == 1) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Queue Drained. Destroying");
}
grpc_work_serializer_trace.Log(GPR_INFO, " Queue Drained. Destroying");
delete this;
return;
}
if (prev_size == 2) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Queue Drained");
}
grpc_work_serializer_trace.Log(GPR_INFO, " Queue Drained");
return;
}
// There is at least one callback on the queue. Pop the callback from the
@@ -126,15 +113,12 @@ void WorkSerializer::WorkSerializerImpl::DrainQueue() {
queue_.PopAndCheckEnd(&empty_unused))) == nullptr) {
// This can happen either due to a race condition within the mpscq
// implementation or because of a race with Run()
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Queue returned nullptr, trying again");
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
gpr_log(GPR_INFO, " Running item %p : callback scheduled at [%s:%d]",
cb_wrapper, cb_wrapper->location.file(),
cb_wrapper->location.line());
grpc_work_serializer_trace.Log(GPR_INFO,
" Queue returned nullptr, trying again");
}
grpc_work_serializer_trace.Log(
GPR_INFO, " Running item %p : callback scheduled at [%s:%d]",
cb_wrapper, cb_wrapper->location.file(), cb_wrapper->location.line());
cb_wrapper->callback();
delete cb_wrapper;
}
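
The hunks above all apply the same conversion: an explicit `if (GRPC_TRACE_FLAG_ENABLED(flag)) { gpr_log(...); }` guard at each call site becomes a single `flag.Log(...)` call that performs the enabled check internally. The sketch below is illustrative only, not gRPC's actual `trace.h`; it assumes the printf-style `gpr_log` convention visible in these hunks, where a `GPR_INFO`-style macro supplies a file/line/severity prefix ahead of the format string, and every name ending in `Sketch` is invented for this example.

// Illustrative sketch only -- not gRPC's trace.h. A trace flag whose Log()
// folds the enabled() check into the call so call sites can drop the guard.
#include <cstdarg>
#include <cstdio>

enum SeveritySketch { kSeverityDebug, kSeverityInfo, kSeverityError };

class TraceFlagSketch {
 public:
  explicit TraceFlagSketch(bool enabled) : enabled_(enabled) {}
  bool enabled() const { return enabled_; }

  // No-op when the flag is disabled; otherwise formats and emits the message.
  // The (file, line, severity) prefix mirrors what a GPR_INFO-style macro
  // would pass at the call sites shown above.
  void Log(const char* file, int line, SeveritySketch severity,
           const char* format, ...) const {
    if (!enabled_) return;
    va_list args;
    va_start(args, format);
    std::fprintf(stderr, "%s:%d [%d] ", file, line, static_cast<int>(severity));
    std::vfprintf(stderr, format, args);
    std::fputc('\n', stderr);
    va_end(args);
  }

 private:
  const bool enabled_;
};

// Usage, mirroring the shape of the converted call sites:
//   timer_check_sketch.Log(__FILE__, __LINE__, kSeverityInfo,
//                          "num timer threads: %d", thread_count);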

@@ -499,12 +499,11 @@ grpc_call_credentials* grpc_google_refresh_token_credentials_create(
const char* json_refresh_token, void* reserved) {
grpc_auth_refresh_token token =
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace)) {
gpr_log(GPR_INFO,
"grpc_refresh_token_credentials_create(json_refresh_token=%s, "
"reserved=%p)",
create_loggable_refresh_token(&token).c_str(), reserved);
}
grpc_api_trace.Log(
GPR_INFO,
"grpc_refresh_token_credentials_create(json_refresh_token=%s, "
"reserved=%p)",
create_loggable_refresh_token(&token).c_str(), reserved);
GPR_ASSERT(reserved == nullptr);
return grpc_refresh_token_credentials_create_from_auth_refresh_token(token)
.release();

@@ -132,12 +132,11 @@ static void plugin_md_request_metadata_ready(void* request,
GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP);
grpc_plugin_credentials::pending_request* r =
static_cast<grpc_plugin_credentials::pending_request*>(request);
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"asynchronously",
r->creds, r);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"asynchronously",
r->creds, r);
// Remove request from pending list if not previously cancelled.
r->creds->pending_request_complete(r);
// If it has not been cancelled, process it.
@@ -145,12 +144,12 @@ static void plugin_md_request_metadata_ready(void* request,
grpc_error_handle error =
process_plugin_result(r, md, num_md, status, error_details);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_request_metadata, error);
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin was previously "
"cancelled",
r->creds, r);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO,
"plugin_credentials[%p]: request %p: plugin was previously "
"cancelled",
r->creds, r);
gpr_free(r);
}
@@ -175,10 +174,9 @@ bool grpc_plugin_credentials::get_request_metadata(
pending_requests_ = request;
gpr_mu_unlock(&mu_);
// Invoke the plugin. The callback holds a ref to us.
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin",
this, request);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin", this,
request);
Ref().release();
grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX];
size_t num_creds_md = 0;
@@ -187,12 +185,11 @@ bool grpc_plugin_credentials::get_request_metadata(
if (!plugin_.get_metadata(
plugin_.state, context, plugin_md_request_metadata_ready, request,
creds_md, &num_creds_md, &status, &error_details)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin will return "
"asynchronously",
this, request);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO,
"plugin_credentials[%p]: request %p: plugin will return "
"asynchronously",
this, request);
return false; // Asynchronous return.
}
// Returned synchronously.
@@ -202,20 +199,18 @@ bool grpc_plugin_credentials::get_request_metadata(
// asynchronously by plugin_cancel_get_request_metadata(), so return
// false. Otherwise, process the result.
if (request->cancelled) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p was cancelled, error "
"will be returned asynchronously",
this, request);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO,
"plugin_credentials[%p]: request %p was cancelled, error "
"will be returned asynchronously",
this, request);
retval = false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"synchronously",
this, request);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO,
"plugin_credentials[%p]: request %p: plugin returned "
"synchronously",
this, request);
*error = process_plugin_result(request, creds_md, num_creds_md, status,
error_details);
}
@@ -236,10 +231,9 @@ void grpc_plugin_credentials::cancel_get_request_metadata(
for (pending_request* pending_request = pending_requests_;
pending_request != nullptr; pending_request = pending_request->next) {
if (pending_request->md_array == md_array) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_plugin_credentials_trace)) {
gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", this,
pending_request);
}
grpc_plugin_credentials_trace.Log(
GPR_INFO, "plugin_credentials[%p]: cancelling request %p", this,
pending_request);
pending_request->cancelled = true;
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
pending_request->on_request_metadata,

@@ -744,10 +744,9 @@ static void cancel_with_status(grpc_call* c, grpc_status_code status,
}
static void set_final_status(grpc_call* call, grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_error_trace)) {
gpr_log(GPR_DEBUG, "set_final_status %s", call->is_client ? "CLI" : "SVR");
gpr_log(GPR_DEBUG, "%s", grpc_error_std_string(error).c_str());
}
grpc_call_error_trace.Log(GPR_DEBUG, "set_final_status %s: %s",
call->is_client ? "CLI" : "SVR",
grpc_error_std_string(error).c_str());
if (call->is_client) {
grpc_error_get_status(error, call->send_deadline,
call->final_op.client.status,

@@ -1152,9 +1152,7 @@ void Server::ChannelData::Destroy() {
server_->MaybeFinishShutdown();
GRPC_CLOSURE_INIT(&finish_destroy_channel_closure_, FinishDestroy, this,
grpc_schedule_on_exec_ctx);
if (GRPC_TRACE_FLAG_ENABLED(grpc_server_channel_trace)) {
gpr_log(GPR_INFO, "Disconnected client");
}
grpc_server_channel_trace.Log(GPR_INFO, "Disconnected client");
grpc_transport_op* op =
grpc_make_transport_op(&finish_destroy_channel_closure_);
op->set_accept_stream = true;

@@ -46,21 +46,17 @@ grpc_millis BdpEstimator::CompletePing() {
1e-9 * static_cast<double>(dt_ts.tv_nsec);
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
gpr_log(GPR_INFO,
"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
bw_est_ / 125000.0);
}
grpc_bdp_estimator_trace.Log(GPR_INFO,
"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt,
bw / 125000.0, bw_est_ / 125000.0);
GPR_ASSERT(ping_state_ == PingState::STARTED);
if (accumulator_ > 2 * estimate_ / 3 && bw > bw_est_) {
estimate_ = GPR_MAX(accumulator_, estimate_ * 2);
bw_est_ = bw;
if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
gpr_log(GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_,
estimate_);
}
grpc_bdp_estimator_trace.Log(
GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_, estimate_);
inter_ping_delay_ /= 2; // if the ping estimate changes,
// exponentially get faster at probing
} else if (inter_ping_delay_ < 10000) {
@@ -74,10 +70,8 @@ grpc_millis BdpEstimator::CompletePing() {
}
if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0;
if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_,
inter_ping_delay_);
}
grpc_bdp_estimator_trace.Log(GPR_INFO, "bdp[%s]:update_inter_time to %dms",
name_, inter_ping_delay_);
}
ping_state_ = PingState::UNSCHEDULED;
accumulator_ = 0;

@@ -49,10 +49,9 @@ class BdpEstimator {
// grpc_bdp_estimator_add_incoming_bytes once a ping has been scheduled by a
// transport (but not necessarily started)
void SchedulePing() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
gpr_log(GPR_INFO, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
grpc_bdp_estimator_trace.Log(GPR_INFO,
"bdp[%s]:sched acc=%" PRId64 " est=%" PRId64,
name_, accumulator_, estimate_);
GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED);
ping_state_ = PingState::SCHEDULED;
accumulator_ = 0;
@@ -62,10 +61,9 @@ class BdpEstimator {
// once
// the ping is on the wire
void StartPing() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace)) {
gpr_log(GPR_INFO, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_);
}
grpc_bdp_estimator_trace.Log(GPR_INFO,
"bdp[%s]:start acc=%" PRId64 " est=%" PRId64,
name_, accumulator_, estimate_);
GPR_ASSERT(ping_state_ == PingState::SCHEDULED);
ping_state_ = PingState::STARTED;
ping_start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);

@@ -75,11 +75,10 @@ class AsyncConnectivityStateWatcherInterface::Notifier {
private:
static void SendNotification(void* arg, grpc_error_handle /*ignored*/) {
Notifier* self = static_cast<Notifier*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "watcher %p: delivering async notification for %s (%s)",
self->watcher_.get(), ConnectivityStateName(self->state_),
self->status_.ToString().c_str());
}
grpc_connectivity_state_trace.Log(
GPR_INFO, "watcher %p: delivering async notification for %s (%s)",
self->watcher_.get(), ConnectivityStateName(self->state_),
self->status_.ToString().c_str());
self->watcher_->OnConnectivityStateChange(self->state_, self->status_);
delete self;
}
@@ -105,12 +104,11 @@ ConnectivityStateTracker::~ConnectivityStateTracker() {
state_.load(std::memory_order_relaxed);
if (current_state == GRPC_CHANNEL_SHUTDOWN) return;
for (const auto& p : watchers_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, p.first, ConnectivityStateName(current_state),
ConnectivityStateName(GRPC_CHANNEL_SHUTDOWN));
}
grpc_connectivity_state_trace.Log(
GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, p.first, ConnectivityStateName(current_state),
ConnectivityStateName(GRPC_CHANNEL_SHUTDOWN));
p.second->Notify(GRPC_CHANNEL_SHUTDOWN, absl::Status());
}
}
@@ -118,19 +116,17 @@ ConnectivityStateTracker::~ConnectivityStateTracker() {
void ConnectivityStateTracker::AddWatcher(
grpc_connectivity_state initial_state,
OrphanablePtr<ConnectivityStateWatcherInterface> watcher) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "ConnectivityStateTracker %s[%p]: add watcher %p", name_,
this, watcher.get());
}
grpc_connectivity_state_trace.Log(
GPR_INFO, "ConnectivityStateTracker %s[%p]: add watcher %p", name_, this,
watcher.get());
grpc_connectivity_state current_state =
state_.load(std::memory_order_relaxed);
if (initial_state != current_state) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, watcher.get(), ConnectivityStateName(initial_state),
ConnectivityStateName(current_state));
}
grpc_connectivity_state_trace.Log(
GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, watcher.get(), ConnectivityStateName(initial_state),
ConnectivityStateName(current_state));
watcher->Notify(current_state, status_);
}
// If we're in state SHUTDOWN, don't add the watcher, so that it will
@@ -142,10 +138,9 @@ void ConnectivityStateTracker::AddWatcher(
void ConnectivityStateTracker::RemoveWatcher(
ConnectivityStateWatcherInterface* watcher) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "ConnectivityStateTracker %s[%p]: remove watcher %p",
name_, this, watcher);
}
grpc_connectivity_state_trace.Log(
GPR_INFO, "ConnectivityStateTracker %s[%p]: remove watcher %p", name_,
this, watcher);
watchers_.erase(watcher);
}
@@ -155,20 +150,18 @@ void ConnectivityStateTracker::SetState(grpc_connectivity_state state,
grpc_connectivity_state current_state =
state_.load(std::memory_order_relaxed);
if (state == current_state) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "ConnectivityStateTracker %s[%p]: %s -> %s (%s, %s)",
name_, this, ConnectivityStateName(current_state),
ConnectivityStateName(state), reason, status.ToString().c_str());
}
grpc_connectivity_state_trace.Log(
GPR_INFO, "ConnectivityStateTracker %s[%p]: %s -> %s (%s, %s)", name_,
this, ConnectivityStateName(current_state), ConnectivityStateName(state),
reason, status.ToString().c_str());
state_.store(state, std::memory_order_relaxed);
status_ = status;
for (const auto& p : watchers_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, p.first, ConnectivityStateName(current_state),
ConnectivityStateName(state));
}
grpc_connectivity_state_trace.Log(
GPR_INFO,
"ConnectivityStateTracker %s[%p]: notifying watcher %p: %s -> %s",
name_, this, p.first, ConnectivityStateName(current_state),
ConnectivityStateName(state));
p.second->Notify(state, status);
}
// If the new state is SHUTDOWN, orphan all of the watchers. This
@@ -178,10 +171,9 @@ grpc_connectivity_state ConnectivityStateTracker::state() const {
grpc_connectivity_state ConnectivityStateTracker::state() const {
grpc_connectivity_state state = state_.load(std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "ConnectivityStateTracker %s[%p]: get current state: %s",
name_, this, ConnectivityStateName(state));
}
grpc_connectivity_state_trace.Log(
GPR_INFO, "ConnectivityStateTracker %s[%p]: get current state: %s", name_,
this, ConnectivityStateName(state));
return state;
}

@@ -81,10 +81,10 @@ void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
#ifndef NDEBUG
inline void grpc_stream_ref(grpc_stream_refcount* refcount,
const char* reason) {
if (grpc_trace_stream_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%s %p:%p REF %s", refcount->object_type, refcount,
refcount->destroy.cb_arg, reason);
}
grpc_trace_stream_refcount.Log(GPR_DEBUG, "%s %p:%p REF %s",
refcount->object_type, refcount,
refcount->destroy.cb_arg, reason);
refcount->refs.RefNonZero(DEBUG_LOCATION, reason);
}
#else
@@ -98,10 +98,10 @@ void grpc_stream_destroy(grpc_stream_refcount* refcount);
#ifndef NDEBUG
inline void grpc_stream_unref(grpc_stream_refcount* refcount,
const char* reason) {
if (grpc_trace_stream_refcount.enabled()) {
gpr_log(GPR_DEBUG, "%s %p:%p UNREF %s", refcount->object_type, refcount,
refcount->destroy.cb_arg, reason);
}
grpc_trace_stream_refcount.Log(GPR_DEBUG, "%s %p:%p UNREF %s",
refcount->object_type, refcount,
refcount->destroy.cb_arg, reason);
if (GPR_UNLIKELY(refcount->refs.Unref(DEBUG_LOCATION, reason))) {
grpc_stream_destroy(refcount);
}

@@ -598,11 +598,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;
}
if (GRPC_TRACE_FLAG_ENABLED(tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "%s prepared %s.",
impl->is_client ? "Client" : "Server",
tsi_fake_handshake_message_to_string(impl->next_message_to_send));
}
tsi_tracing_enabled.Log(
GPR_INFO, "%s prepared %s.", impl->is_client ? "Client" : "Server",
tsi_fake_handshake_message_to_string(impl->next_message_to_send));
impl->next_message_to_send = next_message_to_send;
}
result = tsi_fake_frame_encode(bytes, bytes_size, &impl->outgoing_frame);
@@ -610,9 +608,7 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (!impl->is_client &&
impl->next_message_to_send == TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
/* We're done. */
if (GRPC_TRACE_FLAG_ENABLED(tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "Server is done.");
}
tsi_tracing_enabled.Log(GPR_INFO, "Server is done.");
impl->result = TSI_OK;
} else {
impl->needs_incoming_message = 1;
@@ -649,17 +645,15 @@ static tsi_result fake_handshaker_process_bytes_from_peer(
tsi_fake_handshake_message_to_string(received_msg),
tsi_fake_handshake_message_to_string(expected_msg));
}
if (GRPC_TRACE_FLAG_ENABLED(tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "%s received %s.", impl->is_client ? "Client" : "Server",
tsi_fake_handshake_message_to_string(received_msg));
}
tsi_tracing_enabled.Log(GPR_INFO, "%s received %s.",
impl->is_client ? "Client" : "Server",
tsi_fake_handshake_message_to_string(received_msg));
tsi_fake_frame_reset(&impl->incoming_frame, 0 /* needs_draining */);
impl->needs_incoming_message = 0;
if (impl->next_message_to_send == TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
/* We're done. */
if (GRPC_TRACE_FLAG_ENABLED(tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "%s is done.", impl->is_client ? "Client" : "Server");
}
tsi_tracing_enabled.Log(GPR_INFO, "%s is done.",
impl->is_client ? "Client" : "Server");
impl->result = TSI_OK;
}
return TSI_OK;

@@ -165,10 +165,10 @@ static int alpn_select_cb(SSL* /*ssl*/, const uint8_t** out, uint8_t* out_len,
static void ssl_log_where_info(const SSL* ssl, int where, int flag,
const char* msg) {
if ((where & flag) &&
GRPC_TRACE_FLAG_ENABLED(client_ssl_tsi_tracing_enabled)) {
gpr_log(GPR_INFO, "%20.20s - %30.30s - %5.10s", msg,
SSL_state_string_long(ssl), SSL_state_string(ssl));
if (where & flag) {
client_ssl_tsi_tracing_enabled.Log(GPR_INFO, "%20.20s - %30.30s - %5.10s",
msg, SSL_state_string_long(ssl),
SSL_state_string(ssl));
}
}

@@ -1732,6 +1732,7 @@ src/core/lib/gpr/env_posix.cc \
src/core/lib/gpr/env_windows.cc \
src/core/lib/gpr/log.cc \
src/core/lib/gpr/log_android.cc \
src/core/lib/gpr/log_internal.h \
src/core/lib/gpr/log_linux.cc \
src/core/lib/gpr/log_posix.cc \
src/core/lib/gpr/log_windows.cc \

@@ -1570,6 +1570,7 @@ src/core/lib/gpr/env_posix.cc \
src/core/lib/gpr/env_windows.cc \
src/core/lib/gpr/log.cc \
src/core/lib/gpr/log_android.cc \
src/core/lib/gpr/log_internal.h \
src/core/lib/gpr/log_linux.cc \
src/core/lib/gpr/log_posix.cc \
src/core/lib/gpr/log_windows.cc \

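
The two manifest hunks above register a new header, src/core/lib/gpr/log_internal.h, in the generated source lists. A plausible motivation, stated here as an assumption rather than something this diff confirms, is that a `Log()` wrapper taking `...` needs a va_list entry point into the logging layer in order to forward its arguments. The helper below is a hypothetical sketch of that shape; the names are invented for illustration and are not gRPC APIs.

// Hypothetical sketch: a va_list-based logging entry point that a variadic
// wrapper can forward to. Names are invented; this is not gRPC code.
#include <cstdarg>
#include <cstdio>

void vlog_sketch(const char* file, int line, const char* severity,
                 const char* format, va_list args) {
  std::fprintf(stderr, "%s %s:%d: ", severity, file, line);
  std::vfprintf(stderr, format, args);
  std::fputc('\n', stderr);
}

// A printf-style front end captures its varargs once and hands them down.
void log_sketch(const char* file, int line, const char* severity,
                const char* format, ...) {
  va_list args;
  va_start(args, format);
  vlog_sketch(file, line, severity, format, args);
  va_end(args);
}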