Merge branch 'master' into FakeStatsPluginLocks

pull/37409/head
Yash Tibrewal 6 months ago
commit d60b50273f
  1. 25
      BUILD
  2. 13
      CMakeLists.txt
  3. 2
      Rakefile
  4. 5
      bazel/experiments.bzl
  5. 1
      bazel/grpc_build_system.bzl
  6. 12
      bazel/grpc_deps.bzl
  7. 13
      build_autogenerated.yaml
  8. 9
      examples/python/helloworld/helloworld_pb2_grpc.py
  9. 2
      gRPC-C++.podspec
  10. 2
      grpc.def
  11. 2
      grpc.gemspec
  12. 18
      include/grpc/support/log.h
  13. 26
      include/grpcpp/support/callback_common.h
  14. 58
      include/grpcpp/support/global_callback_hook.h
  15. 10
      src/compiler/python_generator.cc
  16. 2
      src/core/BUILD
  17. 129
      src/core/client_channel/retry_filter_legacy_call_data.cc
  18. 8
      src/core/client_channel/subchannel.cc
  19. 10
      src/core/ext/filters/backend_metrics/backend_metric_filter.cc
  20. 9
      src/core/ext/filters/http/message_compress/compression_filter.cc
  21. 9
      src/core/ext/filters/message_size/message_size_filter.cc
  22. 2
      src/core/ext/transport/binder/wire_format/wire_writer.cc
  23. 5
      src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc
  24. 5
      src/core/ext/transport/chaotic_good/server_transport.cc
  25. 5
      src/core/ext/transport/chttp2/transport/frame_ping.cc
  26. 5
      src/core/ext/transport/chttp2/transport/hpack_encoder.cc
  27. 27
      src/core/handshaker/handshaker.cc
  28. 14
      src/core/handshaker/security/secure_endpoint.cc
  29. 89
      src/core/lib/channel/promise_based_filter.cc
  30. 6
      src/core/lib/debug/trace_impl.h
  31. 129
      src/core/lib/event_engine/ares_resolver.cc
  32. 8
      src/core/lib/event_engine/ares_resolver.h
  33. 11
      src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc
  34. 8
      src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc
  35. 48
      src/core/lib/experiments/experiments.cc
  36. 19
      src/core/lib/experiments/experiments.h
  37. 27
      src/core/lib/experiments/experiments.yaml
  38. 4
      src/core/lib/experiments/rollouts.yaml
  39. 36
      src/core/lib/gprpp/work_serializer.cc
  40. 9
      src/core/lib/iomgr/ev_apple.cc
  41. 110
      src/core/lib/iomgr/ev_epoll1_linux.cc
  42. 8
      src/core/lib/iomgr/ev_poll_posix.cc
  43. 85
      src/core/lib/iomgr/ev_posix.cc
  44. 75
      src/core/lib/iomgr/executor.cc
  45. 49
      src/core/lib/iomgr/tcp_posix.cc
  46. 5
      src/core/lib/iomgr/tcp_server_posix.cc
  47. 16
      src/core/lib/iomgr/tcp_windows.cc
  48. 9
      src/core/lib/iomgr/timer_manager.cc
  49. 23
      src/core/lib/promise/for_each.h
  50. 20
      src/core/lib/promise/inter_activity_latch.h
  51. 40
      src/core/lib/promise/latch.h
  52. 23
      src/core/lib/resource_quota/memory_quota.cc
  53. 9
      src/core/lib/security/authorization/grpc_authorization_policy_provider.cc
  54. 9
      src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
  55. 7
      src/core/lib/surface/call.cc
  56. 19
      src/core/lib/surface/call.h
  57. 16
      src/core/lib/surface/filter_stack_call.cc
  58. 14
      src/core/lib/transport/bdp_estimator.cc
  59. 8
      src/core/lib/transport/call_filters.cc
  60. 69
      src/core/load_balancing/grpclb/grpclb.cc
  61. 48
      src/core/load_balancing/health_check_client.cc
  62. 63
      src/core/load_balancing/outlier_detection/outlier_detection.cc
  63. 151
      src/core/load_balancing/pick_first/pick_first.cc
  64. 72
      src/core/load_balancing/priority/priority.cc
  65. 26
      src/core/load_balancing/ring_hash/ring_hash.cc
  66. 253
      src/core/load_balancing/rls/rls.cc
  67. 34
      src/core/load_balancing/round_robin/round_robin.cc
  68. 49
      src/core/load_balancing/weighted_round_robin/weighted_round_robin.cc
  69. 57
      src/core/load_balancing/weighted_target/weighted_target.cc
  70. 47
      src/core/load_balancing/xds/cds.cc
  71. 19
      src/core/load_balancing/xds/xds_cluster_impl.cc
  72. 52
      src/core/load_balancing/xds/xds_cluster_manager.cc
  73. 56
      src/core/load_balancing/xds/xds_override_host.cc
  74. 23
      src/core/load_balancing/xds/xds_wrr_locality.cc
  75. 105
      src/core/resolver/dns/c_ares/dns_resolver_ares.cc
  76. 196
      src/core/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc
  77. 199
      src/core/resolver/dns/c_ares/grpc_ares_wrapper.cc
  78. 7
      src/core/resolver/dns/c_ares/grpc_ares_wrapper.h
  79. 35
      src/core/resolver/xds/xds_dependency_manager.cc
  80. 23
      src/core/resolver/xds/xds_resolver.cc
  81. 8
      src/core/server/xds_server_config_fetcher.cc
  82. 8
      src/core/tsi/fake_transport_security.cc
  83. 1
      src/core/util/android/log.cc
  84. 44
      src/core/util/http_client/httpcli.cc
  85. 19
      src/core/util/http_client/httpcli.h
  86. 19
      src/core/util/latent_see.cc
  87. 89
      src/core/util/latent_see.h
  88. 1
      src/core/util/linux/log.cc
  89. 11
      src/core/util/log.cc
  90. 1
      src/core/util/posix/log.cc
  91. 1
      src/core/util/windows/log.cc
  92. 20
      src/core/xds/grpc/xds_client_grpc.cc
  93. 89
      src/core/xds/xds_client/xds_client.cc
  94. 18
      src/core/xds/xds_client/xds_client_stats.cc
  95. 36
      src/cpp/client/global_callback_hook.cc
  96. 1
      src/cpp/ext/otel/BUILD
  97. 16
      src/cpp/ext/otel/otel_plugin.cc
  98. 1
      src/cpp/ext/otel/otel_plugin.h
  99. 113
      src/cpp/server/backend_metric_recorder.cc
  100. 1
      src/python/grpcio/grpc/_cython/_cygrpc/channel.pxd.pxi
  101. Some files were not shown because too many files have changed in this diff Show More

25
BUILD

@ -907,6 +907,7 @@ grpc_cc_library(
],
visibility = ["@grpc:grpc++_public_hdrs"],
deps = [
"global_callback_hook",
"grpc_public_hdrs",
"//src/core:gpr_atm",
],
@ -951,6 +952,7 @@ grpc_cc_library(
tags = ["nofixdeps"],
visibility = ["@grpc:public"],
deps = [
"global_callback_hook",
"grpc++_base",
"//src/core:gpr_atm",
"//src/core:slice",
@ -1260,6 +1262,7 @@ grpc_cc_library(
deps = [
"channel_arg_names",
"generic_stub_internal",
"global_callback_hook",
"gpr",
"grpc++_base_unsecure",
"grpc++_codegen_proto",
@ -2455,6 +2458,7 @@ grpc_cc_library(
"config",
"exec_ctx",
"generic_stub_internal",
"global_callback_hook",
"gpr",
"grpc",
"grpc++_codegen_proto",
@ -2544,6 +2548,7 @@ grpc_cc_library(
"config",
"exec_ctx",
"generic_stub_internal",
"global_callback_hook",
"gpr",
"grpc_base",
"grpc_core_credentials_header",
@ -3938,6 +3943,7 @@ grpc_cc_library(
deps = [
"config",
"debug_location",
"event_engine_base_hdrs",
"exec_ctx",
"gpr",
"grpc_base",
@ -3949,17 +3955,16 @@ grpc_cc_library(
"orphanable",
"ref_counted_ptr",
"resource_quota_api",
"sockaddr_utils",
"uri_parser",
"//src/core:channel_args",
"//src/core:channel_args_preconditioning",
"//src/core:closure",
"//src/core:error",
"//src/core:error_utils",
"//src/core:event_engine_tcp_socket_utils",
"//src/core:handshaker_registry",
"//src/core:iomgr_fwd",
"//src/core:pollset_set",
"//src/core:resolved_address",
"//src/core:resource_quota",
"//src/core:slice",
"//src/core:slice_refcount",
@ -4913,6 +4918,22 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "global_callback_hook",
srcs = [
"src/cpp/client/global_callback_hook.cc",
],
hdrs = [
"include/grpcpp/support/global_callback_hook.h",
],
external_deps = [
"absl/base:no_destructor",
"absl/log:check",
"absl/functional:function_ref",
],
language = "c++",
)
# TODO(yashykt): Remove the UPB definitions from here once they are no longer needed
### UPB Targets

13
CMakeLists.txt generated

@ -4182,6 +4182,7 @@ add_library(grpc++
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/client/xds_credentials.cc
@ -4465,6 +4466,7 @@ foreach(_hdr
include/grpcpp/support/client_callback.h
include/grpcpp/support/client_interceptor.h
include/grpcpp/support/config.h
include/grpcpp/support/global_callback_hook.h
include/grpcpp/support/interceptor.h
include/grpcpp/support/message_allocator.h
include/grpcpp/support/method_handler.h
@ -4938,6 +4940,7 @@ add_library(grpc++_unsecure
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/common/alarm.cc
src/cpp/common/channel_arguments.cc
@ -5209,6 +5212,7 @@ foreach(_hdr
include/grpcpp/support/client_callback.h
include/grpcpp/support/client_interceptor.h
include/grpcpp/support/config.h
include/grpcpp/support/global_callback_hook.h
include/grpcpp/support/interceptor.h
include/grpcpp/support/message_allocator.h
include/grpcpp/support/method_handler.h
@ -8266,6 +8270,7 @@ add_executable(binder_transport_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -13405,6 +13410,7 @@ add_executable(endpoint_binder_pool_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -14260,6 +14266,7 @@ add_executable(fake_binder_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -32271,6 +32278,7 @@ add_executable(transport_stream_receiver_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -33157,6 +33165,7 @@ add_executable(wire_reader_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -33267,6 +33276,7 @@ add_executable(wire_writer_test
src/cpp/client/create_channel.cc
src/cpp/client/create_channel_internal.cc
src/cpp/client/create_channel_posix.cc
src/cpp/client/global_callback_hook.cc
src/cpp/client/insecure_credentials.cc
src/cpp/client/secure_credentials.cc
src/cpp/common/alarm.cc
@ -33847,6 +33857,7 @@ add_executable(xds_client_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.h
src/cpp/client/global_callback_hook.cc
src/cpp/util/status.cc
test/core/xds/xds_client_test.cc
test/core/xds/xds_transport_fake.cc
@ -34142,6 +34153,7 @@ add_executable(xds_cluster_resource_type_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/wrr_locality.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/wrr_locality.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/wrr_locality.grpc.pb.h
src/cpp/client/global_callback_hook.cc
src/cpp/util/status.cc
test/core/xds/xds_cluster_resource_type_test.cc
)
@ -35138,6 +35150,7 @@ add_executable(xds_endpoint_resource_type_test
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/xds/v3/percent.grpc.pb.h
src/cpp/client/global_callback_hook.cc
src/cpp/util/status.cc
test/core/xds/xds_endpoint_resource_type_test.cc
)

@ -143,7 +143,7 @@ task 'gem:native', [:plat] do |t, args|
verbose = ENV['V'] || '0'
grpc_config = ENV['GRPC_CONFIG'] || 'opt'
ruby_cc_versions = ['3.3.0', '3.2.0', '3.1.0', '3.0.0', '2.7.0'].join(':')
ruby_cc_versions = ['3.3.0', '3.2.0', '3.1.0', '3.0.0'].join(':')
selected_plat = "#{args[:plat]}"
# use env variable to set artifact build paralellism

@ -17,7 +17,6 @@
"""Dictionary of tags to experiments so we know when to test different experiments."""
EXPERIMENT_ENABLES = {
"call_status_override_on_cancellation": "call_status_override_on_cancellation",
"call_tracer_in_transport": "call_tracer_in_transport",
"canary_client_privacy": "canary_client_privacy",
"client_privacy": "client_privacy",
@ -28,7 +27,6 @@ EXPERIMENT_ENABLES = {
"max_pings_wo_data_throttle": "max_pings_wo_data_throttle",
"monitoring_experiment": "monitoring_experiment",
"multiping": "multiping",
"peer_state_based_framing": "peer_state_based_framing",
"pick_first_new": "pick_first_new",
"promise_based_inproc_transport": "promise_based_inproc_transport",
"schedule_cancellation_over_write": "schedule_cancellation_over_write",
@ -58,7 +56,6 @@ EXPERIMENTS = {
],
"flow_control_test": [
"multiping",
"peer_state_based_framing",
"tcp_frame_size_tuning",
"tcp_rcv_lowat",
],
@ -105,7 +102,6 @@ EXPERIMENTS = {
],
"flow_control_test": [
"multiping",
"peer_state_based_framing",
"tcp_frame_size_tuning",
"tcp_rcv_lowat",
],
@ -146,7 +142,6 @@ EXPERIMENTS = {
],
"flow_control_test": [
"multiping",
"peer_state_based_framing",
"tcp_frame_size_tuning",
"tcp_rcv_lowat",
],

@ -626,6 +626,7 @@ def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], da
linkopts = if_not_windows(["-pthread"]) + linkopts,
tags = tags,
features = features,
visibility = visibility,
)
# buildifier: disable=unnamed-macro

@ -23,10 +23,10 @@ def grpc_deps():
if "platforms" not in native.existing_rules():
http_archive(
name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
urls = [
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz",
"https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
],
)
@ -168,10 +168,10 @@ def grpc_deps():
http_archive(
name = "bazel_skylib",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.7.1/bazel-skylib-1.7.1.tar.gz",
],
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
sha256 = "bc283cdfcd526a52c3201279cda4bc298652efa898b10b4db0837dc51652756f",
)
if "bazel_compdb" not in native.existing_rules():

@ -3836,6 +3836,7 @@ libs:
- include/grpcpp/support/client_callback.h
- include/grpcpp/support/client_interceptor.h
- include/grpcpp/support/config.h
- include/grpcpp/support/global_callback_hook.h
- include/grpcpp/support/interceptor.h
- include/grpcpp/support/message_allocator.h
- include/grpcpp/support/method_handler.h
@ -3915,6 +3916,7 @@ libs:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/client/xds_credentials.cc
@ -4267,6 +4269,7 @@ libs:
- include/grpcpp/support/client_callback.h
- include/grpcpp/support/client_interceptor.h
- include/grpcpp/support/config.h
- include/grpcpp/support/global_callback_hook.h
- include/grpcpp/support/interceptor.h
- include/grpcpp/support/message_allocator.h
- include/grpcpp/support/method_handler.h
@ -4303,6 +4306,7 @@ libs:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/common/alarm.cc
- src/cpp/common/channel_arguments.cc
@ -6182,6 +6186,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -9668,6 +9673,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -10143,6 +10149,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -20379,6 +20386,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -20798,6 +20806,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -20909,6 +20918,7 @@ targets:
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
- src/cpp/client/create_channel_posix.cc
- src/cpp/client/global_callback_hook.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/alarm.cc
@ -21232,6 +21242,7 @@ targets:
- src/proto/grpc/testing/xds/v3/base.proto
- src/proto/grpc/testing/xds/v3/discovery.proto
- src/proto/grpc/testing/xds/v3/percent.proto
- src/cpp/client/global_callback_hook.cc
- src/cpp/util/status.cc
- test/core/xds/xds_client_test.cc
- test/core/xds/xds_transport_fake.cc
@ -21329,6 +21340,7 @@ targets:
- src/proto/grpc/testing/xds/v3/tls.proto
- src/proto/grpc/testing/xds/v3/typed_struct.proto
- src/proto/grpc/testing/xds/v3/wrr_locality.proto
- src/cpp/client/global_callback_hook.cc
- src/cpp/util/status.cc
- test/core/xds/xds_cluster_resource_type_test.cc
deps:
@ -21676,6 +21688,7 @@ targets:
- src/proto/grpc/testing/xds/v3/endpoint.proto
- src/proto/grpc/testing/xds/v3/health_check.proto
- src/proto/grpc/testing/xds/v3/percent.proto
- src/cpp/client/global_callback_hook.cc
- src/cpp/util/status.cc
- test/core/xds/xds_endpoint_resource_type_test.cc
deps:

@ -5,10 +5,8 @@ import warnings
import helloworld_pb2 as helloworld__pb2
GRPC_GENERATED_VERSION = '1.66.0.dev0'
GRPC_GENERATED_VERSION = '1.67.0.dev0'
GRPC_VERSION = grpc.__version__
EXPECTED_ERROR_RELEASE = '1.66.0'
SCHEDULED_RELEASE_DATE = 'August 6, 2024'
_version_not_supported = False
try:
@ -18,15 +16,12 @@ except ImportError:
_version_not_supported = True
if _version_not_supported:
warnings.warn(
raise RuntimeError(
f'The grpc package installed is at version {GRPC_VERSION},'
+ f' but the generated code in helloworld_pb2_grpc.py depends on'
+ f' grpcio>={GRPC_GENERATED_VERSION}.'
+ f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+ f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+ f' This warning will become an error in {EXPECTED_ERROR_RELEASE},'
+ f' scheduled for release on {SCHEDULED_RELEASE_DATE}.',
RuntimeWarning
)

2
gRPC-C++.podspec generated

@ -206,6 +206,7 @@ Pod::Spec.new do |s|
'include/grpcpp/support/client_callback.h',
'include/grpcpp/support/client_interceptor.h',
'include/grpcpp/support/config.h',
'include/grpcpp/support/global_callback_hook.h',
'include/grpcpp/support/interceptor.h',
'include/grpcpp/support/message_allocator.h',
'include/grpcpp/support/method_handler.h',
@ -1378,6 +1379,7 @@ Pod::Spec.new do |s|
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_internal.h',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/global_callback_hook.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/secure_credentials.h',

2
grpc.def generated

@ -231,9 +231,7 @@ EXPORTS
gpr_cpu_num_cores
gpr_cpu_current_cpu
gpr_log
gpr_should_log
gpr_log_verbosity_init
gpr_set_log_function
gpr_format_message
gpr_strdup
gpr_asprintf

2
grpc.gemspec generated

@ -13,7 +13,7 @@ Gem::Specification.new do |s|
s.description = 'Send RPCs from Ruby using GRPC'
s.license = 'Apache-2.0'
s.required_ruby_version = '>= 2.5.0'
s.required_ruby_version = '>= 3.0'
s.files = %w( Makefile .yardopts )
s.files += %w( etc/roots.pem )

@ -51,26 +51,8 @@ typedef enum gpr_log_severity {
GPRAPI void gpr_log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) GPR_PRINT_FORMAT_CHECK(4, 5);
GPRAPI int gpr_should_log(gpr_log_severity severity);
GPRAPI void gpr_log_verbosity_init(void);
/** Log overrides: applications can use this API to intercept logging calls
and use their own implementations */
struct gpr_log_func_args {
const char* file;
int line;
gpr_log_severity severity;
const char* message;
};
typedef struct gpr_log_func_args gpr_log_func_args;
typedef void (*gpr_log_func)(gpr_log_func_args* args);
GPRAPI void gpr_set_log_function(gpr_log_func deprecated_setting);
#ifdef __cplusplus
}
#endif

@ -30,6 +30,7 @@
#include <grpcpp/impl/codegen/channel_interface.h>
#include <grpcpp/impl/completion_queue_tag.h>
#include <grpcpp/support/config.h>
#include <grpcpp/support/global_callback_hook.h>
#include <grpcpp/support/status.h>
namespace grpc {
@ -127,7 +128,18 @@ class CallbackWithStatusTag : public grpc_completion_queue_functor {
auto status = std::move(status_);
func_ = nullptr; // reset to clear this out for sure
status_ = Status(); // reset to clear this out for sure
CatchingCallback(std::move(func), std::move(status));
GetGlobalCallbackHook()->RunCallback(
call_, [func = std::move(func), status = std::move(status)]() {
#if GRPC_ALLOW_EXCEPTIONS
try {
func(status);
} catch (...) {
// nothing to return or change here, just don't crash the library
}
#else // GRPC_ALLOW_EXCEPTIONS
func(status);
#endif // GRPC_ALLOW_EXCEPTIONS
});
grpc_call_unref(call_);
}
};
@ -214,7 +226,17 @@ class CallbackWithSuccessTag : public grpc_completion_queue_functor {
#endif
if (do_callback) {
CatchingCallback(func_, ok);
GetGlobalCallbackHook()->RunCallback(call_, [this, ok]() {
#if GRPC_ALLOW_EXCEPTIONS
try {
func_(ok);
} catch (...) {
// nothing to return or change here, just don't crash the library
}
#else // GRPC_ALLOW_EXCEPTIONS
func_(ok);
#endif // GRPC_ALLOW_EXCEPTIONS
});
}
}
};

@ -0,0 +1,58 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPCPP_SUPPORT_GLOBAL_CALLBACK_HOOK_H
#define GRPCPP_SUPPORT_GLOBAL_CALLBACK_HOOK_H
#include "absl/functional/function_ref.h"
struct grpc_call;
namespace grpc {
class GlobalCallbackHook {
public:
virtual ~GlobalCallbackHook() = default;
virtual void RunCallback(grpc_call* call,
absl::FunctionRef<void()> callback) = 0;
protected:
// An exception-safe way of invoking a user-specified callback function.
template <class Func, class... Args>
void CatchingCallback(Func&& func, Args&&... args) {
#if GRPC_ALLOW_EXCEPTIONS
try {
func(std::forward<Args>(args)...);
} catch (...) {
// nothing to return or change here, just don't crash the library
}
#else // GRPC_ALLOW_EXCEPTIONS
func(std::forward<Args>(args)...);
#endif // GRPC_ALLOW_EXCEPTIONS
}
};
class DefaultGlobalCallbackHook final : public GlobalCallbackHook {
public:
void RunCallback(grpc_call* call,
absl::FunctionRef<void()> callback) override {
CatchingCallback(callback);
}
};
std::shared_ptr<GlobalCallbackHook> GetGlobalCallbackHook();
void SetGlobalCallbackHook(GlobalCallbackHook* hook);
} // namespace grpc
#endif // GRPCPP_SUPPORT_GLOBAL_CALLBACK_HOOK_H

@ -744,8 +744,6 @@ bool PrivateGenerator::PrintPreamble(grpc_generator::Printer* out) {
var["ToolsVersion"] = config.grpc_tools_version;
out->Print(var, "\nGRPC_GENERATED_VERSION = '$ToolsVersion$'\n");
out->Print("GRPC_VERSION = grpc.__version__\n");
out->Print("EXPECTED_ERROR_RELEASE = '1.66.0'\n");
out->Print("SCHEDULED_RELEASE_DATE = 'August 6, 2024'\n");
out->Print("_version_not_supported = False\n\n");
out->Print("try:\n");
{
@ -763,7 +761,7 @@ bool PrivateGenerator::PrintPreamble(grpc_generator::Printer* out) {
out->Print("\nif _version_not_supported:\n");
{
IndentScope raii_warning_indent(out);
out->Print("warnings.warn(\n");
out->Print("raise RuntimeError(\n");
{
IndentScope raii_warning_string_indent(out);
std::string filename_without_ext = file->filename_without_ext();
@ -779,11 +777,7 @@ bool PrivateGenerator::PrintPreamble(grpc_generator::Printer* out) {
"+ f' Please upgrade your grpc module to "
"grpcio>={GRPC_GENERATED_VERSION}'\n"
"+ f' or downgrade your generated code using "
"grpcio-tools<={GRPC_VERSION}.'\n"
"+ f' This warning will become an error in "
"{EXPECTED_ERROR_RELEASE},'\n"
"+ f' scheduled for release on {SCHEDULED_RELEASE_DATE}.',\n"
"RuntimeWarning\n");
"grpcio-tools<={GRPC_VERSION}.'\n");
}
out->Print(")\n");
}

@ -142,6 +142,8 @@ grpc_cc_library(
"util/latent_see.h",
],
external_deps = [
"absl/base:core_headers",
"absl/functional:any_invocable",
"absl/log",
"absl/strings",
"absl/types:optional",

@ -139,21 +139,18 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
}
},
is_transparent_retry);
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this
<< ": created attempt, lb_call=" << lb_call_.get();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld << " attempt=" << this
<< ": created attempt, lb_call=" << lb_call_.get();
// If per_attempt_recv_timeout is set, start a timer.
if (calld->retry_policy_ != nullptr &&
calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
const Duration per_attempt_recv_timeout =
*calld->retry_policy_->per_attempt_recv_timeout();
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this << ": per-attempt timeout in "
<< per_attempt_recv_timeout.millis() << " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << this << ": per-attempt timeout in "
<< per_attempt_recv_timeout.millis() << " ms";
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(calld->owning_call_, "OnPerAttemptRecvTimer");
Ref(DEBUG_LOCATION, "OnPerAttemptRecvTimer").release();
@ -316,11 +313,10 @@ void StartBatchInCallCombiner(void* arg, grpc_error_handle /*ignored*/) {
void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
grpc_transport_stream_op_batch* batch, const char* reason,
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": adding batch (" << reason
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": adding batch (" << reason
<< "): " << grpc_transport_stream_op_batch_string(batch, false);
batch->handler_private.extra_arg = lb_call_.get();
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner,
batch, grpc_schedule_on_exec_ctx);
@ -527,11 +523,10 @@ void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
AddRetriableBatches(&closures);
// Note: This will yield the call combiner.
// Start batches on LB call.
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": starting " << closures.size()
<< " retriable batches on lb_call=" << lb_call_.get();
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": starting " << closures.size()
<< " retriable batches on lb_call=" << lb_call_.get();
closures.RunClosures(calld_->call_combiner_);
}
@ -595,28 +590,24 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
++calld_->num_attempts_completed_;
if (calld_->num_attempts_completed_ >=
calld_->retry_policy_->max_attempts()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": exceeded "
<< calld_->retry_policy_->max_attempts() << " retry attempts";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": exceeded "
<< calld_->retry_policy_->max_attempts() << " retry attempts";
return false;
}
// Check server push-back.
if (server_pushback.has_value()) {
if (*server_pushback < Duration::Zero()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": not retrying due to server push-back";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": not retrying due to server push-back";
return false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": server push-back: retry in "
<< server_pushback->millis() << " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": server push-back: retry in "
<< server_pushback->millis() << " ms";
}
}
// We should retry.
@ -700,11 +691,9 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void RetryFilter::LegacyCallData::CallAttempt::
MaybeCancelPerAttemptRecvTimer() {
if (per_attempt_recv_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this
<< ": cancelling perAttemptRecvTimeout timer";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld_->chand_ << " calld=" << calld_
<< " attempt=" << this << ": cancelling perAttemptRecvTimeout timer";
if (calld_->chand_->event_engine()->Cancel(
*per_attempt_recv_timer_handle_)) {
Unref(DEBUG_LOCATION, "OnPerAttemptRecvTimer");
@ -723,11 +712,10 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
: RefCounted(GRPC_TRACE_FLAG_ENABLED(retry) ? "BatchData" : nullptr,
refcount),
call_attempt_(attempt.release()) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": creating batch " << this;
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_ << " attempt=" << call_attempt_
<< ": creating batch " << this;
// We hold a ref to the call stack for every batch sent on a call attempt.
// This is because some batches on the call attempt may not complete
// until after all of the batches are completed at the surface (because
@ -744,11 +732,10 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
}
RetryFilter::LegacyCallData::CallAttempt::BatchData::~BatchData() {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_
<< " attempt=" << call_attempt_ << ": destroying batch " << this;
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << call_attempt_->calld_->chand_
<< " calld=" << call_attempt_->calld_ << " attempt=" << call_attempt_
<< ": destroying batch " << this;
CallAttempt* call_attempt = std::exchange(call_attempt_, nullptr);
grpc_call_stack* owning_call = call_attempt->calld_->owning_call_;
call_attempt->Unref(DEBUG_LOCATION, "~BatchData");
@ -844,11 +831,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
if (GPR_UNLIKELY(
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt
<< ": deferring recv_initial_metadata_ready (Trailers-Only)";
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
std::move(batch_data);
call_attempt->recv_initial_metadata_error_ = error;
@ -1254,11 +1240,10 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
}
}
if (have_pending_send_ops) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting next batch for pending send op(s)";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << calld->chand_ << " calld=" << calld
<< " attempt=" << call_attempt_
<< ": starting next batch for pending send op(s)";
call_attempt_->AddRetriableBatches(closures);
}
}
@ -1576,11 +1561,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (GPR_UNLIKELY(batch->cancel_stream)) {
// Save cancel_error in case subsequent batches are started.
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelled from surface: "
<< StatusToString(cancelled_from_surface_);
}
GRPC_TRACE_LOG(retry, INFO) << "chand=" << chand_ << " calld=" << this
<< ": cancelled from surface: "
<< StatusToString(cancelled_from_surface_);
// Fail any pending batches.
PendingBatchesFail(cancelled_from_surface_);
// If we have a current call attempt, commit the call, then send
@ -1644,11 +1627,9 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (!retry_codepath_started_ && retry_committed_ &&
(retry_policy_ == nullptr ||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retry committed before first attempt; "
<< "creating LB call";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this
<< ": retry committed before first attempt; creating LB call";
PendingBatchClear(pending);
auto* service_config_call_data =
DownCast<ClientChannelServiceConfigCallData*>(
@ -1942,11 +1923,9 @@ void RetryFilter::LegacyCallData::StartRetryTimer(
} else {
next_attempt_timeout = retry_backoff_.NextAttemptTime() - Timestamp::Now();
}
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
LOG(INFO) << "chand=" << chand_ << " calld=" << this
<< ": retrying failed call in " << next_attempt_timeout.millis()
<< " ms";
}
GRPC_TRACE_LOG(retry, INFO)
<< "chand=" << chand_ << " calld=" << this << ": retrying failed call in "
<< next_attempt_timeout.millis() << " ms";
// Schedule retry after computed delay.
GRPC_CALL_STACK_REF(owning_call_, "OnRetryTimer");
retry_timer_handle_ =

@ -893,11 +893,9 @@ bool Subchannel::PublishTransportLocked() {
}
connecting_result_.Reset();
// Publish.
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
LOG(INFO) << "subchannel " << this << " " << key_.ToString()
<< ": new connected subchannel at "
<< connected_subchannel_.get();
}
GRPC_TRACE_LOG(subchannel, INFO)
<< "subchannel " << this << " " << key_.ToString()
<< ": new connected subchannel at " << connected_subchannel_.get();
if (channelz_node_ != nullptr) {
channelz_node_->SetChildSocket(std::move(socket_node));
}

@ -128,9 +128,8 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
if (md.get(GrpcCallWasCancelled()).value_or(false)) return;
auto* ctx = MaybeGetContext<BackendMetricProvider>();
if (ctx == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
LOG(INFO) << "[" << this << "] No BackendMetricProvider.";
}
GRPC_TRACE_LOG(backend_metric_filter, INFO)
<< "[" << this << "] No BackendMetricProvider.";
return;
}
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
@ -140,8 +139,9 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
<< "] Backend metrics serialized. size: " << serialized->size();
md.Set(EndpointLoadMetricsBinMetadata(),
Slice::FromCopiedString(std::move(*serialized)));
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
LOG(INFO) << "[" << this << "] No backend metrics.";
} else {
GRPC_TRACE_LOG(backend_metric_filter, INFO)
<< "[" << this << "] No backend metrics.";
}
}

@ -165,11 +165,10 @@ MessageHandle ChannelCompression::CompressMessage(
absl::StatusOr<MessageHandle> ChannelCompression::DecompressMessage(
bool is_client, MessageHandle message, DecompressArgs args) const {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
LOG(INFO) << "DecompressMessage: len=" << message->payload()->Length()
<< " max=" << args.max_recv_message_length.value_or(-1)
<< " alg=" << args.algorithm;
}
GRPC_TRACE_LOG(compression, INFO)
<< "DecompressMessage: len=" << message->payload()->Length()
<< " max=" << args.max_recv_message_length.value_or(-1)
<< " alg=" << args.algorithm;
auto* call_tracer = MaybeGetContext<CallTracerInterface>();
if (call_tracer != nullptr) {
call_tracer->RecordReceivedMessage(*message->payload());

@ -159,11 +159,10 @@ ServerMetadataHandle CheckPayload(const Message& msg,
absl::optional<uint32_t> max_length,
bool is_client, bool is_send) {
if (!max_length.has_value()) return nullptr;
if (GRPC_TRACE_FLAG_ENABLED(call)) {
LOG(INFO) << GetContext<Activity>()->DebugTag() << "[message_size] "
<< (is_send ? "send" : "recv")
<< " len:" << msg.payload()->Length() << " max:" << *max_length;
}
GRPC_TRACE_LOG(call, INFO)
<< GetContext<Activity>()->DebugTag() << "[message_size] "
<< (is_send ? "send" : "recv") << " len:" << msg.payload()->Length()
<< " max:" << *max_length;
if (msg.payload()->Length() <= *max_length) return nullptr;
return ServerMetadataFromStatus(
GRPC_STATUS_RESOURCE_EXHAUSTED,

@ -377,7 +377,7 @@ void WireWriterImpl::TryScheduleTransaction() {
} else {
// It is common to fill `kFlowControlWindowSize` completely because
// transactions are send at faster rate than the other end of transport
// can handle it, so here we use `GPR_DEBUG` log level.
// can handle it, so here we use VLOG(2).
VLOG(2) << "Some work cannot be scheduled yet due to slow ack from the "
"other end of transport. This transport might be blocked if "
"this number don't go down. pending_outgoing_tx_.size() = "

@ -318,9 +318,8 @@ void ChaoticGoodConnector::OnHandshakeDone(
},
EventEngineWakeupScheduler(event_engine_),
[self = RefAsSubclass<ChaoticGoodConnector>()](absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "ChaoticGoodConnector::OnHandshakeDone: " << status;
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "ChaoticGoodConnector::OnHandshakeDone: " << status;
if (status.ok()) {
MutexLock lock(&self->mu_);
self->result_->transport = new ChaoticGoodClientTransport(

@ -128,9 +128,8 @@ auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
auto ChaoticGoodServerTransport::SendFragment(
ServerFragmentFrame frame, MpscSender<ServerFrame> outgoing_frames,
CallInitiator call_initiator) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
LOG(INFO) << "CHAOTIC_GOOD: SendFragment: frame=" << frame.ToString();
}
GRPC_TRACE_LOG(chaotic_good, INFO)
<< "CHAOTIC_GOOD: SendFragment: frame=" << frame.ToString();
// Capture the call_initiator to ensure the underlying call spine is alive
// until the outgoing_frames.Send promise completes.
return Map(outgoing_frames.Send(std::move(frame)),

@ -110,8 +110,9 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (t->ping_abuse_policy.ReceivedOnePing(transport_idle)) {
grpc_chttp2_exceeded_ping_strikes(t);
}
} else if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
LOG(INFO) << "CLIENT[" << t << "]: received ping " << p->opaque_8bytes;
} else {
GRPC_TRACE_LOG(http2_ping, INFO)
<< "CLIENT[" << t << "]: received ping " << p->opaque_8bytes;
}
if (t->ack_pings) {
if (t->ping_ack_count == t->ping_ack_capacity) {

@ -117,9 +117,8 @@ void HPackCompressor::SetMaxUsableSize(uint32_t max_table_size) {
void HPackCompressor::SetMaxTableSize(uint32_t max_table_size) {
if (table_.SetMaxSize(std::min(max_usable_size_, max_table_size))) {
advertise_table_size_change_ = true;
if (GRPC_TRACE_FLAG_ENABLED(http)) {
LOG(INFO) << "set max table size from encoder to " << max_table_size;
}
GRPC_TRACE_LOG(http, INFO)
<< "set max table size from encoder to " << max_table_size;
}
}

@ -81,11 +81,10 @@ HandshakeManager::HandshakeManager()
void HandshakeManager::Add(RefCountedPtr<Handshaker> handshaker) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": adding handshaker "
<< std::string(handshaker->name()) << " [" << handshaker.get()
<< "] at index " << handshakers_.size();
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": adding handshaker "
<< std::string(handshaker->name()) << " [" << handshaker.get()
<< "] at index " << handshakers_.size();
handshakers_.push_back(std::move(handshaker));
}
@ -153,11 +152,10 @@ void HandshakeManager::Shutdown(absl::Status error) {
}
void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": error=" << error
<< " shutdown=" << is_shutdown_ << " index=" << index_
<< ", args=" << HandshakerArgsString(&args_);
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": error=" << error
<< " shutdown=" << is_shutdown_ << " index=" << index_
<< ", args=" << HandshakerArgsString(&args_);
CHECK(index_ <= handshakers_.size());
// If we got an error or we've been shut down or we're exiting early or
// we've finished the last handshaker, invoke the on_handshake_done
@ -192,11 +190,10 @@ void HandshakeManager::CallNextHandshakerLocked(absl::Status error) {
}
// Call the next handshaker.
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
LOG(INFO) << "handshake_manager " << this << ": calling handshaker "
<< handshaker->name() << " [" << handshaker.get() << "] at index "
<< index_;
}
GRPC_TRACE_LOG(handshaker, INFO)
<< "handshake_manager " << this << ": calling handshaker "
<< handshaker->name() << " [" << handshaker.get() << "] at index "
<< index_;
++index_;
handshaker->DoHandshake(&args_, [self = Ref()](absl::Status error) mutable {
MutexLock lock(&self->mu_);

@ -195,9 +195,8 @@ static void maybe_post_reclaimer(secure_endpoint* ep) {
grpc_core::ReclamationPass::kBenign,
[ep](absl::optional<grpc_core::ReclamationSweep> sweep) {
if (sweep.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "secure endpoint: benign reclamation to free memory";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "secure endpoint: benign reclamation to free memory";
grpc_slice temp_read_slice;
grpc_slice temp_write_slice;
@ -253,6 +252,13 @@ static void on_read(void* user_data, grpc_error_handle error) {
{
grpc_core::MutexLock l(&ep->read_mu);
// If we were shut down after this callback was scheduled with OK
// status but before it was invoked, we need to treat that as an error.
if (ep->wrapped_ep == nullptr && error.ok()) {
error = absl::CancelledError("secure endpoint shutdown");
}
uint8_t* cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
uint8_t* end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
@ -506,8 +512,10 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
static void endpoint_destroy(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
ep->read_mu.Lock();
ep->wrapped_ep.reset();
ep->memory_owner.Reset();
ep->read_mu.Unlock();
SECURE_ENDPOINT_UNREF(ep, "destroy");
}

@ -398,11 +398,9 @@ bool BaseCallData::SendMessage::IsIdle() const {
void BaseCallData::SendMessage::OnComplete(absl::Status status) {
Flusher flusher(base_);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
switch (state_) {
case State::kInitial:
case State::kIdle:
@ -429,11 +427,9 @@ void BaseCallData::SendMessage::OnComplete(absl::Status status) {
void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " SendMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " SendMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
switch (state_) {
case State::kCancelled:
case State::kCancelledButNotYetPolled:
@ -681,11 +677,10 @@ void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
}
void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.OnComplete st=" << StateString(state_)
<< " status=" << status;
switch (state_) {
case State::kInitial:
case State::kIdle:
@ -722,11 +717,9 @@ void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
void BaseCallData::ReceiveMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag() << " ReceiveMessage.Done st=" << StateString(state_)
<< " md=" << metadata.DebugString();
switch (state_) {
case State::kInitial:
state_ = State::kCancelled;
@ -842,11 +835,10 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
CHECK(push_.has_value());
auto r_push = (*push_)();
if (auto* p = r_push.value_if_ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete: "
<< (*p ? "true" : "false");
}
GRPC_TRACE_LOG(channel, INFO)
<< base_->LogTag()
<< " ReceiveMessage.WakeInsideCombiner push complete: "
<< (*p ? "true" : "false");
// We haven't pulled through yet, so this certainly shouldn't succeed.
CHECK(!*p);
state_ = State::kCancelled;
@ -1366,9 +1358,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
CapturedBatch batch(b);
Flusher flusher(this);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " StartBatch " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO) << LogTag() << " StartBatch " << DebugString();
// If this is a cancel stream, cancel anything we have pending and propagate
// the cancellation.
@ -1489,9 +1479,8 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ClientCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " Cancel error=" << error.ToString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " Cancel error=" << error.ToString();
// Track the latest reason for cancellation.
cancelled_error_ = error;
// Stop running the promise.
@ -1568,11 +1557,10 @@ void ClientCallData::StartPromise(Flusher* flusher) {
}
void ClientCallData::RecvInitialMetadataReady(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ClientCallData.RecvInitialMetadataReady "
<< DebugString() << " error:" << error.ToString()
<< " md:" << recv_initial_metadata_->metadata->DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ClientCallData.RecvInitialMetadataReady "
<< DebugString() << " error:" << error.ToString()
<< " md:" << recv_initial_metadata_->metadata->DebugString();
ScopedContext context(this);
Flusher flusher(this);
if (!error.ok()) {
@ -1974,9 +1962,8 @@ ServerCallData::ServerCallData(grpc_call_element* elem,
}
ServerCallData::~ServerCallData() {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " ~ServerCallData " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << " ~ServerCallData " << DebugString();
if (send_initial_metadata_ != nullptr) {
send_initial_metadata_->~SendInitialMetadata();
}
@ -2001,9 +1988,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
Flusher flusher(this);
bool wake = false;
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << " StartBatch: " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO) << LogTag() << " StartBatch: " << DebugString();
// If this is a cancel stream, cancel anything we have pending and
// propagate the cancellation.
@ -2306,9 +2291,8 @@ void ServerCallData::RecvInitialMetadataReadyCallback(void* arg,
void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
Flusher flusher(this);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": RecvInitialMetadataReady " << error;
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": RecvInitialMetadataReady " << error;
CHECK(recv_initial_state_ == RecvInitialState::kForwarded);
// If there was an error we just propagate that through
if (!error.ok()) {
@ -2370,9 +2354,8 @@ std::string ServerCallData::DebugString() const {
// Wakeup and poll the promise if appropriate.
void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
PollContext poll_ctx(this, flusher);
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner " << DebugString();
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner " << DebugString();
poll_ctx.ClearRepoll();
if (send_initial_metadata_ != nullptr) {
if (send_initial_metadata_->state ==
@ -2392,12 +2375,12 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
}
if (send_initial_metadata_->metadata_push_.has_value()) {
if ((*send_initial_metadata_->metadata_push_)().ready()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner: metadata_push done";
}
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner: metadata_push done";
send_initial_metadata_->metadata_push_.reset();
} else if (GRPC_TRACE_FLAG_ENABLED(channel)) {
LOG(INFO) << LogTag() << ": WakeInsideCombiner: metadata_push pending";
} else {
GRPC_TRACE_LOG(channel, INFO)
<< LogTag() << ": WakeInsideCombiner: metadata_push pending";
}
}
}

@ -81,10 +81,16 @@ class TraceFlag {
};
#define GRPC_TRACE_FLAG_ENABLED_OBJ(obj) GPR_UNLIKELY((obj).enabled())
#define GRPC_TRACE_FLAG_ENABLED(tracer) \
GPR_UNLIKELY((grpc_core::tracer##_trace).enabled())
#define GRPC_TRACE_LOG(tracer, level) \
LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))
#define GRPC_TRACE_DLOG(tracer, level) \
DLOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))
#define GRPC_TRACE_VLOG(tracer, level) \
if (GRPC_TRACE_FLAG_ENABLED(tracer)) VLOG(level)

@ -124,7 +124,8 @@ bool IsIpv6LoopbackAvailable() {
absl::Status SetRequestDNSServer(absl::string_view dns_server,
ares_channel* channel) {
GRPC_ARES_RESOLVER_TRACE_LOG("Using DNS server %s", dns_server.data());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) Using DNS server " << dns_server;
grpc_resolved_address addr;
struct ares_addr_port_node dns_server_addr = {};
if (grpc_parse_ipv4_hostport(dns_server, &addr, /*log_errors=*/false)) {
@ -251,8 +252,9 @@ void AresResolver::Orphan() {
}
for (const auto& fd_node : fd_node_list_) {
if (!fd_node->already_shutdown) {
GRPC_ARES_RESOLVER_TRACE_LOG("resolver: %p shutdown fd: %s", this,
fd_node->polled_fd->GetName());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver: " << this
<< " shutdown fd: " << fd_node->polled_fd->GetName();
CHECK(fd_node->polled_fd->ShutdownLocked(
absl::CancelledError("AresResolver::Orphan")));
fd_node->already_shutdown = true;
@ -423,8 +425,9 @@ void AresResolver::CheckSocketsLocked() {
fd_node_list_.begin(), fd_node_list_.end(),
[sock = socks[i]](const auto& node) { return node->as == sock; });
if (iter == fd_node_list_.end()) {
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p new fd: %d", this,
socks[i]);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << this
<< " new fd: " << socks[i];
new_list.push_back(std::make_unique<FdNode>(
socks[i], polled_fd_factory_->NewGrpcPolledFdLocked(socks[i])));
} else {
@ -440,8 +443,9 @@ void AresResolver::CheckSocketsLocked() {
// to cope with the edge-triggered poller not getting an event if no
// new data arrives and c-ares hasn't read all the data in the
// previous ares_process_fd.
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p schedule read directly on: %d", this, fd_node->as);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << this
<< " schedule read directly on: " << fd_node->as;
event_engine_->Run(
[self = Ref(DEBUG_LOCATION, "CheckSocketsLocked"),
fd_node]() mutable {
@ -450,8 +454,9 @@ void AresResolver::CheckSocketsLocked() {
});
} else {
// Otherwise register with the poller for readable event.
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p notify read on: %d", this,
fd_node->as);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << this
<< " notify read on: " << fd_node->as;
fd_node->polled_fd->RegisterForOnReadableLocked(
[self = Ref(DEBUG_LOCATION, "CheckSocketsLocked"),
fd_node](absl::Status status) mutable {
@ -464,8 +469,9 @@ void AresResolver::CheckSocketsLocked() {
// has not been registered with this socket.
if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
!fd_node->writable_registered) {
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p notify write on: %d", this,
fd_node->as);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << this
<< " notify write on: " << fd_node->as;
fd_node->writable_registered = true;
fd_node->polled_fd->RegisterForOnWriteableLocked(
[self = Ref(DEBUG_LOCATION, "CheckSocketsLocked"),
@ -487,14 +493,16 @@ void AresResolver::CheckSocketsLocked() {
while (!fd_node_list_.empty()) {
FdNode* fd_node = fd_node_list_.front().get();
if (!fd_node->already_shutdown) {
GRPC_ARES_RESOLVER_TRACE_LOG("resolver: %p shutdown fd: %s", this,
fd_node->polled_fd->GetName());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver: " << this
<< " shutdown fd: " << fd_node->polled_fd->GetName();
fd_node->already_shutdown =
fd_node->polled_fd->ShutdownLocked(absl::OkStatus());
}
if (!fd_node->readable_registered && !fd_node->writable_registered) {
GRPC_ARES_RESOLVER_TRACE_LOG("resolver: %p delete fd: %s", this,
fd_node->polled_fd->GetName());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver: " << this
<< " delete fd: " << fd_node->polled_fd->GetName();
fd_node_list_.pop_front();
} else {
new_list.splice(new_list.end(), fd_node_list_, fd_node_list_.begin());
@ -508,9 +516,10 @@ void AresResolver::MaybeStartTimerLocked() {
return;
}
// Initialize the backup poll alarm
GRPC_ARES_RESOLVER_TRACE_LOG(
"request:%p MaybeStartTimerLocked next ares process poll time in %zu ms",
this, Milliseconds(kAresBackupPollAlarmDuration));
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) request:" << this
<< " MaybeStartTimerLocked next ares process poll time in "
<< Milliseconds(kAresBackupPollAlarmDuration) << " ms";
ares_backup_poll_alarm_handle_ = event_engine_->RunAfter(
kAresBackupPollAlarmDuration,
[self = Ref(DEBUG_LOCATION, "MaybeStartTimerLocked")]() {
@ -522,8 +531,9 @@ void AresResolver::OnReadable(FdNode* fd_node, absl::Status status) {
grpc_core::MutexLock lock(&mutex_);
CHECK(fd_node->readable_registered);
fd_node->readable_registered = false;
GRPC_ARES_RESOLVER_TRACE_LOG("OnReadable: fd: %d; request: %p; status: %s",
fd_node->as, this, status.ToString().c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) OnReadable: fd: " << fd_node->as
<< "; request: " << this << "; status: " << status;
if (status.ok() && !shutting_down_) {
ares_process_fd(channel_, fd_node->as, ARES_SOCKET_BAD);
} else {
@ -541,8 +551,9 @@ void AresResolver::OnWritable(FdNode* fd_node, absl::Status status) {
grpc_core::MutexLock lock(&mutex_);
CHECK(fd_node->writable_registered);
fd_node->writable_registered = false;
GRPC_ARES_RESOLVER_TRACE_LOG("OnWritable: fd: %d; request:%p; status: %s",
fd_node->as, this, status.ToString().c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) OnWritable: fd: " << fd_node->as
<< "; request:" << this << "; status: " << status;
if (status.ok() && !shutting_down_) {
ares_process_fd(channel_, ARES_SOCKET_BAD, fd_node->as);
} else {
@ -567,15 +578,16 @@ void AresResolver::OnWritable(FdNode* fd_node, absl::Status status) {
void AresResolver::OnAresBackupPollAlarm() {
grpc_core::MutexLock lock(&mutex_);
ares_backup_poll_alarm_handle_.reset();
GRPC_ARES_RESOLVER_TRACE_LOG(
"request:%p OnAresBackupPollAlarm shutting_down=%d.", this,
shutting_down_);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) request:" << this
<< " OnAresBackupPollAlarm shutting_down=" << shutting_down_;
if (!shutting_down_) {
for (const auto& fd_node : fd_node_list_) {
if (!fd_node->already_shutdown) {
GRPC_ARES_RESOLVER_TRACE_LOG(
"request:%p OnAresBackupPollAlarm; ares_process_fd. fd=%s", this,
fd_node->polled_fd->GetName());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) request:" << this
<< " OnAresBackupPollAlarm; ares_process_fd. fd="
<< fd_node->polled_fd->GetName();
ares_socket_t as = fd_node->polled_fd->GetWrappedAresSocketLocked();
ares_process_fd(channel_, as, as);
}
@ -595,13 +607,15 @@ void AresResolver::OnHostbynameDoneLocked(void* arg, int status,
std::string error_msg =
absl::StrFormat("address lookup failed for %s: %s",
hostname_qa->query_name, ares_strerror(status));
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p OnHostbynameDoneLocked: %s",
ares_resolver, error_msg.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " OnHostbynameDoneLocked: " << error_msg;
hostname_qa->error_status = AresStatusToAbslStatus(status, error_msg);
} else {
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p OnHostbynameDoneLocked name=%s ARES_SUCCESS",
ares_resolver, hostname_qa->query_name.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " OnHostbynameDoneLocked name=" << hostname_qa->query_name
<< " ARES_SUCCESS";
for (size_t i = 0; hostent->h_addr_list[i] != nullptr; i++) {
if (hostname_qa->result.size() == kMaxRecordSize) {
LOG(ERROR) << "A/AAAA response exceeds maximum record size of 65536";
@ -620,10 +634,11 @@ void AresResolver::OnHostbynameDoneLocked(void* arg, int status,
reinterpret_cast<const sockaddr*>(&addr), addr_len);
char output[INET6_ADDRSTRLEN];
ares_inet_ntop(AF_INET6, &addr.sin6_addr, output, INET6_ADDRSTRLEN);
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p c-ares resolver gets a AF_INET6 result: \n"
" addr: %s\n port: %d\n sin6_scope_id: %d\n",
ares_resolver, output, hostname_qa->port, addr.sin6_scope_id);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " c-ares resolver gets a AF_INET6 result: \n addr: " << output
<< "\n port: " << hostname_qa->port
<< "\n sin6_scope_id: " << addr.sin6_scope_id;
break;
}
case AF_INET: {
@ -638,10 +653,10 @@ void AresResolver::OnHostbynameDoneLocked(void* arg, int status,
reinterpret_cast<const sockaddr*>(&addr), addr_len);
char output[INET_ADDRSTRLEN];
ares_inet_ntop(AF_INET, &addr.sin_addr, output, INET_ADDRSTRLEN);
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p c-ares resolver gets a AF_INET result: \n"
" addr: %s\n port: %d\n",
ares_resolver, output, hostname_qa->port);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " c-ares resolver gets a AF_INET result: \n addr: " << output
<< "\n port: " << hostname_qa->port;
break;
}
default:
@ -689,8 +704,9 @@ void AresResolver::OnSRVQueryDoneLocked(void* arg, int status, int /*timeouts*/,
auto fail = [&](absl::string_view prefix) {
std::string error_message = absl::StrFormat(
"%s for %s: %s", prefix, qa->query_name, ares_strerror(status));
GRPC_ARES_RESOLVER_TRACE_LOG("OnSRVQueryDoneLocked: %s",
error_message.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) OnSRVQueryDoneLocked: "
<< error_message;
ares_resolver->event_engine_->Run(
[callback = std::move(callback),
status = AresStatusToAbslStatus(status, error_message)]() mutable {
@ -701,13 +717,14 @@ void AresResolver::OnSRVQueryDoneLocked(void* arg, int status, int /*timeouts*/,
fail("SRV lookup failed");
return;
}
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p OnSRVQueryDoneLocked name=%s ARES_SUCCESS", ares_resolver,
qa->query_name.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " OnSRVQueryDoneLocked name=" << qa->query_name << " ARES_SUCCESS";
struct ares_srv_reply* reply = nullptr;
status = ares_parse_srv_reply(abuf, alen, &reply);
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p ares_parse_srv_reply: %d",
ares_resolver, status);
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " ares_parse_srv_reply: " << status;
if (status != ARES_SUCCESS) {
fail("Failed to parse SRV reply");
return;
@ -748,8 +765,9 @@ void AresResolver::OnTXTDoneLocked(void* arg, int status, int /*timeouts*/,
auto fail = [&](absl::string_view prefix) {
std::string error_message = absl::StrFormat(
"%s for %s: %s", prefix, qa->query_name, ares_strerror(status));
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p OnTXTDoneLocked: %s",
ares_resolver, error_message.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " OnTXTDoneLocked: " << error_message;
ares_resolver->event_engine_->Run(
[callback = std::move(callback),
status = AresStatusToAbslStatus(status, error_message)]() mutable {
@ -760,9 +778,9 @@ void AresResolver::OnTXTDoneLocked(void* arg, int status, int /*timeouts*/,
fail("TXT lookup failed");
return;
}
GRPC_ARES_RESOLVER_TRACE_LOG(
"resolver:%p OnTXTDoneLocked name=%s ARES_SUCCESS", ares_resolver,
qa->query_name.c_str());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver
<< " OnTXTDoneLocked name=" << qa->query_name << " ARES_SUCCESS";
struct ares_txt_ext* reply = nullptr;
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) {
@ -779,8 +797,9 @@ void AresResolver::OnTXTDoneLocked(void* arg, int status, int /*timeouts*/,
std::string(reinterpret_cast<char*>(part->txt), part->length));
}
}
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p Got %zu TXT records", ares_resolver,
result.size());
GRPC_TRACE_LOG(cares_resolver, INFO)
<< "(EventEngine c-ares resolver) resolver:" << ares_resolver << " Got "
<< result.size() << " TXT records";
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) {
for (const auto& record : result) {
LOG(INFO) << record;

@ -47,14 +47,6 @@
namespace grpc_event_engine {
namespace experimental {
#define GRPC_ARES_RESOLVER_TRACE_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) { \
LOG(INFO) << "(EventEngine c-ares resolver) " \
<< absl::StrFormat(format, __VA_ARGS__); \
} \
} while (0)
class AresResolver : public RefCountedDNSResolverInterface {
public:
static absl::StatusOr<grpc_core::OrphanablePtr<AresResolver>>

@ -335,9 +335,18 @@ void CFStreamEndpointImpl::DoWrite(
continue;
}
size_t written_size =
CFIndex written_size =
CFWriteStreamWrite(cf_write_stream_, slice.begin(), slice.size());
if (written_size < 0) {
auto status = CFErrorToStatus(CFWriteStreamCopyError(cf_write_stream_));
GRPC_TRACE_LOG(event_engine_endpoint, INFO)
<< "CFStream write error: " << status
<< ", written_size: " << written_size;
on_writable(status);
return;
}
total_written_size += written_size;
if (written_size < slice.size()) {
SliceBuffer written;

@ -53,6 +53,14 @@ namespace grpc_event_engine {
namespace experimental {
namespace {
#define GRPC_ARES_RESOLVER_TRACE_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) { \
LOG(INFO) << "(EventEngine c-ares resolver) " \
<< absl::StrFormat(format, __VA_ARGS__); \
} \
} while (0)
constexpr int kRecvFromSourceAddrSize = 200;
constexpr int kReadBufferSize = 4192;

@ -22,11 +22,6 @@
#if defined(GRPC_CFSTREAM)
namespace {
const char* const description_call_status_override_on_cancellation =
"Avoid overriding call status of successfully finished calls if it races "
"with cancellation.";
const char* const additional_constraints_call_status_override_on_cancellation =
"{}";
const char* const description_call_tracer_in_transport =
"Transport directly passes byte counts to CallTracer.";
const char* const additional_constraints_call_tracer_in_transport = "{}";
@ -58,11 +53,6 @@ const char* const additional_constraints_monitoring_experiment = "{}";
const char* const description_multiping =
"Allow more than one ping to be in flight at a time by default.";
const char* const additional_constraints_multiping = "{}";
const char* const description_peer_state_based_framing =
"If set, the max sizes of frames sent to lower layers is controlled based "
"on the peer's memory pressure which is reflected in its max http2 frame "
"size.";
const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
@ -105,10 +95,6 @@ const char* const additional_constraints_work_serializer_dispatch = "{}";
namespace grpc_core {
const ExperimentMetadata g_experiment_metadata[] = {
{"call_status_override_on_cancellation",
description_call_status_override_on_cancellation,
additional_constraints_call_status_override_on_cancellation, nullptr, 0,
true, true},
{"call_tracer_in_transport", description_call_tracer_in_transport,
additional_constraints_call_tracer_in_transport, nullptr, 0, true, true},
{"canary_client_privacy", description_canary_client_privacy,
@ -130,8 +116,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_monitoring_experiment, nullptr, 0, true, true},
{"multiping", description_multiping, additional_constraints_multiping,
nullptr, 0, false, true},
{"peer_state_based_framing", description_peer_state_based_framing,
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_inproc_transport",
@ -166,11 +150,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
#elif defined(GPR_WINDOWS)
namespace {
const char* const description_call_status_override_on_cancellation =
"Avoid overriding call status of successfully finished calls if it races "
"with cancellation.";
const char* const additional_constraints_call_status_override_on_cancellation =
"{}";
const char* const description_call_tracer_in_transport =
"Transport directly passes byte counts to CallTracer.";
const char* const additional_constraints_call_tracer_in_transport = "{}";
@ -202,11 +181,6 @@ const char* const additional_constraints_monitoring_experiment = "{}";
const char* const description_multiping =
"Allow more than one ping to be in flight at a time by default.";
const char* const additional_constraints_multiping = "{}";
const char* const description_peer_state_based_framing =
"If set, the max sizes of frames sent to lower layers is controlled based "
"on the peer's memory pressure which is reflected in its max http2 frame "
"size.";
const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
@ -249,10 +223,6 @@ const char* const additional_constraints_work_serializer_dispatch = "{}";
namespace grpc_core {
const ExperimentMetadata g_experiment_metadata[] = {
{"call_status_override_on_cancellation",
description_call_status_override_on_cancellation,
additional_constraints_call_status_override_on_cancellation, nullptr, 0,
true, true},
{"call_tracer_in_transport", description_call_tracer_in_transport,
additional_constraints_call_tracer_in_transport, nullptr, 0, true, true},
{"canary_client_privacy", description_canary_client_privacy,
@ -274,8 +244,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_monitoring_experiment, nullptr, 0, true, true},
{"multiping", description_multiping, additional_constraints_multiping,
nullptr, 0, false, true},
{"peer_state_based_framing", description_peer_state_based_framing,
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_inproc_transport",
@ -310,11 +278,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
#else
namespace {
const char* const description_call_status_override_on_cancellation =
"Avoid overriding call status of successfully finished calls if it races "
"with cancellation.";
const char* const additional_constraints_call_status_override_on_cancellation =
"{}";
const char* const description_call_tracer_in_transport =
"Transport directly passes byte counts to CallTracer.";
const char* const additional_constraints_call_tracer_in_transport = "{}";
@ -346,11 +309,6 @@ const char* const additional_constraints_monitoring_experiment = "{}";
const char* const description_multiping =
"Allow more than one ping to be in flight at a time by default.";
const char* const additional_constraints_multiping = "{}";
const char* const description_peer_state_based_framing =
"If set, the max sizes of frames sent to lower layers is controlled based "
"on the peer's memory pressure which is reflected in its max http2 frame "
"size.";
const char* const additional_constraints_peer_state_based_framing = "{}";
const char* const description_pick_first_new =
"New pick_first impl with memory reduction.";
const char* const additional_constraints_pick_first_new = "{}";
@ -393,10 +351,6 @@ const char* const additional_constraints_work_serializer_dispatch = "{}";
namespace grpc_core {
const ExperimentMetadata g_experiment_metadata[] = {
{"call_status_override_on_cancellation",
description_call_status_override_on_cancellation,
additional_constraints_call_status_override_on_cancellation, nullptr, 0,
true, true},
{"call_tracer_in_transport", description_call_tracer_in_transport,
additional_constraints_call_tracer_in_transport, nullptr, 0, true, true},
{"canary_client_privacy", description_canary_client_privacy,
@ -418,8 +372,6 @@ const ExperimentMetadata g_experiment_metadata[] = {
additional_constraints_monitoring_experiment, nullptr, 0, true, true},
{"multiping", description_multiping, additional_constraints_multiping,
nullptr, 0, false, true},
{"peer_state_based_framing", description_peer_state_based_framing,
additional_constraints_peer_state_based_framing, nullptr, 0, false, true},
{"pick_first_new", description_pick_first_new,
additional_constraints_pick_first_new, nullptr, 0, true, true},
{"promise_based_inproc_transport",

@ -57,8 +57,6 @@ namespace grpc_core {
#ifdef GRPC_EXPERIMENTS_ARE_FINAL
#if defined(GRPC_CFSTREAM)
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
inline bool IsCallStatusOverrideOnCancellationEnabled() { return true; }
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_TRACER_IN_TRANSPORT
inline bool IsCallTracerInTransportEnabled() { return true; }
inline bool IsCanaryClientPrivacyEnabled() { return false; }
@ -71,7 +69,6 @@ inline bool IsMaxPingsWoDataThrottleEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_MONITORING_EXPERIMENT
inline bool IsMonitoringExperimentEnabled() { return true; }
inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
@ -87,8 +84,6 @@ inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
#elif defined(GPR_WINDOWS)
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
inline bool IsCallStatusOverrideOnCancellationEnabled() { return true; }
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_TRACER_IN_TRANSPORT
inline bool IsCallTracerInTransportEnabled() { return true; }
inline bool IsCanaryClientPrivacyEnabled() { return false; }
@ -104,7 +99,6 @@ inline bool IsMaxPingsWoDataThrottleEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_MONITORING_EXPERIMENT
inline bool IsMonitoringExperimentEnabled() { return true; }
inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
@ -120,8 +114,6 @@ inline bool IsWorkSerializerClearsTimeCacheEnabled() { return true; }
inline bool IsWorkSerializerDispatchEnabled() { return false; }
#else
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
inline bool IsCallStatusOverrideOnCancellationEnabled() { return true; }
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_TRACER_IN_TRANSPORT
inline bool IsCallTracerInTransportEnabled() { return true; }
inline bool IsCanaryClientPrivacyEnabled() { return false; }
@ -136,7 +128,6 @@ inline bool IsMaxPingsWoDataThrottleEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_MONITORING_EXPERIMENT
inline bool IsMonitoringExperimentEnabled() { return true; }
inline bool IsMultipingEnabled() { return false; }
inline bool IsPeerStateBasedFramingEnabled() { return false; }
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() { return true; }
inline bool IsPromiseBasedInprocTransportEnabled() { return false; }
@ -154,7 +145,6 @@ inline bool IsWorkSerializerDispatchEnabled() { return false; }
#else
enum ExperimentIds {
kExperimentIdCallStatusOverrideOnCancellation,
kExperimentIdCallTracerInTransport,
kExperimentIdCanaryClientPrivacy,
kExperimentIdClientPrivacy,
@ -165,7 +155,6 @@ enum ExperimentIds {
kExperimentIdMaxPingsWoDataThrottle,
kExperimentIdMonitoringExperiment,
kExperimentIdMultiping,
kExperimentIdPeerStateBasedFraming,
kExperimentIdPickFirstNew,
kExperimentIdPromiseBasedInprocTransport,
kExperimentIdScheduleCancellationOverWrite,
@ -178,10 +167,6 @@ enum ExperimentIds {
kExperimentIdWorkSerializerDispatch,
kNumExperiments
};
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_STATUS_OVERRIDE_ON_CANCELLATION
inline bool IsCallStatusOverrideOnCancellationEnabled() {
return IsExperimentEnabled<kExperimentIdCallStatusOverrideOnCancellation>();
}
#define GRPC_EXPERIMENT_IS_INCLUDED_CALL_TRACER_IN_TRANSPORT
inline bool IsCallTracerInTransportEnabled() {
return IsExperimentEnabled<kExperimentIdCallTracerInTransport>();
@ -222,10 +207,6 @@ inline bool IsMonitoringExperimentEnabled() {
inline bool IsMultipingEnabled() {
return IsExperimentEnabled<kExperimentIdMultiping>();
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PEER_STATE_BASED_FRAMING
inline bool IsPeerStateBasedFramingEnabled() {
return IsExperimentEnabled<kExperimentIdPeerStateBasedFraming>();
}
#define GRPC_EXPERIMENT_IS_INCLUDED_PICK_FIRST_NEW
inline bool IsPickFirstNewEnabled() {
return IsExperimentEnabled<kExperimentIdPickFirstNew>();

@ -40,13 +40,6 @@
# This file only defines the experiments. Refer to rollouts.yaml for the rollout
# state of each experiment.
- name: call_status_override_on_cancellation
description:
Avoid overriding call status of successfully finished calls if it races with
cancellation.
expiry: 2024/08/01
owner: vigneshbabu@google.com
test_tags: []
- name: call_tracer_in_transport
description: Transport directly passes byte counts to CallTracer.
expiry: 2024/09/30
@ -82,7 +75,7 @@
uses_polling: true
- name: event_engine_listener
description: Use EventEngine listeners instead of iomgr's grpc_tcp_server
expiry: 2024/10/01
expiry: 2024/12/01
owner: vigneshbabu@google.com
test_tags: ["core_end2end_test", "event_engine_listener_test"]
uses_polling: true
@ -110,17 +103,9 @@
expiry: 2024/09/15
owner: ctiller@google.com
test_tags: [flow_control_test]
- name: peer_state_based_framing
description:
If set, the max sizes of frames sent to lower layers is controlled based
on the peer's memory pressure which is reflected in its max http2 frame
size.
expiry: 2024/08/01
owner: vigneshbabu@google.com
test_tags: ["flow_control_test"]
- name: pick_first_new
description: New pick_first impl with memory reduction.
expiry: 2024/07/30
expiry: 2024/10/30
owner: roth@google.com
test_tags: ["lb_unit_test", "cpp_lb_end2end_test", "xds_end2end_test"]
- name: promise_based_inproc_transport
@ -132,7 +117,7 @@
allow_in_fuzzing_config: false # experiment currently crashes if enabled
- name: schedule_cancellation_over_write
description: Allow cancellation op to be scheduled over a write
expiry: 2024/08/01
expiry: 2024/12/01
owner: vigneshbabu@google.com
test_tags: []
- name: server_privacy
@ -148,17 +133,17 @@
TCP would not indicate completion of a read operation until a specified
number of bytes have been read over the socket.
Buffers are also allocated according to estimated RPC sizes.
expiry: 2024/08/01
expiry: 2024/12/01
owner: vigneshbabu@google.com
test_tags: ["endpoint_test", "flow_control_test"]
- name: tcp_rcv_lowat
description: Use SO_RCVLOWAT to avoid wakeups on the read path.
expiry: 2024/08/01
expiry: 2024/12/01
owner: vigneshbabu@google.com
test_tags: ["endpoint_test", "flow_control_test"]
- name: trace_record_callops
description: Enables tracing of call batch initiation and completion.
expiry: 2024/08/01
expiry: 2024/12/01
owner: vigneshbabu@google.com
test_tags: []
- name: unconstrained_max_quota_buffer_size

@ -40,8 +40,6 @@
#
# Supported platforms: ios, windows, posix
- name: call_status_override_on_cancellation
default: true
- name: call_tracer_in_transport
default: true
- name: call_v3
@ -79,8 +77,6 @@
default: false
- name: monitoring_experiment
default: true
- name: peer_state_based_framing
default: false
- name: pending_queue_cap
default: true
- name: pick_first_new

@ -161,9 +161,8 @@ void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
refs_.fetch_sub(MakeRefPair(1, 0), std::memory_order_acq_rel);
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << " Scheduling on queue : item " << cb_wrapper;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Scheduling on queue : item " << cb_wrapper;
queue_.Push(&cb_wrapper->mpscq_node);
}
}
@ -172,19 +171,15 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
std::function<void()> callback, const DebugLocation& location) {
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::Schedule() " << this
<< " Scheduling callback " << cb_wrapper << " ["
<< location.file() << ":" << location.line() << "]";
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::Schedule() " << this << " Scheduling callback "
<< cb_wrapper << " [" << location.file() << ":" << location.line() << "]";
refs_.fetch_add(MakeRefPair(0, 1), std::memory_order_acq_rel);
queue_.Push(&cb_wrapper->mpscq_node);
}
void WorkSerializer::LegacyWorkSerializer::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::Orphan() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO) << "WorkSerializer::Orphan() " << this;
const uint64_t prev_ref_pair =
refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel);
if (GetOwners(prev_ref_pair) == 0 && GetSize(prev_ref_pair) == 1) {
@ -196,9 +191,8 @@ void WorkSerializer::LegacyWorkSerializer::Orphan() {
// The thread that calls this loans itself to the work serializer so as to
// execute all the scheduled callbacks.
void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::DrainQueue() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::DrainQueue() " << this;
// Attempt to take ownership of the WorkSerializer. Also increment the queue
// size as required by `DrainQueueOwned()`.
const uint64_t prev_ref_pair =
@ -217,9 +211,8 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
}
void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << "WorkSerializer::DrainQueueOwned() " << this;
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< "WorkSerializer::DrainQueueOwned() " << this;
while (true) {
auto prev_ref_pair = refs_.fetch_sub(MakeRefPair(0, 1));
// It is possible that while draining the queue, the last callback ended
@ -264,11 +257,10 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Queue returned nullptr, trying again";
}
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
LOG(INFO) << " Running item " << cb_wrapper
<< " : callback scheduled at [" << cb_wrapper->location.file()
<< ":" << cb_wrapper->location.line() << "]";
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Running item " << cb_wrapper << " : callback scheduled at ["
<< cb_wrapper->location.file() << ":" << cb_wrapper->location.line()
<< "]";
cb_wrapper->callback();
delete cb_wrapper;
}

@ -39,15 +39,6 @@
#include "src/core/lib/gprpp/time_util.h"
#include "src/core/lib/iomgr/ev_apple.h"
#ifndef NDEBUG
#define GRPC_POLLING_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(apple_polling)) { \
VLOG(2) << "(polling) " << absl::StrFormat(format, __VA_ARGS__); \
}
#else
#define GRPC_POLLING_TRACE(...)
#endif // NDEBUG
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
struct GlobalRunLoopContext {

@ -727,9 +727,8 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "ps: " << ps << " poll got " << r << " events";
}
GRPC_TRACE_LOG(polling, INFO)
<< "ps: " << ps << " poll got " << r << " events";
gpr_atm_rel_store(&g_epoll_set.num_events, r);
gpr_atm_rel_store(&g_epoll_set.cursor, 0);
@ -746,9 +745,8 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_STARTS:" << worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_STARTS:" << worker;
if (pollset->seen_inactive) {
// pollset has been observed to be inactive, we need to move back to the
@ -765,11 +763,10 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
retry_lock_neighborhood:
gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_REORG:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " is_reassigning=" << is_reassigning;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_REORG:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " is_reassigning=" << is_reassigning;
if (pollset->seen_inactive) {
if (neighborhood != pollset->neighborhood) {
gpr_mu_unlock(&neighborhood->mu);
@ -818,11 +815,10 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " BEGIN_WAIT:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " shutdown=" << pollset->shutting_down;
}
GRPC_TRACE_LOG(polling, INFO)
<< "PS:" << pollset << " BEGIN_WAIT:" << worker
<< " kick_state=" << kick_state_string(worker->state)
<< " shutdown=" << pollset->shutting_down;
if (gpr_cv_wait(&worker->cv, &pollset->mu,
deadline.as_timespec(GPR_CLOCK_MONOTONIC)) &&
@ -877,17 +873,15 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(
&g_active_poller, 0,
reinterpret_cast<gpr_atm>(inspect_worker))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. choose next poller to be " << inspect_worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. choose next poller to be " << inspect_worker;
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. beaten to choose next poller";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. beaten to choose next poller";
}
// even if we didn't win the cas, there's a worker, we can stop
found_worker = true;
@ -903,9 +897,8 @@ static bool check_neighborhood_for_available_poller(
} while (!found_worker && inspect_worker != inspect->root_worker);
}
if (!found_worker) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. mark pollset " << inspect << " inactive";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. mark pollset " << inspect << " inactive";
inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) {
neighborhood->active_root =
@ -922,9 +915,7 @@ static bool check_neighborhood_for_available_poller(
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << "PS:" << pollset << " END_WORKER:" << worker;
}
GRPC_TRACE_LOG(polling, INFO) << "PS:" << pollset << " END_WORKER:" << worker;
if (worker_hdl != nullptr) *worker_hdl = nullptr;
// Make sure we appear kicked
SET_KICK_STATE(worker, KICKED);
@ -933,9 +924,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) ==
reinterpret_cast<gpr_atm>(worker)) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. choose next poller to be peer " << worker;
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. choose next poller to be peer " << worker;
CHECK(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
@ -984,9 +974,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. remove worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. remove worker";
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset);
}
@ -1075,22 +1063,16 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
pollset->kicked_without_poller = true;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked_without_poller";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked_without_poller";
goto done;
}
grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. already kicked " << root_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. already kicked " << root_worker;
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. already kicked " << next_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. already kicked " << next_worker;
SET_KICK_STATE(next_worker, KICKED);
goto done;
} else if (root_worker == next_worker && // only try and wake up a poller
@ -1098,27 +1080,22 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
root_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked " << root_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked " << root_worker;
SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked " << next_worker;
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked " << next_worker;
CHECK(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
goto done;
} else if (next_worker->state == DESIGNATED_POLLER) {
if (root_worker->state != DESIGNATED_POLLER) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked root non-poller " << root_worker
<< " (initialized_cv=" << root_worker->initialized_cv
<< ") (poller=" << next_worker << ")";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. kicked root non-poller " << root_worker
<< " (initialized_cv=" << root_worker->initialized_cv
<< ") (poller=" << next_worker << ")";
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
gpr_cv_signal(&root_worker->cv);
@ -1137,9 +1114,7 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
goto done;
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kicked while waking up";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kicked while waking up";
goto done;
}
@ -1147,36 +1122,27 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
if (specific_worker->state == KICKED) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. specific worker already kicked";
}
GRPC_TRACE_LOG(polling, INFO) << " .. specific worker already kicked";
goto done;
} else if (g_current_thread_worker == specific_worker) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. mark " << specific_worker << " kicked";
}
GRPC_TRACE_LOG(polling, INFO)
<< " .. mark " << specific_worker << " kicked";
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick active poller";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick active poller";
SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick waiting worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick waiting worker";
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << " .. kick non-waiting worker";
}
GRPC_TRACE_LOG(polling, INFO) << " .. kick non-waiting worker";
SET_KICK_STATE(specific_worker, KICKED);
goto done;
}

@ -1028,9 +1028,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << pollset << " poll=" << r;
}
GRPC_TRACE_LOG(polling, INFO) << pollset << " poll=" << r;
if (r < 0) {
if (errno != EINTR) {
@ -1052,9 +1050,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (GRPC_TRACE_FLAG_ENABLED(polling)) {
LOG(INFO) << pollset << ": got_wakeup";
}
GRPC_TRACE_LOG(polling, INFO) << pollset << ": got_wakeup";
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}

@ -42,18 +42,6 @@
#include "src/core/lib/iomgr/internal_errqueue.h"
#include "src/core/util/useful.h"
// Traces fd create/close operations
// Polling API trace only enabled in debug builds
#ifndef NDEBUG
#define GRPC_POLLING_API_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(polling_api)) { \
LOG(INFO) << "(polling-api) " << absl::StrFormat(format, __VA_ARGS__); \
}
#else
#define GRPC_POLLING_API_TRACE(...)
#endif // NDEBUG
/// Default poll() function - a pointer so that it can be overridden by some
/// tests
#ifndef GPR_AIX
@ -165,7 +153,8 @@ bool grpc_event_engine_run_in_background(void) {
}
grpc_fd* grpc_fd_create(int fd, const char* name, bool track_err) {
GRPC_POLLING_API_TRACE("fd_create(%d, %s, %d)", fd, name, track_err);
GRPC_TRACE_DLOG(polling_api, INFO) << "(polling-api) fd_create(" << fd << ", "
<< name << ", " << track_err << ")";
GRPC_TRACE_LOG(fd_trace, INFO) << "(fd-trace) fd_create(" << fd << ", "
<< name << ", " << track_err << ")";
return g_event_engine->fd_create(
@ -178,8 +167,9 @@ int grpc_fd_wrapped_fd(grpc_fd* fd) {
void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
const char* reason) {
GRPC_POLLING_API_TRACE("fd_orphan(%d, %p, %p, %s)", grpc_fd_wrapped_fd(fd),
on_done, release_fd, reason);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) fd_orphan(" << grpc_fd_wrapped_fd(fd) << ", " << on_done
<< ", " << release_fd << ", " << reason << ")";
GRPC_TRACE_LOG(fd_trace, INFO)
<< "(fd-trace) grpc_fd_orphan, fd:" << grpc_fd_wrapped_fd(fd)
<< " closed";
@ -188,14 +178,16 @@ void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
}
void grpc_fd_set_pre_allocated(grpc_fd* fd) {
GRPC_POLLING_API_TRACE("fd_set_pre_allocated(%d)", grpc_fd_wrapped_fd(fd));
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) fd_set_pre_allocated(" << grpc_fd_wrapped_fd(fd) << ")";
GRPC_TRACE_LOG(fd_trace, INFO)
<< "(fd-trace) fd_set_pre_allocated(" << grpc_fd_wrapped_fd(fd) << ")";
g_event_engine->fd_set_pre_allocated(fd);
}
void grpc_fd_shutdown(grpc_fd* fd, grpc_error_handle why) {
GRPC_POLLING_API_TRACE("fd_shutdown(%d)", grpc_fd_wrapped_fd(fd));
GRPC_TRACE_LOG(polling_api, INFO)
<< "(polling-api) fd_shutdown(" << grpc_fd_wrapped_fd(fd) << ")";
GRPC_TRACE_LOG(fd_trace, INFO)
<< "(fd-trace) fd_shutdown(" << grpc_fd_wrapped_fd(fd) << ")";
g_event_engine->fd_shutdown(fd, why);
@ -226,41 +218,48 @@ void grpc_fd_set_error(grpc_fd* fd) { g_event_engine->fd_set_error(fd); }
static size_t pollset_size(void) { return g_event_engine->pollset_size; }
static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
GRPC_POLLING_API_TRACE("pollset_init(%p)", pollset);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_init(" << pollset << ")";
g_event_engine->pollset_init(pollset, mu);
}
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
GRPC_POLLING_API_TRACE("pollset_shutdown(%p)", pollset);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_shutdown(" << pollset << ")";
g_event_engine->pollset_shutdown(pollset, closure);
}
static void pollset_destroy(grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_destroy(%p)", pollset);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_destroy(" << pollset << ")";
g_event_engine->pollset_destroy(pollset);
}
static grpc_error_handle pollset_work(grpc_pollset* pollset,
grpc_pollset_worker** worker,
grpc_core::Timestamp deadline) {
GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") begin", pollset,
deadline.milliseconds_after_process_epoch());
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_work(" << pollset << ", "
<< deadline.milliseconds_after_process_epoch() << ") begin";
grpc_error_handle err =
g_event_engine->pollset_work(pollset, worker, deadline);
GRPC_POLLING_API_TRACE("pollset_work(%p, %" PRId64 ") end", pollset,
deadline.milliseconds_after_process_epoch());
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_work(" << pollset << ", "
<< deadline.milliseconds_after_process_epoch() << ") end";
return err;
}
static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GRPC_POLLING_API_TRACE("pollset_kick(%p, %p)", pollset, specific_worker);
GRPC_TRACE_DLOG(polling_api, INFO) << "(polling-api) pollset_kick(" << pollset
<< ", " << specific_worker << ")";
return g_event_engine->pollset_kick(pollset, specific_worker);
}
void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd) {
GRPC_POLLING_API_TRACE("pollset_add_fd(%p, %d)", pollset,
grpc_fd_wrapped_fd(fd));
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_add_fd(" << pollset << ", "
<< grpc_fd_wrapped_fd(fd) << ")";
g_event_engine->pollset_add_fd(pollset, fd);
}
@ -275,38 +274,44 @@ grpc_pollset_vtable grpc_posix_pollset_vtable = {
static grpc_pollset_set* pollset_set_create(void) {
grpc_pollset_set* pss = g_event_engine->pollset_set_create();
GRPC_POLLING_API_TRACE("pollset_set_create(%p)", pss);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_create(" << pss << ")";
return pss;
}
static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
GRPC_POLLING_API_TRACE("pollset_set_destroy(%p)", pollset_set);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_destroy(" << pollset_set << ")";
g_event_engine->pollset_set_destroy(pollset_set);
}
static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_set_add_pollset(%p, %p)", pollset_set,
pollset);
GRPC_TRACE_DLOG(polling_api, INFO) << "(polling-api) pollset_set_add_pollset("
<< pollset_set << ", " << pollset << ")";
g_event_engine->pollset_set_add_pollset(pollset_set, pollset);
}
static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {
GRPC_POLLING_API_TRACE("pollset_set_del_pollset(%p, %p)", pollset_set,
pollset);
GRPC_TRACE_DLOG(polling_api, INFO) << "(polling-api) pollset_set_del_pollset("
<< pollset_set << ", " << pollset << ")";
g_event_engine->pollset_set_del_pollset(pollset_set, pollset);
}
static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
grpc_pollset_set* item) {
GRPC_POLLING_API_TRACE("pollset_set_add_pollset_set(%p, %p)", bag, item);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_add_pollset_set(" << bag << ", " << item
<< ")";
g_event_engine->pollset_set_add_pollset_set(bag, item);
}
static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
grpc_pollset_set* item) {
GRPC_POLLING_API_TRACE("pollset_set_del_pollset_set(%p, %p)", bag, item);
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_del_pollset_set(" << bag << ", " << item
<< ")";
g_event_engine->pollset_set_del_pollset_set(bag, item);
}
@ -316,14 +321,16 @@ grpc_pollset_set_vtable grpc_posix_pollset_set_vtable = {
pollset_set_add_pollset_set, pollset_set_del_pollset_set};
void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
GRPC_POLLING_API_TRACE("pollset_set_add_fd(%p, %d)", pollset_set,
grpc_fd_wrapped_fd(fd));
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_add_fd(" << pollset_set << ", "
<< grpc_fd_wrapped_fd(fd) << ")";
g_event_engine->pollset_set_add_fd(pollset_set, fd);
}
void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
GRPC_POLLING_API_TRACE("pollset_set_del_fd(%p, %d)", pollset_set,
grpc_fd_wrapped_fd(fd));
GRPC_TRACE_DLOG(polling_api, INFO)
<< "(polling-api) pollset_set_del_fd(" << pollset_set << ", "
<< grpc_fd_wrapped_fd(fd) << ")";
g_event_engine->pollset_set_del_fd(pollset_set, fd);
}

@ -39,13 +39,6 @@
#define MAX_DEPTH 2
#define EXECUTOR_TRACE(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(executor)) { \
LOG(INFO) << "EXECUTOR " << absl::StrFormat(format, __VA_ARGS__); \
} \
} while (0)
namespace grpc_core {
namespace {
@ -110,11 +103,13 @@ size_t Executor::RunClosures(const char* executor_name,
while (c != nullptr) {
grpc_closure* next = c->next_data.next;
#ifndef NDEBUG
EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
c->file_created, c->line_created);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << executor_name << ") run " << c << " [created by "
<< c->file_created << ":" << c->line_created << "]";
c->scheduled = false;
#else
EXECUTOR_TRACE("(%s) run %p", executor_name, c);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << executor_name << ") run " << c;
#endif
grpc_error_handle error =
internal::StatusMoveFromHeapPtr(c->error_data.error);
@ -134,11 +129,14 @@ bool Executor::IsThreaded() const {
void Executor::SetThreading(bool threading) {
gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_);
EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") SetThreading(" << threading << ") begin";
if (threading) {
if (curr_num_threads > 0) {
EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads > 0", name_);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_
<< ") SetThreading(true). curr_num_threads > 0";
return;
}
@ -160,7 +158,9 @@ void Executor::SetThreading(bool threading) {
thd_state_[0].thd.Start();
} else { // !threading
if (curr_num_threads == 0) {
EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_
<< ") SetThreading(false). curr_num_threads == 0";
return;
}
@ -179,8 +179,9 @@ void Executor::SetThreading(bool threading) {
curr_num_threads = gpr_atm_no_barrier_load(&num_threads_);
for (gpr_atm i = 0; i < curr_num_threads; i++) {
thd_state_[i].thd.Join();
EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_,
i + 1, curr_num_threads);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") Thread " << i + 1 << " of "
<< curr_num_threads << " joined";
}
gpr_atm_rel_store(&num_threads_, 0);
@ -201,7 +202,8 @@ void Executor::SetThreading(bool threading) {
grpc_iomgr_platform_shutdown_background_closure();
}
EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") SetThreading(" << threading << ") done";
}
void Executor::Shutdown() { SetThreading(false); }
@ -214,8 +216,9 @@ void Executor::ThreadMain(void* arg) {
size_t subtract_depth = 0;
for (;;) {
EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
ts->name, ts->id, subtract_depth);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << ts->name << ") [" << ts->id
<< "]: step (sub_depth=" << subtract_depth << ")";
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
@ -226,7 +229,8 @@ void Executor::ThreadMain(void* arg) {
}
if (ts->shutdown) {
EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << ts->name << ") [" << ts->id << "]: shutdown";
gpr_mu_unlock(&ts->mu);
break;
}
@ -235,7 +239,8 @@ void Executor::ThreadMain(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << ts->name << ") [" << ts->id << "]: execute";
ExecCtx::Get()->InvalidateNow();
subtract_depth = RunClosures(ts->name, closures);
@ -257,10 +262,13 @@ void Executor::Enqueue(grpc_closure* closure, grpc_error_handle error,
// or already shutdown), then queue the closure on the exec context itself
if (cur_thread_count == 0) {
#ifndef NDEBUG
EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
closure->file_created, closure->line_created);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") schedule " << closure << " (created "
<< closure->file_created << ":" << closure->line_created
<< ") inline";
#else
EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") schedule " << closure << " inline";
#endif
grpc_closure_list_append(ExecCtx::Get()->closure_list(), closure, error);
return;
@ -280,14 +288,15 @@ void Executor::Enqueue(grpc_closure* closure, grpc_error_handle error,
for (;;) {
#ifndef NDEBUG
EXECUTOR_TRACE(
"(%s) try to schedule %p (%s) (created %s:%d) to thread "
"%" PRIdPTR,
name_, closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, ts->id);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") try to schedule " << closure << " ("
<< (is_short ? "short" : "long") << ") (created "
<< closure->file_created << ":" << closure->line_created
<< ") to thread " << ts->id;
#else
EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
closure, is_short ? "short" : "long", ts->id);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR (" << name_ << ") try to schedule " << closure << " ("
<< (is_short ? "short" : "long") << ") to thread " << ts->id;
#endif
gpr_mu_lock(&ts->mu);
@ -429,7 +438,8 @@ bool Executor::IsThreadedDefault() {
}
void Executor::SetThreadingAll(bool enable) {
EXECUTOR_TRACE("Executor::SetThreadingAll(%d) called", enable);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR Executor::SetThreadingAll(" << enable << ") called";
for (size_t i = 0; i < static_cast<size_t>(ExecutorType::NUM_EXECUTORS);
i++) {
executors[i]->SetThreading(enable);
@ -437,7 +447,8 @@ void Executor::SetThreadingAll(bool enable) {
}
void Executor::SetThreadingDefault(bool enable) {
EXECUTOR_TRACE("Executor::SetThreadingDefault(%d) called", enable);
GRPC_TRACE_LOG(executor, INFO)
<< "EXECUTOR Executor::SetThreadingDefault(" << enable << ") called";
executors[static_cast<size_t>(ExecutorType::DEFAULT)]->SetThreading(enable);
}

@ -618,18 +618,14 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " destroy";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " destroy";
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
backup_poller* p = static_cast<backup_poller*>(bp);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " run";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " run";
gpr_mu_lock(p->pollset_mu);
grpc_core::Timestamp deadline =
grpc_core::Timestamp::Now() + grpc_core::Duration::Seconds(10);
@ -644,17 +640,13 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
g_backup_poller = nullptr;
g_uncovered_notifications_pending = 0;
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " shutdown";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " shutdown";
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " reschedule";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " reschedule";
grpc_core::Executor::Run(&p->run_poller, absl::OkStatus(),
grpc_core::ExecutorType::DEFAULT,
grpc_core::ExecutorJobType::LONG);
@ -691,9 +683,7 @@ static void cover_self(grpc_tcp* tcp) {
g_backup_poller = p;
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "BACKUP_POLLER:" << p << " create";
}
GRPC_TRACE_LOG(tcp, INFO) << "BACKUP_POLLER:" << p << " create";
grpc_core::Executor::Run(
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p, nullptr),
absl::OkStatus(), grpc_core::ExecutorType::DEFAULT,
@ -709,16 +699,12 @@ static void cover_self(grpc_tcp* tcp) {
}
static void notify_on_read(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " notify_on_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_read";
grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_tcp* tcp) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " notify_on_write";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " notify_on_write";
if (!grpc_event_engine_run_in_background()) {
cover_self(tcp);
}
@ -815,9 +801,8 @@ static void tcp_destroy(grpc_endpoint* ep) {
static void perform_reclamation(grpc_tcp* tcp)
ABSL_LOCKS_EXCLUDED(tcp->read_mu) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "TCP: benign reclamation to free memory";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "TCP: benign reclamation to free memory";
tcp->read_mu.Lock();
if (tcp->incoming_buffer != nullptr) {
grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
@ -910,9 +895,7 @@ static void update_rcvlowat(grpc_tcp* tcp)
static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
GRPC_LATENT_SEE_INNER_SCOPE("tcp_do_read");
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " do_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " do_read";
struct msghdr msg;
struct iovec iov[MAX_READ_IOVEC];
ssize_t read_bytes;
@ -1479,9 +1462,7 @@ static bool process_errors(grpc_tcp* tcp) {
static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_handle error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " got_error: " << error;
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " got_error: " << error;
if (!error.ok() ||
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
@ -1809,9 +1790,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
? tcp_flush_zerocopy(tcp, tcp->current_zerocopy_send, &error)
: tcp_flush(tcp, &error);
if (!flush_result) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "write: delayed";
}
GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
notify_on_write(tcp);
// tcp_flush does not populate error if it has returned false.
DCHECK(error.ok());
@ -1880,9 +1859,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write");
tcp->write_cb = cb;
tcp->current_zerocopy_send = zerocopy_send_record;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "write: delayed";
}
GRPC_TRACE_LOG(tcp, INFO) << "write: delayed";
notify_on_write(tcp);
} else {
GRPC_TRACE_LOG(tcp, INFO) << "write: " << grpc_core::StatusToString(error);

@ -459,9 +459,8 @@ static void on_read(void* arg, grpc_error_handle err) {
LOG(ERROR) << "Invalid address: " << addr_uri.status();
goto error;
}
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "SERVER_CONNECT: incoming connection: " << *addr_uri;
}
GRPC_TRACE_LOG(tcp, INFO)
<< "SERVER_CONNECT: incoming connection: " << *addr_uri;
std::string name = absl::StrCat("tcp-server-connection:", addr_uri.value());
grpc_fd* fdobj = grpc_fd_create(fd, name.c_str(), true);

@ -176,9 +176,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
grpc_winsocket* socket = tcp->socket;
grpc_winsocket_callback_info* info = &socket->read_info;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " on_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " on_read";
if (error.ok()) {
if (info->wsa_error != 0 && !tcp->shutting_down) {
@ -208,9 +206,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
}
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " unref read_slice";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " unref read_slice";
grpc_slice_buffer_reset_and_unref(tcp->read_slices);
error = grpc_error_set_int(
tcp->shutting_down ? GRPC_ERROR_CREATE("TCP stream shutting down")
@ -239,9 +235,7 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
WSABUF buffers[MAX_WSABUF_COUNT];
size_t i;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " win_read";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " win_read";
if (tcp->shutting_down) {
grpc_core::ExecCtx::Run(
@ -310,9 +304,7 @@ static void on_write(void* tcpp, grpc_error_handle error) {
grpc_winsocket_callback_info* info = &handle->write_info;
grpc_closure* cb;
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
LOG(INFO) << "TCP:" << tcp << " on_write";
}
GRPC_TRACE_LOG(tcp, INFO) << "TCP:" << tcp << " on_write";
gpr_mu_lock(&tcp->mu);
cb = tcp->write_cb;

@ -195,11 +195,10 @@ static bool wait_until(grpc_core::Timestamp next) {
gpr_cv_wait(&g_cv_wait, &g_mu, next.as_timespec(GPR_CLOCK_MONOTONIC));
if (GRPC_TRACE_FLAG_ENABLED(timer_check)) {
LOG(INFO) << "wait ended: was_timed:"
<< (my_timed_waiter_generation == g_timed_waiter_generation)
<< " kicked:" << g_kicked;
}
GRPC_TRACE_LOG(timer_check, INFO)
<< "wait ended: was_timed:"
<< (my_timed_waiter_generation == g_timed_waiter_generation)
<< " kicked:" << g_kicked;
// if this was the timed waiter, then we need to check timers, and flag
// that there's now no timed waiter... we'll look for a replacement if
// there's work to do after checking timers (code above)

@ -172,16 +172,13 @@ class ForEach {
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION Poll<Result> PollReaderNext() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext";
}
GRPC_TRACE_LOG(promise_primitives, INFO) << DebugTag() << " PollReaderNext";
auto r = reader_next_();
if (auto* p = r.value_if_ready()) {
switch (NextValueTraits<ReaderResult>::Type(*p)) {
case NextValueType::kValue: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got value";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got value";
Destruct(&reader_next_);
auto action = action_factory_.Make(
std::move(NextValueTraits<ReaderResult>::MutableValue(*p)));
@ -190,15 +187,13 @@ class ForEach {
return PollAction();
}
case NextValueType::kEndOfStream: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got end of stream";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got end of stream";
return Done<Result>::Make(false);
}
case NextValueType::kError: {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollReaderNext: got error";
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << " PollReaderNext: got error";
return Done<Result>::Make(true);
}
}
@ -207,9 +202,7 @@ class ForEach {
}
Poll<Result> PollAction() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << " PollAction";
}
GRPC_TRACE_LOG(promise_primitives, INFO) << DebugTag() << " PollAction";
auto r = in_action_.promise();
if (auto* p = r.value_if_ready()) {
if (IsStatusOk(*p)) {

@ -45,9 +45,8 @@ class InterActivityLatch {
auto Wait() {
return [this]() -> Poll<T> {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return std::move(value_);
} else {
@ -60,9 +59,8 @@ class InterActivityLatch {
// Set the latch.
void Set(T value) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_ = true;
value_ = std::move(value);
waiters_.WakeupAsync();
@ -102,9 +100,8 @@ class InterActivityLatch<void> {
auto Wait() {
return [this]() -> Poll<Empty> {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return Empty{};
} else {
@ -117,9 +114,8 @@ class InterActivityLatch<void> {
// Set the latch.
void Set() {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_ = true;
waiters_.WakeupAsync();
}

@ -67,9 +67,8 @@ class Latch {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Wait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Wait " << StateString();
if (has_value_) {
return std::move(value_);
} else {
@ -85,9 +84,8 @@ class Latch {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<T> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "WaitAndCopy " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "WaitAndCopy " << StateString();
if (has_value_) {
return value_;
} else {
@ -98,9 +96,8 @@ class Latch {
// Set the value of the latch. Can only be called once.
void Set(T value) {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
DCHECK(!has_value_);
value_ = std::move(value);
has_value_ = true;
@ -161,9 +158,8 @@ class Latch<void> {
has_had_waiters_ = true;
#endif
return [this]() -> Poll<Empty> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (is_set_) {
return Empty{};
} else {
@ -174,9 +170,8 @@ class Latch<void> {
// Set the latch. Can only be called once.
void Set() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
DCHECK(!is_set_);
is_set_ = true;
waiter_.Wake();
@ -223,9 +218,8 @@ class ExternallyObservableLatch<void> {
// Produce a promise to wait for this latch.
auto Wait() {
return [this]() -> Poll<Empty> {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "PollWait " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "PollWait " << StateString();
if (IsSet()) {
return Empty{};
} else {
@ -236,9 +230,8 @@ class ExternallyObservableLatch<void> {
// Set the latch.
void Set() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Set " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Set " << StateString();
is_set_.store(true, std::memory_order_relaxed);
waiter_.Wake();
}
@ -246,9 +239,8 @@ class ExternallyObservableLatch<void> {
bool IsSet() const { return is_set_.load(std::memory_order_relaxed); }
void Reset() {
if (GRPC_TRACE_FLAG_ENABLED(promise_primitives)) {
LOG(INFO) << DebugTag() << "Reset " << StateString();
}
GRPC_TRACE_LOG(promise_primitives, INFO)
<< DebugTag() << "Reset " << StateString();
is_set_.store(false, std::memory_order_relaxed);
}

@ -355,9 +355,8 @@ void GrpcMemoryAllocatorImpl::MaybeDonateBack() {
if (free_bytes_.compare_exchange_weak(free, new_free,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "[" << this << "] Early return " << ret << " bytes";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "[" << this << "] Early return " << ret << " bytes";
CHECK(taken_bytes_.fetch_sub(ret, std::memory_order_relaxed) >= ret);
memory_quota_->Return(ret);
return;
@ -548,9 +547,7 @@ void BasicMemoryQuota::Return(size_t amount) {
}
void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Adding allocator " << allocator;
}
GRPC_TRACE_LOG(resource_quota, INFO) << "Adding allocator " << allocator;
AllocatorBucket::Shard& shard = small_allocators_.SelectShard(allocator);
@ -561,9 +558,7 @@ void BasicMemoryQuota::AddNewAllocator(GrpcMemoryAllocatorImpl* allocator) {
}
void BasicMemoryQuota::RemoveAllocator(GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Removing allocator " << allocator;
}
GRPC_TRACE_LOG(resource_quota, INFO) << "Removing allocator " << allocator;
AllocatorBucket::Shard& small_shard =
small_allocators_.SelectShard(allocator);
@ -608,9 +603,8 @@ void BasicMemoryQuota::MaybeMoveAllocator(GrpcMemoryAllocatorImpl* allocator,
void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Moving allocator " << allocator << " to small";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Moving allocator " << allocator << " to small";
AllocatorBucket::Shard& old_shard = big_allocators_.SelectShard(allocator);
@ -629,9 +623,8 @@ void BasicMemoryQuota::MaybeMoveAllocatorBigToSmall(
void BasicMemoryQuota::MaybeMoveAllocatorSmallToBig(
GrpcMemoryAllocatorImpl* allocator) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
LOG(INFO) << "Moving allocator " << allocator << " to big";
}
GRPC_TRACE_LOG(resource_quota, INFO)
<< "Moving allocator " << allocator << " to big";
AllocatorBucket::Shard& old_shard = small_allocators_.SelectShard(allocator);

@ -167,11 +167,10 @@ absl::Status FileWatcherAuthorizationPolicyProvider::ForceUpdate() {
if (cb_ != nullptr) {
cb_(contents_changed, absl::OkStatus());
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_authz_api)) {
LOG(INFO) << "authorization policy reload status: successfully loaded new "
"policy\n"
<< file_contents_;
}
GRPC_TRACE_LOG(grpc_authz_api, INFO)
<< "authorization policy reload status: successfully loaded new "
"policy\n"
<< file_contents_;
return absl::OkStatus();
}

@ -511,11 +511,10 @@ grpc_call_credentials* grpc_google_refresh_token_credentials_create(
const char* json_refresh_token, void* reserved) {
grpc_auth_refresh_token token =
grpc_auth_refresh_token_create_from_string(json_refresh_token);
if (GRPC_TRACE_FLAG_ENABLED(api)) {
LOG(INFO) << "grpc_refresh_token_credentials_create(json_refresh_token="
<< create_loggable_refresh_token(&token)
<< ", reserved=" << reserved << ")";
}
GRPC_TRACE_LOG(api, INFO)
<< "grpc_refresh_token_credentials_create(json_refresh_token="
<< create_loggable_refresh_token(&token) << ", reserved=" << reserved
<< ")";
CHECK_EQ(reserved, nullptr);
return grpc_refresh_token_credentials_create_from_auth_refresh_token(token)
.release();

@ -497,6 +497,13 @@ void grpc_call_tracer_set(grpc_call* call,
return arena->SetContext<grpc_core::CallTracerAnnotationInterface>(tracer);
}
void grpc_call_tracer_set_and_manage(grpc_call* call,
grpc_core::ClientCallTracer* tracer) {
grpc_core::Arena* arena = grpc_call_get_arena(call);
arena->ManagedNew<ClientCallTracerWrapper>(tracer);
return arena->SetContext<grpc_core::CallTracerAnnotationInterface>(tracer);
}
void* grpc_call_tracer_get(grpc_call* call) {
grpc_core::Arena* arena = grpc_call_get_arena(call);
auto* call_tracer =

@ -265,6 +265,16 @@ void grpc_call_log_batch(const char* file, int line, const grpc_op* ops,
void grpc_call_tracer_set(grpc_call* call, grpc_core::ClientCallTracer* tracer);
// Sets call tracer on the call and manages its life by using the call's arena.
// When using this API, the tracer will be destroyed by grpc_call arena when
// grpc_call is about to be destroyed. The caller of this API SHOULD NOT
// manually destroy the tracer. This API is used by Python as a way of using
// Arena to manage the lifetime of the call tracer. Python needs this API
// because the tracer was created within a separate shared object library which
// doesn't have access to core functions like arena->ManagedNew<>.
void grpc_call_tracer_set_and_manage(grpc_call* call,
grpc_core::ClientCallTracer* tracer);
void* grpc_call_tracer_get(grpc_call* call);
#define GRPC_CALL_LOG_BATCH(ops, nops) \
@ -276,6 +286,15 @@ void* grpc_call_tracer_get(grpc_call* call);
uint8_t grpc_call_is_client(grpc_call* call);
class ClientCallTracerWrapper {
public:
explicit ClientCallTracerWrapper(grpc_core::ClientCallTracer* tracer)
: tracer_(tracer) {}
private:
std::unique_ptr<grpc_core::ClientCallTracer> tracer_;
};
// Return an appropriate compression algorithm for the requested compression \a
// level in the context of \a call.
grpc_compression_algorithm grpc_call_compression_for_level(

@ -533,15 +533,13 @@ void FilterStackCall::BatchControl::PostCompletion() {
FilterStackCall* call = call_;
grpc_error_handle error = batch_error_.get();
if (IsCallStatusOverrideOnCancellationEnabled()) {
// On the client side, if final call status is already known (i.e if this op
// includes recv_trailing_metadata) and if the call status is known to be
// OK, then disregard the batch error to ensure call->receiving_buffer_ is
// not cleared.
if (op_.recv_trailing_metadata && call->is_client() &&
call->status_error_.ok()) {
error = absl::OkStatus();
}
// On the client side, if final call status is already known (i.e if this op
// includes recv_trailing_metadata) and if the call status is known to be
// OK, then disregard the batch error to ensure call->receiving_buffer_ is
// not cleared.
if (op_.recv_trailing_metadata && call->is_client() &&
call->status_error_.ok()) {
error = absl::OkStatus();
}
GRPC_TRACE_VLOG(call, 2) << "tag:" << completion_data_.notify_tag.tag

@ -47,18 +47,16 @@ Timestamp BdpEstimator::CompletePing() {
1e-9 * static_cast<double>(dt_ts.tv_nsec);
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
Duration start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
LOG(INFO) << "bdp[" << name_ << "]:complete acc=" << accumulator_
<< " est=" << estimate_ << " dt=" << dt << " bw=" << bw / 125000.0
<< "Mbs bw_est=" << bw_est_ / 125000.0 << "Mbs";
}
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]:complete acc=" << accumulator_
<< " est=" << estimate_ << " dt=" << dt << " bw=" << bw / 125000.0
<< "Mbs bw_est=" << bw_est_ / 125000.0 << "Mbs";
CHECK(ping_state_ == PingState::STARTED);
if (accumulator_ > 2 * estimate_ / 3 && bw > bw_est_) {
estimate_ = std::max(accumulator_, estimate_ * 2);
bw_est_ = bw;
if (GRPC_TRACE_FLAG_ENABLED(bdp_estimator)) {
LOG(INFO) << "bdp[" << name_ << "]: estimate increased to " << estimate_;
}
GRPC_TRACE_LOG(bdp_estimator, INFO)
<< "bdp[" << name_ << "]: estimate increased to " << estimate_;
inter_ping_delay_ /= 2; // if the ping estimate changes,
// exponentially get faster at probing
} else if (inter_ping_delay_ < Duration::Seconds(10)) {

@ -200,11 +200,9 @@ void CallFilters::CancelDueToFailedPipeOperation(SourceLocation but_where) {
void CallFilters::PushServerTrailingMetadata(ServerMetadataHandle md) {
CHECK(md != nullptr);
if (GRPC_TRACE_FLAG_ENABLED(call)) {
LOG(INFO) << GetContext<Activity>()->DebugTag()
<< " PushServerTrailingMetadata[" << this
<< "]: " << md->DebugString() << " into " << DebugString();
}
GRPC_TRACE_LOG(call, INFO)
<< GetContext<Activity>()->DebugTag() << " PushServerTrailingMetadata["
<< this << "]: " << md->DebugString() << " into " << DebugString();
CHECK(md != nullptr);
if (call_state_.PushServerTrailingMetadata(
md->get(GrpcCallWasCancelled()).value_or(false))) {

@ -1174,17 +1174,16 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
if (response.client_stats_report_interval != Duration::Zero()) {
client_stats_report_interval_ = std::max(
Duration::Seconds(1), response.client_stats_report_interval);
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting interval = "
<< client_stats_report_interval_.millis()
<< " milliseconds";
}
} else if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting NOT enabled";
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting interval = "
<< client_stats_report_interval_.millis() << " milliseconds";
} else {
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Received initial LB response message; client load "
"reporting NOT enabled";
}
seen_initial_response_ = true;
break;
@ -1193,13 +1192,11 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
CHECK_NE(lb_call_, nullptr);
auto serverlist_wrapper =
MakeRefCounted<Serverlist>(std::move(response.serverlist));
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Serverlist with "
<< serverlist_wrapper->serverlist().size()
<< " servers received:\n"
<< serverlist_wrapper->AsText();
}
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Serverlist with " << serverlist_wrapper->serverlist().size()
<< " servers received:\n"
<< serverlist_wrapper->AsText();
seen_serverlist_ = true;
// Start sending client load report only after we start using the
// serverlist returned from the current LB call.
@ -1213,11 +1210,10 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
// Check if the serverlist differs from the previous one.
if (grpclb_policy()->serverlist_ != nullptr &&
*grpclb_policy()->serverlist_ == *serverlist_wrapper) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Incoming server list identical to current, "
"ignoring.";
}
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << grpclb_policy() << "] lb_calld=" << this
<< ": Incoming server list identical to current, "
"ignoring.";
} else { // New serverlist.
// Dispose of the fallback.
// TODO(roth): Ideally, we should stay in fallback mode until we
@ -1457,11 +1453,10 @@ GrpcLb::GrpcLb(Args args)
GRPC_ARG_GRPCLB_SUBCHANNEL_CACHE_INTERVAL_MS)
.value_or(Duration::Milliseconds(
GRPC_GRPCLB_DEFAULT_SUBCHANNEL_DELETION_DELAY_MS)))) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] Will use '"
<< std::string(channel_control_helper()->GetAuthority())
<< "' as the server name for LB request.";
}
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Will use '"
<< std::string(channel_control_helper()->GetAuthority())
<< "' as the server name for LB request.";
}
void GrpcLb::ShutdownLocked() {
@ -1542,9 +1537,7 @@ class GrpcLb::NullLbTokenEndpointIterator final
};
absl::Status GrpcLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] received update";
}
GRPC_TRACE_LOG(glb, INFO) << "[grpclb " << this << "] received update";
const bool is_initial_update = lb_channel_ == nullptr;
config_ = args.config.TakeAsSubclass<GrpcLbConfig>();
CHECK(config_ != nullptr);
@ -1656,11 +1649,10 @@ void GrpcLb::StartBalancerCallLocked() {
// Init the LB call data.
CHECK(lb_calld_ == nullptr);
lb_calld_ = MakeOrphanable<BalancerCallState>(Ref());
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this
<< "] Query for backends (lb_channel: " << lb_channel_.get()
<< ", lb_calld: " << lb_calld_.get() << ")";
}
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this
<< "] Query for backends (lb_channel: " << lb_channel_.get()
<< ", lb_calld: " << lb_calld_.get() << ")";
lb_calld_->StartQuery();
}
@ -1695,9 +1687,8 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
void GrpcLb::OnBalancerCallRetryTimerLocked() {
lb_call_retry_timer_handle_.reset();
if (!shutting_down_ && lb_calld_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(glb)) {
LOG(INFO) << "[grpclb " << this << "] Restarting call to LB server";
}
GRPC_TRACE_LOG(glb, INFO)
<< "[grpclb " << this << "] Restarting call to LB server";
StartBalancerCallLocked();
}
}

@ -146,11 +146,9 @@ void HealthProducer::HealthChecker::OnConnectivityStateChangeLocked(
void HealthProducer::HealthChecker::NotifyWatchersLocked(
grpc_connectivity_state state, absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthProducer " << producer_.get() << " HealthChecker "
<< this << ": reporting state " << ConnectivityStateName(state)
<< " to watchers";
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << producer_.get() << " HealthChecker " << this
<< ": reporting state " << ConnectivityStateName(state) << " to watchers";
work_serializer_->Schedule(
[self = Ref(), state, status = std::move(status)]() {
MutexLock lock(&self->producer_->mu_);
@ -285,11 +283,10 @@ class HealthProducer::HealthChecker::HealthStreamEventHandler final
void SetHealthStatusLocked(SubchannelStreamClient* client,
grpc_connectivity_state state,
const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthCheckClient " << client
<< ": setting state=" << ConnectivityStateName(state)
<< " reason=" << reason;
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthCheckClient " << client
<< ": setting state=" << ConnectivityStateName(state)
<< " reason=" << reason;
health_checker_->OnHealthWatchStatusChange(
state, state == GRPC_CHANNEL_TRANSIENT_FAILURE
? absl::UnavailableError(reason)
@ -300,11 +297,9 @@ class HealthProducer::HealthChecker::HealthStreamEventHandler final
};
void HealthProducer::HealthChecker::StartHealthStreamLocked() {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthProducer " << producer_.get() << " HealthChecker "
<< this << ": creating HealthClient for \""
<< health_check_service_name_ << "\"";
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << producer_.get() << " HealthChecker " << this
<< ": creating HealthClient for \"" << health_check_service_name_ << "\"";
stream_client_ = MakeOrphanable<SubchannelStreamClient>(
producer_->connected_subchannel_, producer_->subchannel_->pollset_set(),
std::make_unique<HealthStreamEventHandler>(Ref()),
@ -356,9 +351,8 @@ void HealthProducer::Start(RefCountedPtr<Subchannel> subchannel) {
}
void HealthProducer::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthProducer " << this << ": shutting down";
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << this << ": shutting down";
{
MutexLock lock(&mu_);
health_checkers_.clear();
@ -406,11 +400,10 @@ void HealthProducer::RemoveWatcher(
void HealthProducer::OnConnectivityStateChange(grpc_connectivity_state state,
const absl::Status& status) {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthProducer " << this
<< ": subchannel state update: state="
<< ConnectivityStateName(state) << " status=" << status;
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthProducer " << this
<< ": subchannel state update: state=" << ConnectivityStateName(state)
<< " status=" << status;
MutexLock lock(&mu_);
state_ = state;
status_ = status;
@ -432,11 +425,10 @@ void HealthProducer::OnConnectivityStateChange(grpc_connectivity_state state,
//
HealthWatcher::~HealthWatcher() {
if (GRPC_TRACE_FLAG_ENABLED(health_check_client)) {
LOG(INFO) << "HealthWatcher " << this << ": unregistering from producer "
<< producer_.get() << " (health_check_service_name=\""
<< health_check_service_name_.value_or("N/A") << "\")";
}
GRPC_TRACE_LOG(health_check_client, INFO)
<< "HealthWatcher " << this << ": unregistering from producer "
<< producer_.get() << " (health_check_service_name=\""
<< health_check_service_name_.value_or("N/A") << "\")";
if (producer_ != nullptr) {
producer_->RemoveWatcher(this, health_check_service_name_);
}

@ -532,11 +532,10 @@ OutlierDetectionLb::Picker::Picker(OutlierDetectionLb* outlier_detection_lb,
RefCountedPtr<SubchannelPicker> picker,
bool counting_enabled)
: picker_(std::move(picker)), counting_enabled_(counting_enabled) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << outlier_detection_lb
<< "] constructed new picker " << this << " and counting "
<< "is " << (counting_enabled ? "enabled" : "disabled");
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << outlier_detection_lb
<< "] constructed new picker " << this << " and counting "
<< "is " << (counting_enabled ? "enabled" : "disabled");
}
LoadBalancingPolicy::PickResult OutlierDetectionLb::Picker::Pick(
@ -574,9 +573,8 @@ LoadBalancingPolicy::PickResult OutlierDetectionLb::Picker::Pick(
OutlierDetectionLb::OutlierDetectionLb(Args args)
: LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this << "] created";
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this << "] created";
}
OutlierDetectionLb::~OutlierDetectionLb() {
@ -586,9 +584,8 @@ OutlierDetectionLb::~OutlierDetectionLb() {
}
void OutlierDetectionLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this << "] shutting down";
ejection_timer_.reset();
shutting_down_ = true;
// Remove the child policy's interested_parties pollset_set from the
@ -612,9 +609,8 @@ void OutlierDetectionLb::ResetBackoffLocked() {
}
absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this << "] Received update";
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this << "] Received update";
auto old_config = std::move(config_);
// Update config.
config_ = args.config.TakeAsSubclass<OutlierDetectionLbConfig>();
@ -627,9 +623,8 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
ejection_timer_.reset();
} else if (ejection_timer_ == nullptr) {
// No timer running. Start it now.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this << "] starting timer";
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this << "] starting timer";
ejection_timer_ = MakeOrphanable<EjectionTimer>(
RefAsSubclass<OutlierDetectionLb>(), Timestamp::Now());
for (const auto& p : endpoint_state_map_) {
@ -687,11 +682,9 @@ absl::Status OutlierDetectionLb::UpdateLocked(UpdateArgs args) {
key, MakeRefCounted<EndpointState>(std::move(subchannels)));
} else if (!config_->CountingEnabled()) {
// If counting is not enabled, reset state.
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << this
<< "] counting disabled; disabling ejection for "
<< key.ToString();
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << this
<< "] counting disabled; disabling ejection for " << key.ToString();
it->second->DisableEjection();
}
});
@ -931,17 +924,14 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
const double success_rate_stdev_factor =
static_cast<double>(config.success_rate_ejection->stdev_factor) / 1000;
double ejection_threshold = mean - stdev * success_rate_stdev_factor;
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] stdev=" << stdev
<< ", ejection_threshold=" << ejection_threshold;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get() << "] stdev=" << stdev
<< ", ejection_threshold=" << ejection_threshold;
for (auto& candidate : success_rate_ejection_candidates) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] checking candidate " << candidate.first
<< ": success_rate=" << candidate.second;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] checking candidate " << candidate.first
<< ": success_rate=" << candidate.second;
if (candidate.second < ejection_threshold) {
uint32_t random_key = absl::Uniform(bit_gen_, 1, 100);
double current_percent =
@ -979,11 +969,10 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked() {
<< config.failure_percentage_ejection->enforcement_percentage;
}
for (auto& candidate : failure_percentage_ejection_candidates) {
if (GRPC_TRACE_FLAG_ENABLED(outlier_detection_lb)) {
LOG(INFO) << "[outlier_detection_lb " << parent_.get()
<< "] checking candidate " << candidate.first
<< ": success_rate=" << candidate.second;
}
GRPC_TRACE_LOG(outlier_detection_lb, INFO)
<< "[outlier_detection_lb " << parent_.get()
<< "] checking candidate " << candidate.first
<< ": success_rate=" << candidate.second;
// Extra check to make sure success rate algorithm didn't already
// eject this backend.
if (candidate.first->ejection_time().has_value()) continue;

@ -420,22 +420,16 @@ PickFirst::PickFirst(Args args)
.GetInt(GRPC_ARG_HAPPY_EYEBALLS_CONNECTION_ATTEMPT_DELAY_MS)
.value_or(250),
100, 2000))) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " created.";
}
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << this << " created.";
}
PickFirst::~PickFirst() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Destroying Pick First " << this;
}
GRPC_TRACE_LOG(pick_first, INFO) << "Destroying Pick First " << this;
CHECK(subchannel_list_ == nullptr);
}
void PickFirst::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " Shutting down";
}
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << this << " Shutting down";
shutdown_ = true;
UnsetSelectedSubchannel();
subchannel_list_.reset();
@ -444,9 +438,8 @@ void PickFirst::ShutdownLocked() {
void PickFirst::ExitIdleLocked() {
if (shutdown_) return;
if (IsIdle()) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " exiting idle";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << this << " exiting idle";
AttemptToConnectUsingLatestUpdateArgsLocked();
}
}
@ -681,11 +674,10 @@ PickFirst::SubchannelList::SubchannelData::SubchannelState::SubchannelState(
}
void PickFirst::SubchannelList::SubchannelData::SubchannelState::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << pick_first_.get() << "] subchannel state " << this
<< " (subchannel " << subchannel_.get()
<< "): cancelling watch and unreffing subchannel";
subchannel_data_ = nullptr;
subchannel_->CancelConnectivityStateWatch(watcher_);
watcher_ = nullptr;
@ -706,9 +698,8 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::Select() {
// for the health status notification.
// If health checking is NOT enabled, report READY.
if (pick_first_->enable_health_watch_) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << pick_first_.get() << "] starting health watch";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << pick_first_.get() << "] starting health watch";
auto watcher = std::make_unique<HealthWatcher>(
pick_first_.Ref(DEBUG_LOCATION, "HealthWatcher"));
pick_first_->health_watcher_ = watcher.get();
@ -767,11 +758,10 @@ void PickFirst::SubchannelList::SubchannelData::SubchannelState::
}
// We aren't trying to connect, so we must be the selected subchannel.
CHECK(pick_first_->selected_.get() == this);
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << pick_first_.get()
<< " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << pick_first_.get()
<< " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
// Any state change is considered to be a failure of the existing
// connection. Report the failure.
auto& stats_plugins =
@ -791,11 +781,10 @@ PickFirst::SubchannelList::SubchannelData::SubchannelData(
SubchannelList* subchannel_list, size_t index,
RefCountedPtr<SubchannelInterface> subchannel)
: subchannel_list_(subchannel_list), index_(index) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index " << index_
<< ": creating subchannel data";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << subchannel_list_->policy_.get() << "] subchannel list "
<< subchannel_list_ << " index " << index_
<< ": creating subchannel data";
subchannel_state_ =
MakeOrphanable<SubchannelState>(this, std::move(subchannel));
}
@ -856,11 +845,10 @@ void PickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
// is not in the new list. In that case, we drop the current
// connection and report IDLE.
if (p->selected_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << p << "] subchannel list " << subchannel_list_
<< ": new update has no subchannels in "
<< "state READY; dropping existing connection and going IDLE";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << p << "] subchannel list " << subchannel_list_
<< ": new update has no subchannels in state READY; dropping "
"existing connection and going IDLE";
p->GoIdle();
} else {
// Start trying to connect, starting with the first subchannel.
@ -1016,18 +1004,16 @@ PickFirst::SubchannelList::SubchannelList(RefCountedPtr<PickFirst> policy,
address.address(), address.args(), args_);
if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get()
<< "] could not create subchannel for address "
<< address.ToString() << ", ignoring";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get()
<< "] could not create subchannel for address " << address.ToString()
<< ", ignoring";
return;
}
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] subchannel list " << this
<< " index " << subchannels_.size() << ": Created subchannel "
<< subchannel.get() << " for address " << address.ToString();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] subchannel list " << this << " index "
<< subchannels_.size() << ": Created subchannel " << subchannel.get()
<< " for address " << address.ToString();
subchannels_.emplace_back(std::make_unique<SubchannelData>(
this, subchannels_.size(), std::move(subchannel)));
});
@ -1374,23 +1360,17 @@ OldPickFirst::OldPickFirst(Args args)
.GetInt(GRPC_ARG_HAPPY_EYEBALLS_CONNECTION_ATTEMPT_DELAY_MS)
.value_or(250),
100, 2000))) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " created.";
}
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << this << " created.";
}
OldPickFirst::~OldPickFirst() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Destroying Pick First " << this;
}
GRPC_TRACE_LOG(pick_first, INFO) << "Destroying Pick First " << this;
CHECK(subchannel_list_ == nullptr);
CHECK(latest_pending_subchannel_list_ == nullptr);
}
void OldPickFirst::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " Shutting down";
}
GRPC_TRACE_LOG(pick_first, INFO) << "Pick First " << this << " Shutting down";
shutdown_ = true;
UnsetSelectedSubchannel();
subchannel_list_.reset();
@ -1400,9 +1380,8 @@ void OldPickFirst::ShutdownLocked() {
void OldPickFirst::ExitIdleLocked() {
if (shutdown_) return;
if (IsIdle()) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << this << " exiting idle";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << this << " exiting idle";
AttemptToConnectUsingLatestUpdateArgsLocked();
}
}
@ -1597,11 +1576,10 @@ OldPickFirst::SubchannelList::SubchannelData::SubchannelData(
: subchannel_list_(subchannel_list),
index_(index),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << subchannel_list_->policy_.get()
<< "] subchannel list " << subchannel_list_ << " index " << index_
<< " (subchannel " << subchannel_.get() << "): starting watch";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << subchannel_list_->policy_.get() << "] subchannel list "
<< subchannel_list_ << " index " << index_ << " (subchannel "
<< subchannel_.get() << "): starting watch";
auto watcher = std::make_unique<Watcher>(
subchannel_list_->Ref(DEBUG_LOCATION, "Watcher"), index_);
pending_watcher_ = watcher.get();
@ -1658,11 +1636,9 @@ void OldPickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
// Handle updates for the currently selected subchannel.
if (p->selected_ == this) {
CHECK(subchannel_list_ == p->subchannel_list_.get());
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p
<< " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " selected subchannel connectivity changed to "
<< ConnectivityStateName(new_state);
// Any state change is considered to be a failure of the existing
// connection.
stats_plugins.AddCounter(
@ -1677,11 +1653,10 @@ void OldPickFirst::SubchannelList::SubchannelData::OnConnectivityStateChange(
p->channel_control_helper()->RequestReresolution();
// If there is a pending update, switch to the pending update.
if (p->latest_pending_subchannel_list_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p << " promoting pending subchannel "
<< "list " << p->latest_pending_subchannel_list_.get()
<< " to replace " << p->subchannel_list_.get();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " promoting pending subchannel "
<< "list " << p->latest_pending_subchannel_list_.get()
<< " to replace " << p->subchannel_list_.get();
p->UnsetSelectedSubchannel();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
// Set our state to that of the pending subchannel list.
@ -1895,11 +1870,10 @@ void OldPickFirst::SubchannelList::SubchannelData::
subchannel_list_ == p->latest_pending_subchannel_list_.get());
// Case 2. Promote p->latest_pending_subchannel_list_ to p->subchannel_list_.
if (subchannel_list_ == p->latest_pending_subchannel_list_.get()) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "Pick First " << p << " promoting pending subchannel list "
<< p->latest_pending_subchannel_list_.get() << " to replace "
<< p->subchannel_list_.get();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "Pick First " << p << " promoting pending subchannel list "
<< p->latest_pending_subchannel_list_.get() << " to replace "
<< p->subchannel_list_.get();
p->UnsetSelectedSubchannel();
p->subchannel_list_ = std::move(p->latest_pending_subchannel_list_);
}
@ -1912,9 +1886,8 @@ void OldPickFirst::SubchannelList::SubchannelData::
// for the health status notification.
// If health checking is NOT enabled, report READY.
if (p->enable_health_watch_) {
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << p << "] starting health watch";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << p << "] starting health watch";
auto watcher = std::make_unique<HealthWatcher>(
p->RefAsSubclass<OldPickFirst>(DEBUG_LOCATION, "HealthWatcher"));
p->health_watcher_ = watcher.get();
@ -1959,18 +1932,16 @@ OldPickFirst::SubchannelList::SubchannelList(
address.address(), address.args(), args_);
if (subchannel == nullptr) {
// Subchannel could not be created.
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get()
<< "] could not create subchannel for address "
<< address.ToString() << ", ignoring";
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get()
<< "] could not create subchannel for address " << address.ToString()
<< ", ignoring";
return;
}
if (GRPC_TRACE_FLAG_ENABLED(pick_first)) {
LOG(INFO) << "[PF " << policy_.get() << "] subchannel list " << this
<< " index " << subchannels_.size() << ": Created subchannel "
<< subchannel.get() << " for address " << address.ToString();
}
GRPC_TRACE_LOG(pick_first, INFO)
<< "[PF " << policy_.get() << "] subchannel list " << this << " index "
<< subchannels_.size() << ": Created subchannel " << subchannel.get()
<< " for address " << address.ToString();
subchannels_.emplace_back(this, subchannels_.size(), std::move(subchannel));
});
}

@ -294,21 +294,17 @@ PriorityLb::PriorityLb(Args args)
channel_args()
.GetDurationFromIntMillis(GRPC_ARG_PRIORITY_FAILOVER_TIMEOUT_MS)
.value_or(kDefaultChildFailoverTimeout))) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] created";
}
GRPC_TRACE_LOG(priority_lb, INFO) << "[priority_lb " << this << "] created";
}
PriorityLb::~PriorityLb() {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] destroying priority LB policy";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] destroying priority LB policy";
}
void PriorityLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] shutting down";
shutting_down_ = true;
children_.clear();
}
@ -316,11 +312,9 @@ void PriorityLb::ShutdownLocked() {
void PriorityLb::ExitIdleLocked() {
if (current_priority_ != UINT32_MAX) {
const std::string& child_name = config_->priorities()[current_priority_];
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this
<< "] exiting IDLE for current priority " << current_priority_
<< " child " << child_name;
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] exiting IDLE for current priority "
<< current_priority_ << " child " << child_name;
children_[child_name]->ExitIdleLocked();
}
}
@ -330,9 +324,8 @@ void PriorityLb::ResetBackoffLocked() {
}
absl::Status PriorityLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this << "] received update";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this << "] received update";
// Update config.
config_ = args.config.TakeAsSubclass<PriorityLbConfig>();
// Update args.
@ -451,11 +444,10 @@ void PriorityLb::ChoosePriorityLocked() {
}
// If we didn't find any priority to try, pick the first one in state
// CONNECTING.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << this
<< "] no priority reachable, checking for CONNECTING priority to "
"delegate to";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << this
<< "] no priority reachable, checking for CONNECTING priority to "
"delegate to";
for (uint32_t priority = 0; priority < config_->priorities().size();
++priority) {
// If the child for the priority does not exist yet, create it.
@ -530,11 +522,10 @@ PriorityLb::ChildPriority::DeactivationTimer::DeactivationTimer(
void PriorityLb::ChildPriority::DeactivationTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): reactivating";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): reactivating";
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
->Cancel(*timer_handle_);
@ -588,11 +579,10 @@ PriorityLb::ChildPriority::FailoverTimer::FailoverTimer(
void PriorityLb::ChildPriority::FailoverTimer::Orphan() {
if (timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " ("
<< child_priority_.get() << "): cancelling failover timer";
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << child_priority_->priority_policy_.get()
<< "] child " << child_priority_->name_ << " (" << child_priority_.get()
<< "): cancelling failover timer";
child_priority_->priority_policy_->channel_control_helper()
->GetEventEngine()
->Cancel(*timer_handle_);
@ -686,11 +676,10 @@ absl::Status PriorityLb::ChildPriority::UpdateLocked(
update_args.resolution_note = priority_policy_->resolution_note_;
update_args.args = priority_policy_->args_;
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): updating child policy handler "
<< child_policy_.get();
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): updating child policy handler "
<< child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}
@ -704,11 +693,10 @@ PriorityLb::ChildPriority::CreateChildPolicyLocked(const ChannelArgs& args) {
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&priority_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(priority_lb)) {
LOG(INFO) << "[priority_lb " << priority_policy_.get() << "] child "
<< name_ << " (" << this << "): created new child policy handler "
<< lb_policy.get();
}
GRPC_TRACE_LOG(priority_lb, INFO)
<< "[priority_lb " << priority_policy_.get() << "] child " << name_
<< " (" << this << "): created new child policy handler "
<< lb_policy.get();
// Add the parent's interested_parties pollset_set to that of the newly
// created child policy. This will make the child policy progress upon
// activity on the parent LB, which in turn is tied to the application's call.

@ -597,21 +597,16 @@ void RingHash::RingHashEndpoint::OnStateUpdate(
//
RingHash::RingHash(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] Created";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO) << "[RH " << this << "] Created";
}
RingHash::~RingHash() {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] Destroying Ring Hash policy";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this << "] Destroying Ring Hash policy";
}
void RingHash::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] Shutting down";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO) << "[RH " << this << "] Shutting down";
shutdown_ = true;
endpoint_map_.clear();
}
@ -625,9 +620,7 @@ void RingHash::ResetBackoffLocked() {
absl::Status RingHash::UpdateLocked(UpdateArgs args) {
// Check address list.
if (args.addresses.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] received update";
}
GRPC_TRACE_LOG(ring_hash_lb, INFO) << "[RH " << this << "] received update";
// De-dup endpoints, taking weight into account.
endpoints_.clear();
std::map<EndpointAddressSet, size_t> endpoint_indices;
@ -641,11 +634,10 @@ absl::Status RingHash::UpdateLocked(UpdateArgs args) {
endpoint.args().GetInt(GRPC_ARG_ADDRESS_WEIGHT).value_or(1);
int prev_weight_arg =
prev_endpoint.args().GetInt(GRPC_ARG_ADDRESS_WEIGHT).value_or(1);
if (GRPC_TRACE_FLAG_ENABLED(ring_hash_lb)) {
LOG(INFO) << "[RH " << this << "] merging duplicate endpoint for "
<< key.ToString() << ", combined weight "
<< weight_arg + prev_weight_arg;
}
GRPC_TRACE_LOG(ring_hash_lb, INFO)
<< "[RH " << this << "] merging duplicate endpoint for "
<< key.ToString() << ", combined weight "
<< weight_arg + prev_weight_arg;
prev_endpoint = EndpointAddresses(
prev_endpoint.addresses(),
prev_endpoint.args().Set(GRPC_ARG_ADDRESS_WEIGHT,

@ -353,7 +353,8 @@ class RlsLb final : public LoadBalancingPolicy {
// is called after releasing it.
//
// Both methods grab the data they need from the parent object.
void StartUpdate() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
void StartUpdate(OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
absl::Status MaybeFinishUpdate() ABSL_LOCKS_EXCLUDED(&RlsLb::mu_);
void ExitIdleLocked() {
@ -397,14 +398,14 @@ class RlsLb final : public LoadBalancingPolicy {
};
// Note: We are forced to disable lock analysis here because
// Orphan() is called by Unref() which is called by RefCountedPtr<>, which
// Orphaned() is called by Unref() which is called by RefCountedPtr<>, which
// cannot have lock annotations for this particular caller.
void Orphaned() override ABSL_NO_THREAD_SAFETY_ANALYSIS;
RefCountedPtr<RlsLb> lb_policy_;
std::string target_;
bool is_shutdown_ = false;
bool is_shutdown_ = false; // Protected by WorkSerializer
OrphanablePtr<ChildPolicyHandler> child_policy_;
RefCountedPtr<LoadBalancingPolicy::Config> pending_config_;
@ -503,12 +504,25 @@ class RlsLb final : public LoadBalancingPolicy {
// Returns a list of child policy wrappers on which FinishUpdate()
// needs to be called after releasing the lock.
std::vector<ChildPolicyWrapper*> OnRlsResponseLocked(
ResponseInfo response, std::unique_ptr<BackOff> backoff_state)
ResponseInfo response, std::unique_ptr<BackOff> backoff_state,
OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
// Moves entry to the end of the LRU list.
void MarkUsed() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
// Takes entries from child_policy_wrappers_ and appends them to the end
// of \a child_policy_wrappers.
void TakeChildPolicyWrappers(
std::vector<RefCountedPtr<ChildPolicyWrapper>>* child_policy_wrappers)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_) {
child_policy_wrappers->insert(
child_policy_wrappers->end(),
std::make_move_iterator(child_policy_wrappers_.begin()),
std::make_move_iterator(child_policy_wrappers_.end()));
child_policy_wrappers_.clear();
}
private:
class BackoffTimer final : public InternallyRefCounted<BackoffTimer> {
public:
@ -566,19 +580,24 @@ class RlsLb final : public LoadBalancingPolicy {
// the caller. Otherwise, the entry found is returned to the caller. The
// entry returned to the user is considered recently used and its order in
// the LRU list of the cache is updated.
Entry* FindOrInsert(const RequestKey& key)
Entry* FindOrInsert(const RequestKey& key,
std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
// Resizes the cache. If the new cache size is greater than the current size
// of the cache, do nothing. Otherwise, evict the oldest entries that
// exceed the new size limit of the cache.
void Resize(size_t bytes) ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
void Resize(size_t bytes, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
// Resets backoff of all the cache entries.
void ResetAllBackoff() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
// Shutdown the cache; clean-up and orphan all the stored cache entries.
void Shutdown() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
GRPC_MUST_USE_RESULT std::vector<RefCountedPtr<ChildPolicyWrapper>>
Shutdown() ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
void ReportMetricsLocked(CallbackMetricReporter& reporter)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
@ -594,7 +613,9 @@ class RlsLb final : public LoadBalancingPolicy {
// Evicts oversized cache elements when the current size is greater than
// the specified limit.
void MaybeShrinkSize(size_t bytes)
void MaybeShrinkSize(size_t bytes,
std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&RlsLb::mu_);
RlsLb* lb_policy_;
@ -805,11 +826,9 @@ RlsLb::ChildPolicyWrapper::ChildPolicyWrapper(RefCountedPtr<RlsLb> lb_policy,
}
void RlsLb::ChildPolicyWrapper::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: shutdown";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] ChildPolicyWrapper=" << this
<< " [" << target_ << "]: shutdown";
is_shutdown_ = true;
lb_policy_->child_policy_map_.erase(target_);
if (child_policy_ != nullptr) {
@ -859,7 +878,8 @@ absl::optional<Json> InsertOrUpdateChildPolicyField(const std::string& field,
return Json::FromArray(std::move(array));
}
void RlsLb::ChildPolicyWrapper::StartUpdate() {
void RlsLb::ChildPolicyWrapper::StartUpdate(
OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete) {
ValidationErrors errors;
auto child_policy_config = InsertOrUpdateChildPolicyField(
lb_policy_->config_->child_policy_config_target_field_name(), target_,
@ -876,15 +896,13 @@ void RlsLb::ChildPolicyWrapper::StartUpdate() {
*child_policy_config);
// Returned RLS target fails the validation.
if (!config.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "]: config failed to parse: " << config.status();
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] ChildPolicyWrapper=" << this
<< " [" << target_ << "]: config failed to parse: " << config.status();
pending_config_.reset();
picker_ = MakeRefCounted<TransientFailurePicker>(
absl::UnavailableError(config.status().message()));
child_policy_.reset();
*child_policy_to_delete = std::move(child_policy_);
} else {
pending_config_ = std::move(*config);
}
@ -913,11 +931,10 @@ absl::Status RlsLb::ChildPolicyWrapper::MaybeFinishUpdate() {
lb_policy_->interested_parties());
}
// Send the child the updated config.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get()
<< "] ChildPolicyWrapper=" << this << " [" << target_
<< "], updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] ChildPolicyWrapper=" << this
<< " [" << target_ << "], updating child policy handler "
<< child_policy_.get();
UpdateArgs update_args;
update_args.config = std::move(pending_config_);
update_args.addresses = lb_policy_->addresses_;
@ -939,9 +956,9 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
<< ": UpdateState(state=" << ConnectivityStateName(state)
<< ", status=" << status << ", picker=" << picker.get() << ")";
}
if (wrapper_->is_shutdown_) return;
{
MutexLock lock(&wrapper_->lb_policy_->mu_);
if (wrapper_->is_shutdown_) return;
// TODO(roth): It looks like this ignores subsequent TF updates that
// might change the status used to fail picks, which seems wrong.
if (wrapper_->connectivity_state_ == GRPC_CHANNEL_TRANSIENT_FAILURE &&
@ -951,7 +968,8 @@ void RlsLb::ChildPolicyWrapper::ChildPolicyHelper::UpdateState(
wrapper_->connectivity_state_ = state;
DCHECK(picker != nullptr);
if (picker != nullptr) {
wrapper_->picker_ = std::move(picker);
// We want to unref the picker after we release the lock.
wrapper_->picker_.swap(picker);
}
}
wrapper_->lb_policy_->UpdatePickerLocked();
@ -1199,18 +1217,19 @@ RlsLb::Cache::Entry::Entry(RefCountedPtr<RlsLb> lb_policy,
lb_policy_->cache_.lru_list_.end(), key)) {}
void RlsLb::Cache::Entry::Orphan() {
// We should be holding RlsLB::mu_.
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] cache entry=" << this << " "
<< lru_iterator_->ToString() << ": cache entry evicted";
is_shutdown_ = true;
lb_policy_->cache_.lru_list_.erase(lru_iterator_);
lru_iterator_ = lb_policy_->cache_.lru_list_.end(); // Just in case.
CHECK(child_policy_wrappers_.empty());
backoff_state_.reset();
if (backoff_timer_ != nullptr) {
backoff_timer_.reset();
lb_policy_->UpdatePickerAsync();
}
child_policy_wrappers_.clear();
Unref(DEBUG_LOCATION, "Orphan");
}
@ -1289,7 +1308,8 @@ void RlsLb::Cache::Entry::MarkUsed() {
std::vector<RlsLb::ChildPolicyWrapper*>
RlsLb::Cache::Entry::OnRlsResponseLocked(
ResponseInfo response, std::unique_ptr<BackOff> backoff_state) {
ResponseInfo response, std::unique_ptr<BackOff> backoff_state,
OrphanablePtr<ChildPolicyHandler>* child_policy_to_delete) {
// Move the entry to the end of the LRU list.
MarkUsed();
// If the request failed, store the failed status and update the
@ -1350,7 +1370,7 @@ RlsLb::Cache::Entry::OnRlsResponseLocked(
if (it == lb_policy_->child_policy_map_.end()) {
auto new_child = MakeRefCounted<ChildPolicyWrapper>(
lb_policy_.Ref(DEBUG_LOCATION, "ChildPolicyWrapper"), target);
new_child->StartUpdate();
new_child->StartUpdate(child_policy_to_delete);
child_policies_to_finish_update.push_back(new_child.get());
new_child_policy_wrappers.emplace_back(std::move(new_child));
} else {
@ -1387,12 +1407,15 @@ RlsLb::Cache::Entry* RlsLb::Cache::Find(const RequestKey& key) {
return it->second.get();
}
RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(
const RequestKey& key, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete) {
auto it = map_.find(key);
// If not found, create new entry.
if (it == map_.end()) {
size_t entry_size = EntrySizeForKey(key);
MaybeShrinkSize(size_limit_ - std::min(size_limit_, entry_size));
MaybeShrinkSize(size_limit_ - std::min(size_limit_, entry_size),
child_policy_wrappers_to_delete);
Entry* entry = new Entry(
lb_policy_->RefAsSubclass<RlsLb>(DEBUG_LOCATION, "CacheEntry"), key);
map_.emplace(key, OrphanablePtr<Entry>(entry));
@ -1410,11 +1433,13 @@ RlsLb::Cache::Entry* RlsLb::Cache::FindOrInsert(const RequestKey& key) {
return it->second.get();
}
void RlsLb::Cache::Resize(size_t bytes) {
void RlsLb::Cache::Resize(size_t bytes,
std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] resizing cache to " << bytes << " bytes";
size_limit_ = bytes;
MaybeShrinkSize(size_limit_);
MaybeShrinkSize(size_limit_, child_policy_wrappers_to_delete);
}
void RlsLb::Cache::ResetAllBackoff() {
@ -1424,17 +1449,22 @@ void RlsLb::Cache::ResetAllBackoff() {
lb_policy_->UpdatePickerAsync();
}
void RlsLb::Cache::Shutdown() {
std::vector<RefCountedPtr<RlsLb::ChildPolicyWrapper>> RlsLb::Cache::Shutdown() {
std::vector<RefCountedPtr<ChildPolicyWrapper>>
child_policy_wrappers_to_delete;
for (auto& entry : map_) {
entry.second->TakeChildPolicyWrappers(&child_policy_wrappers_to_delete);
}
map_.clear();
lru_list_.clear();
if (cleanup_timer_handle_.has_value() &&
lb_policy_->channel_control_helper()->GetEventEngine()->Cancel(
*cleanup_timer_handle_)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] cache cleanup timer canceled";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] cache cleanup timer canceled";
}
cleanup_timer_handle_.reset();
return child_policy_wrappers_to_delete;
}
void RlsLb::Cache::ReportMetricsLocked(CallbackMetricReporter& reporter) {
@ -1468,15 +1498,17 @@ void RlsLb::Cache::StartCleanupTimer() {
}
void RlsLb::Cache::OnCleanupTimer() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_ << "] cache cleanup timer fired";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_ << "] cache cleanup timer fired";
std::vector<RefCountedPtr<ChildPolicyWrapper>>
child_policy_wrappers_to_delete;
MutexLock lock(&lb_policy_->mu_);
if (!cleanup_timer_handle_.has_value()) return;
if (lb_policy_->is_shutdown_) return;
for (auto it = map_.begin(); it != map_.end();) {
if (GPR_UNLIKELY(it->second->ShouldRemove() && it->second->CanEvict())) {
size_ -= it->second->Size();
it->second->TakeChildPolicyWrappers(&child_policy_wrappers_to_delete);
it = map_.erase(it);
} else {
++it;
@ -1490,7 +1522,9 @@ size_t RlsLb::Cache::EntrySizeForKey(const RequestKey& key) {
return (key.Size() * 2) + sizeof(Entry);
}
void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
void RlsLb::Cache::MaybeShrinkSize(
size_t bytes, std::vector<RefCountedPtr<ChildPolicyWrapper>>*
child_policy_wrappers_to_delete) {
while (size_ > bytes) {
auto lru_it = lru_list_.begin();
if (GPR_UNLIKELY(lru_it == lru_list_.end())) break;
@ -1501,13 +1535,12 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
<< "[rlslb " << lb_policy_ << "] LRU eviction: removing entry "
<< map_it->second.get() << " " << lru_it->ToString();
size_ -= map_it->second->Size();
map_it->second->TakeChildPolicyWrappers(child_policy_wrappers_to_delete);
map_.erase(map_it);
}
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_
<< "] LRU pass complete: desired size=" << bytes
<< " size=" << size_;
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_
<< "] LRU pass complete: desired size=" << bytes << " size=" << size_;
}
//
@ -1517,11 +1550,10 @@ void RlsLb::Cache::MaybeShrinkSize(size_t bytes) {
void RlsLb::RlsChannel::StateWatcher::OnConnectivityStateChange(
grpc_connectivity_state new_state, const absl::Status& status) {
auto* lb_policy = rls_channel_->lb_policy_.get();
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy << "] RlsChannel=" << rls_channel_.get()
<< " StateWatcher=" << this << ": state changed to "
<< ConnectivityStateName(new_state) << " (" << status << ")";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy << "] RlsChannel=" << rls_channel_.get()
<< " StateWatcher=" << this << ": state changed to "
<< ConnectivityStateName(new_state) << " (" << status << ")";
if (rls_channel_->is_shutdown_) return;
MutexLock lock(&lb_policy->mu_);
if (new_state == GRPC_CHANNEL_READY && was_transient_failure_) {
@ -1614,11 +1646,10 @@ RlsLb::RlsChannel::RlsChannel(RefCountedPtr<RlsLb> lb_policy)
channel_.reset(Channel::FromC(
grpc_channel_create(lb_policy_->config_->lookup_service().c_str(),
creds.get(), args.ToC().get())));
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ": created channel " << channel_.get() << " for "
<< lb_policy_->config_->lookup_service();
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] RlsChannel=" << this
<< ": created channel " << channel_.get() << " for "
<< lb_policy_->config_->lookup_service();
if (channel_ != nullptr) {
// Set up channelz linkage.
channelz::ChannelNode* child_channelz_node = channel_->channelz_node();
@ -1821,19 +1852,22 @@ void RlsLb::RlsRequest::OnRlsCallCompleteLocked(grpc_error_handle error) {
grpc_call_unref(call_);
call_ = nullptr;
// Return result to cache.
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << lb_policy_.get() << "] rls_request=" << this
<< " " << key_.ToString()
<< ": response info: " << response.ToString();
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << lb_policy_.get() << "] rls_request=" << this << " "
<< key_.ToString() << ": response info: " << response.ToString();
std::vector<ChildPolicyWrapper*> child_policies_to_finish_update;
std::vector<RefCountedPtr<ChildPolicyWrapper>>
child_policy_wrappers_to_delete;
OrphanablePtr<ChildPolicyHandler> child_policy_to_delete;
{
MutexLock lock(&lb_policy_->mu_);
if (lb_policy_->is_shutdown_) return;
rls_channel_->ReportResponseLocked(response.status.ok());
Cache::Entry* cache_entry = lb_policy_->cache_.FindOrInsert(key_);
Cache::Entry* cache_entry =
lb_policy_->cache_.FindOrInsert(key_, &child_policy_wrappers_to_delete);
child_policies_to_finish_update = cache_entry->OnRlsResponseLocked(
std::move(response), std::move(backoff_state_));
std::move(response), std::move(backoff_state_),
&child_policy_to_delete);
lb_policy_->request_map_.erase(key_);
}
// Now that we've released the lock, finish the update on any newly
@ -1932,19 +1966,8 @@ RlsLb::RlsLb(Args args)
instance_uuid_(channel_args()
.GetOwnedString(GRPC_ARG_TEST_ONLY_RLS_INSTANCE_ID)
.value_or(GenerateUUID())),
cache_(this),
registered_metric_callback_(
channel_control_helper()->GetStatsPluginGroup().RegisterCallback(
[rls_lb = RefAsSubclass<RlsLb>(DEBUG_LOCATION,
"RlsLB Metric Callback")](
CallbackMetricReporter& reporter) {
MutexLock lock(&rls_lb->mu_);
rls_lb->cache_.ReportMetricsLocked(reporter);
},
Duration::Seconds(5), kMetricCacheSize, kMetricCacheEntries)) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] policy created";
}
cache_(this) {
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] policy created";
}
bool EndpointsEqual(
@ -1969,9 +1992,7 @@ bool EndpointsEqual(
}
absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] policy updated";
}
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] policy updated";
update_in_progress_ = true;
// Swap out config.
RefCountedPtr<RlsLbConfig> old_config = std::move(config_);
@ -2004,16 +2025,14 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
if (old_config == nullptr ||
config_->default_target() != old_config->default_target()) {
if (config_->default_target().empty()) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] unsetting default target";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] unsetting default target";
default_child_policy_.reset();
} else {
auto it = child_policy_map_.find(config_->default_target());
if (it == child_policy_map_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] creating new default target";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] creating new default target";
default_child_policy_ = MakeRefCounted<ChildPolicyWrapper>(
RefAsSubclass<RlsLb>(DEBUG_LOCATION, "ChildPolicyWrapper"),
config_->default_target());
@ -2027,6 +2046,9 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
}
}
// Now grab the lock to swap out the state it guards.
std::vector<RefCountedPtr<ChildPolicyWrapper>>
child_policy_wrappers_to_delete;
OrphanablePtr<ChildPolicyHandler> child_policy_to_delete;
{
MutexLock lock(&mu_);
// Swap out RLS channel if needed.
@ -2038,28 +2060,27 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
// Resize cache if needed.
if (old_config == nullptr ||
config_->cache_size_bytes() != old_config->cache_size_bytes()) {
cache_.Resize(static_cast<size_t>(config_->cache_size_bytes()));
cache_.Resize(static_cast<size_t>(config_->cache_size_bytes()),
&child_policy_wrappers_to_delete);
}
// Start update of child policies if needed.
if (update_child_policies) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] starting child policy updates";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] starting child policy updates";
for (auto& p : child_policy_map_) {
p.second->StartUpdate();
p.second->StartUpdate(&child_policy_to_delete);
}
} else if (created_default_child) {
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] starting default child policy update";
default_child_policy_->StartUpdate();
default_child_policy_->StartUpdate(&child_policy_to_delete);
}
}
// Now that we've released the lock, finish update of child policies.
std::vector<std::string> errors;
if (update_child_policies) {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] finishing child policy updates";
}
GRPC_TRACE_LOG(rls_lb, INFO)
<< "[rlslb " << this << "] finishing child policy updates";
for (auto& p : child_policy_map_) {
absl::Status status = p.second->MaybeFinishUpdate();
if (!status.ok()) {
@ -2077,6 +2098,20 @@ absl::Status RlsLb::UpdateLocked(UpdateArgs args) {
}
}
update_in_progress_ = false;
// On the initial update only, we set the gauge metric callback. We
// can't do this before the initial update, because otherwise the
// callback could be invoked before we've set state that we need for
// the label values (e.g., we'd add metrics with empty string for the
// RLS server name).
if (registered_metric_callback_ == nullptr) {
registered_metric_callback_ =
channel_control_helper()->GetStatsPluginGroup().RegisterCallback(
[this](CallbackMetricReporter& reporter) {
MutexLock lock(&mu_);
cache_.ReportMetricsLocked(reporter);
},
Duration::Seconds(5), kMetricCacheSize, kMetricCacheEntries);
}
// In principle, we need to update the picker here only if the config
// fields used by the picker have changed. However, it seems fragile
// to check individual fields, since the picker logic could change in
@ -2111,18 +2146,22 @@ void RlsLb::ResetBackoffLocked() {
}
void RlsLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] policy shutdown";
}
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] policy shutdown";
registered_metric_callback_.reset();
MutexLock lock(&mu_);
is_shutdown_ = true;
config_.reset(DEBUG_LOCATION, "ShutdownLocked");
RefCountedPtr<ChildPolicyWrapper> child_policy_to_delete;
std::vector<RefCountedPtr<ChildPolicyWrapper>>
child_policy_wrappers_to_delete;
OrphanablePtr<RlsChannel> rls_channel_to_delete;
{
MutexLock lock(&mu_);
is_shutdown_ = true;
config_.reset(DEBUG_LOCATION, "ShutdownLocked");
child_policy_wrappers_to_delete = cache_.Shutdown();
request_map_.clear();
rls_channel_to_delete = std::move(rls_channel_);
child_policy_to_delete = std::move(default_child_policy_);
}
channel_args_ = ChannelArgs();
cache_.Shutdown();
request_map_.clear();
rls_channel_.reset();
default_child_policy_.reset();
}
void RlsLb::UpdatePickerAsync() {
@ -2155,9 +2194,7 @@ void RlsLb::UpdatePickerLocked() {
// all children. This avoids unnecessary picker churn while an update
// is being propagated to our children.
if (update_in_progress_) return;
if (GRPC_TRACE_FLAG_ENABLED(rls_lb)) {
LOG(INFO) << "[rlslb " << this << "] updating picker";
}
GRPC_TRACE_LOG(rls_lb, INFO) << "[rlslb " << this << "] updating picker";
grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
if (!child_policy_map_.empty()) {
state = GRPC_CHANNEL_TRANSIENT_FAILURE;

@ -196,11 +196,9 @@ RoundRobin::Picker::Picker(
RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs args) {
size_t index = last_picked_index_.fetch_add(1, std::memory_order_relaxed) %
pickers_.size();
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << parent_ << " picker " << this
<< "] using picker index " << index
<< ", picker=" << pickers_[index].get();
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << parent_ << " picker " << this << "] using picker index "
<< index << ", picker=" << pickers_[index].get();
return pickers_[index]->Pick(args);
}
@ -209,23 +207,18 @@ RoundRobin::PickResult RoundRobin::Picker::Pick(PickArgs args) {
//
RoundRobin::RoundRobin(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << this << "] Created";
}
GRPC_TRACE_LOG(round_robin, INFO) << "[RR " << this << "] Created";
}
RoundRobin::~RoundRobin() {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << this << "] Destroying Round Robin policy";
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << this << "] Destroying Round Robin policy";
CHECK(endpoint_list_ == nullptr);
CHECK(latest_pending_endpoint_list_ == nullptr);
}
void RoundRobin::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << this << "] Shutting down";
}
GRPC_TRACE_LOG(round_robin, INFO) << "[RR " << this << "] Shutting down";
shutdown_ = true;
endpoint_list_.reset();
latest_pending_endpoint_list_.reset();
@ -241,9 +234,7 @@ void RoundRobin::ResetBackoffLocked() {
absl::Status RoundRobin::UpdateLocked(UpdateArgs args) {
EndpointAddressesIterator* addresses = nullptr;
if (args.addresses.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << this << "] received update";
}
GRPC_TRACE_LOG(round_robin, INFO) << "[RR " << this << "] received update";
addresses = args.addresses->get();
} else {
GRPC_TRACE_LOG(round_robin, INFO)
@ -416,11 +407,10 @@ void RoundRobin::RoundRobinEndpointList::
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));
} else if (num_transient_failure_ == size()) {
if (GRPC_TRACE_FLAG_ENABLED(round_robin)) {
LOG(INFO) << "[RR " << round_robin
<< "] reporting TRANSIENT_FAILURE with child list " << this
<< ": " << status_for_tf;
}
GRPC_TRACE_LOG(round_robin, INFO)
<< "[RR " << round_robin
<< "] reporting TRANSIENT_FAILURE with child list " << this << ": "
<< status_for_tf;
if (!status_for_tf.ok()) {
last_failure_ = absl::UnavailableError(
absl::StrCat("connections to all backends failing; last error: ",

@ -555,11 +555,10 @@ WeightedRoundRobin::Picker::Picker(RefCountedPtr<WeightedRoundRobin> wrr,
}
global_stats().IncrementWrrSubchannelListSize(endpoint_list->size());
global_stats().IncrementWrrSubchannelReadySize(endpoints_.size());
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] created picker from endpoint_list=" << endpoint_list
<< " with " << endpoints_.size() << " subchannels";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this
<< "] created picker from endpoint_list=" << endpoint_list << " with "
<< endpoints_.size() << " subchannels";
// Note: BuildSchedulerAndStartTimerLocked() passes out pointers to `this`,
// so we need to ensure that we really hold timer_mu_.
MutexLock lock(&timer_mu_);
@ -584,11 +583,9 @@ WeightedRoundRobin::PickResult WeightedRoundRobin::Picker::Pick(PickArgs args) {
size_t index = PickIndex();
CHECK(index < endpoints_.size());
auto& endpoint_info = endpoints_[index];
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] returning index " << index
<< ", picker=" << endpoint_info.picker.get();
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this << "] returning index "
<< index << ", picker=" << endpoint_info.picker.get();
auto result = endpoint_info.picker->Pick(args);
// Collect per-call utilization data if needed.
if (!config_->enable_oob_load_report()) {
@ -665,11 +662,10 @@ void WeightedRoundRobin::Picker::BuildSchedulerAndStartTimerLocked() {
scheduler_ = std::move(scheduler);
}
// Start timer.
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr_.get() << " picker " << this
<< "] scheduling timer for "
<< config_->weight_update_period().ToString();
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr_.get() << " picker " << this
<< "] scheduling timer for "
<< config_->weight_update_period().ToString();
// It's insufficient to hold the implicit constructor lock here, a real lock
// over timer_mu_ is needed: we update timer_handle_ after the timer is
// scheduled, but it may run on another thread before that occurs, causing a
@ -713,17 +709,15 @@ WeightedRoundRobin::WeightedRoundRobin(Args args)
}
WeightedRoundRobin::~WeightedRoundRobin() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << this << "] Destroying Round Robin policy";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] Destroying Round Robin policy";
CHECK(endpoint_list_ == nullptr);
CHECK(latest_pending_endpoint_list_ == nullptr);
}
void WeightedRoundRobin::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << this << "] Shutting down";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] Shutting down";
shutdown_ = true;
endpoint_list_.reset();
latest_pending_endpoint_list_.reset();
@ -741,9 +735,8 @@ absl::Status WeightedRoundRobin::UpdateLocked(UpdateArgs args) {
config_ = args.config.TakeAsSubclass<WeightedRoundRobinConfig>();
std::shared_ptr<EndpointAddressesIterator> addresses;
if (args.addresses.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << this << "] received update";
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << this << "] received update";
// Weed out duplicate endpoints. Also sort the endpoints so that if
// the set of endpoints doesn't change, their indexes in the endpoint
// list don't change, since this avoids unnecessary churn in the
@ -992,11 +985,9 @@ void WeightedRoundRobin::WrrEndpointList::
GRPC_CHANNEL_CONNECTING, absl::Status(),
MakeRefCounted<QueuePicker>(nullptr));
} else if (num_transient_failure_ == size()) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_round_robin_lb)) {
LOG(INFO) << "[WRR " << wrr
<< "] reporting TRANSIENT_FAILURE with endpoint list " << this
<< ": " << status_for_tf;
}
GRPC_TRACE_LOG(weighted_round_robin_lb, INFO)
<< "[WRR " << wrr << "] reporting TRANSIENT_FAILURE with endpoint list "
<< this << ": " << status_for_tf;
if (!status_for_tf.ok()) {
last_failure_ = absl::UnavailableError(
absl::StrCat("connections to all backends failing; last error: ",

@ -284,9 +284,8 @@ WeightedTargetLb::PickResult WeightedTargetLb::WeightedPicker::Pick(
WeightedTargetLb::WeightedTargetLb(Args args)
: LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this << "] created";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this << "] created";
}
WeightedTargetLb::~WeightedTargetLb() {
@ -296,9 +295,8 @@ WeightedTargetLb::~WeightedTargetLb() {
}
void WeightedTargetLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this << "] shutting down";
shutting_down_ = true;
targets_.clear();
}
@ -309,9 +307,8 @@ void WeightedTargetLb::ResetBackoffLocked() {
absl::Status WeightedTargetLb::UpdateLocked(UpdateArgs args) {
if (shutting_down_) return absl::OkStatus();
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << this << "] received update";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << this << "] received update";
update_in_progress_ = true;
// Update config.
config_ = args.config.TakeAsSubclass<WeightedTargetLbConfig>();
@ -528,20 +525,16 @@ WeightedTargetLb::WeightedChild::WeightedChild(
}
WeightedTargetLb::WeightedChild::~WeightedChild() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": destroying child";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": destroying child";
weighted_target_policy_.reset(DEBUG_LOCATION, "WeightedChild");
}
void WeightedTargetLb::WeightedChild::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": shutting down child";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": shutting down child";
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
grpc_pollset_set_del_pollset_set(
@ -566,11 +559,10 @@ WeightedTargetLb::WeightedChild::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&weighted_target_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": created new child policy handler " << lb_policy.get();
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": created new child policy handler " << lb_policy.get();
// Add the xDS's interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// xDS LB, which in turn is tied to the application's call.
@ -594,11 +586,9 @@ absl::Status WeightedTargetLb::WeightedChild::UpdateLocked(
weight_ = config.weight;
// Reactivate if needed.
if (delayed_removal_timer_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": reactivating";
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_ << ": reactivating";
delayed_removal_timer_.reset();
}
// Create child policy if needed.
@ -613,11 +603,10 @@ absl::Status WeightedTargetLb::WeightedChild::UpdateLocked(
update_args.resolution_note = resolution_note;
update_args.args = std::move(args);
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(weighted_target_lb)) {
LOG(INFO) << "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(weighted_target_lb, INFO)
<< "[weighted_target_lb " << weighted_target_policy_.get()
<< "] WeightedChild " << this << " " << name_
<< ": updating child policy handler " << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}

@ -183,21 +183,16 @@ class CdsLb final : public LoadBalancingPolicy {
//
CdsLb::CdsLb(Args args) : LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] created";
}
GRPC_TRACE_LOG(cds_lb, INFO) << "[cdslb " << this << "] created";
}
CdsLb::~CdsLb() {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] destroying cds LB policy";
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] destroying cds LB policy";
}
void CdsLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] shutting down";
}
GRPC_TRACE_LOG(cds_lb, INFO) << "[cdslb " << this << "] shutting down";
shutting_down_ = true;
ResetState();
}
@ -280,11 +275,10 @@ class PriorityEndpointIterator final : public EndpointAddressesIterator {
absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
// Get new config.
auto new_config = args.config.TakeAsSubclass<CdsLbConfig>();
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this
<< "] received update: cluster=" << new_config->cluster()
<< " is_dynamic=" << new_config->is_dynamic();
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this
<< "] received update: cluster=" << new_config->cluster()
<< " is_dynamic=" << new_config->is_dynamic();
CHECK(new_config != nullptr);
// Cluster name should never change, because we should use a different
// child name in xds_cluster_manager in that case.
@ -295,11 +289,9 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
}
// Start dynamic subscription if needed.
if (new_config->is_dynamic() && subscription_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this
<< "] obtaining dynamic subscription for cluster "
<< cluster_name_;
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] obtaining dynamic subscription for cluster "
<< cluster_name_;
auto* dependency_mgr = args.args.GetObject<XdsDependencyManager>();
if (dependency_mgr == nullptr) {
// Should never happen.
@ -326,11 +318,10 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
// If we are already subscribed, it's possible that we just
// recently subscribed but another update came through before we
// got the new cluster, in which case it will still be missing.
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this
<< "] xDS config has no entry for dynamic cluster "
<< cluster_name_ << ", waiting for subsequent update";
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this
<< "] xDS config has no entry for dynamic cluster " << cluster_name_
<< ", waiting for subsequent update";
// Stay in CONNECTING until we get an update that has the cluster.
return absl::OkStatus();
}
@ -452,11 +443,9 @@ absl::Status CdsLb::UpdateLocked(UpdateArgs args) {
}
grpc_pollset_set_add_pollset_set(child_policy_->interested_parties(),
interested_parties());
if (GRPC_TRACE_FLAG_ENABLED(cds_lb)) {
LOG(INFO) << "[cdslb " << this << "] created child policy "
<< (*child_config)->name() << " (" << child_policy_.get()
<< ")";
}
GRPC_TRACE_LOG(cds_lb, INFO)
<< "[cdslb " << this << "] created child policy "
<< (*child_config)->name() << " (" << child_policy_.get() << ")";
}
// Update child policy.
update_args.config = std::move(*child_config);

@ -511,9 +511,8 @@ XdsClusterImplLb::~XdsClusterImplLb() {
}
void XdsClusterImplLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this << "] shutting down";
shutting_down_ = true;
ResetState();
xds_client_.reset(DEBUG_LOCATION, "XdsClusterImpl");
@ -560,9 +559,8 @@ std::string GetEdsResourceName(const XdsClusterResource& cluster_resource) {
}
absl::Status XdsClusterImplLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this << "] Received update";
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this << "] Received update";
// Grab new LB policy config.
auto new_config = args.config.TakeAsSubclass<XdsClusterImplLbConfig>();
// Cluster name should never change, because the cds policy will assign a
@ -732,11 +730,10 @@ void XdsClusterImplLb::MaybeUpdatePickerLocked() {
// whether) the child has reported.
if (drop_config_ != nullptr && drop_config_->drop_all()) {
auto drop_picker = MakeRefCounted<Picker>(this, picker_);
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_impl_lb)) {
LOG(INFO) << "[xds_cluster_impl_lb " << this
<< "] updating connectivity (drop all): state=READY picker="
<< drop_picker.get();
}
GRPC_TRACE_LOG(xds_cluster_impl_lb, INFO)
<< "[xds_cluster_impl_lb " << this
<< "] updating connectivity (drop all): state=READY picker="
<< drop_picker.get();
channel_control_helper()->UpdateState(GRPC_CHANNEL_READY, absl::Status(),
std::move(drop_picker));
return;

@ -255,9 +255,8 @@ XdsClusterManagerLb::~XdsClusterManagerLb() {
}
void XdsClusterManagerLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this << "] shutting down";
shutting_down_ = true;
children_.clear();
}
@ -272,9 +271,8 @@ void XdsClusterManagerLb::ResetBackoffLocked() {
absl::Status XdsClusterManagerLb::UpdateLocked(UpdateArgs args) {
if (shutting_down_) return absl::OkStatus();
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << this << "] Received update";
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this << "] Received update";
update_in_progress_ = true;
// Update config.
config_ = args.config.TakeAsSubclass<XdsClusterManagerLbConfig>();
@ -365,22 +363,18 @@ void XdsClusterManagerLb::UpdateStateLocked() {
} else {
connectivity_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
}
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << this
<< "] connectivity changed to "
<< ConnectivityStateName(connectivity_state);
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this << "] connectivity changed to "
<< ConnectivityStateName(connectivity_state);
ClusterPicker::ClusterMap cluster_map;
for (const auto& p : config_->cluster_map()) {
const std::string& cluster_name = p.first;
RefCountedPtr<SubchannelPicker>& child_picker = cluster_map[cluster_name];
child_picker = children_[cluster_name]->picker();
if (child_picker == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << this << "] child "
<< cluster_name
<< " has not yet returned a picker; creating a QueuePicker.";
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << this << "] child " << cluster_name
<< " has not yet returned a picker; creating a QueuePicker.";
child_picker =
MakeRefCounted<QueuePicker>(Ref(DEBUG_LOCATION, "QueuePicker"));
}
@ -418,11 +412,9 @@ XdsClusterManagerLb::ClusterChild::~ClusterChild() {
}
void XdsClusterManagerLb::ClusterChild::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_
<< ": shutting down child";
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_ << ": shutting down child";
// Remove the child policy's interested_parties pollset_set from the
// xDS policy.
grpc_pollset_set_del_pollset_set(
@ -453,11 +445,10 @@ XdsClusterManagerLb::ClusterChild::CreateChildPolicyLocked(
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&xds_cluster_manager_lb_trace);
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_
<< ": Created new child policy handler " << lb_policy.get();
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_
<< ": Created new child policy handler " << lb_policy.get();
// Add the xDS's interested_parties pollset_set to that of the newly created
// child policy. This will make the child policy progress upon activity on
// xDS LB, which in turn is tied to the application's call.
@ -490,11 +481,10 @@ absl::Status XdsClusterManagerLb::ClusterChild::UpdateLocked(
update_args.addresses = addresses;
update_args.args = args;
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(xds_cluster_manager_lb)) {
LOG(INFO) << "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_
<< ": Updating child policy handler " << child_policy_.get();
}
GRPC_TRACE_LOG(xds_cluster_manager_lb, INFO)
<< "[xds_cluster_manager_lb " << xds_cluster_manager_policy_.get()
<< "] ClusterChild " << this << " " << name_
<< ": Updating child policy handler " << child_policy_.get();
return child_policy_->UpdateLocked(std::move(update_args));
}

@ -486,18 +486,15 @@ XdsOverrideHostLb::Picker::PickOverridenHost(
if (it == policy_->subchannel_map_.end()) continue;
if (!override_host_health_status_set_.Contains(
it->second->eds_health_status())) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "Subchannel " << address
<< " health status is not overridden ("
<< it->second->eds_health_status().ToString() << ")";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "Subchannel " << address << " health status is not overridden ("
<< it->second->eds_health_status().ToString() << ")";
continue;
}
auto subchannel = it->second->GetSubchannelRef();
if (subchannel == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "No subchannel for " << address;
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "No subchannel for " << address;
if (address_with_no_subchannel.empty()) {
address_with_no_subchannel = it->first;
}
@ -507,9 +504,8 @@ XdsOverrideHostLb::Picker::PickOverridenHost(
if (connectivity_state == GRPC_CHANNEL_READY) {
// Found a READY subchannel. Pass back the actual address list
// and return the subchannel.
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "Picker override found READY subchannel " << address;
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "Picker override found READY subchannel " << address;
it->second->set_last_used_time();
override_host_attr->set_actual_address_list(it->second->address_list());
return PickResult::Complete(subchannel->wrapped_subchannel());
@ -523,9 +519,8 @@ XdsOverrideHostLb::Picker::PickOverridenHost(
// No READY subchannel found. If we found an IDLE subchannel, trigger
// a connection attempt and queue the pick until that attempt completes.
if (idle_subchannel != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "Picker override found IDLE subchannel";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "Picker override found IDLE subchannel";
// Deletes itself after the connection is requested.
new SubchannelConnectionRequester(std::move(idle_subchannel));
return PickResult::Queue();
@ -533,18 +528,16 @@ XdsOverrideHostLb::Picker::PickOverridenHost(
// No READY or IDLE subchannels. If we found a CONNECTING subchannel,
// queue the pick and wait for the connection attempt to complete.
if (found_connecting) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "Picker override found CONNECTING subchannel";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "Picker override found CONNECTING subchannel";
return PickResult::Queue();
}
// No READY, IDLE, or CONNECTING subchannels found. If we found an
// entry that has no subchannel, then queue the pick and trigger
// creation of a subchannel for that entry.
if (!address_with_no_subchannel.empty()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "Picker override found entry with no subchannel";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "Picker override found entry with no subchannel";
if (!IsWorkSerializerDispatchEnabled()) {
new SubchannelCreationRequester(policy_, address_with_no_subchannel);
} else {
@ -645,9 +638,8 @@ void XdsOverrideHostLb::IdleTimer::OnTimerLocked() {
XdsOverrideHostLb::XdsOverrideHostLb(Args args)
: LoadBalancingPolicy(std::move(args)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] created";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] created";
}
XdsOverrideHostLb::~XdsOverrideHostLb() {
@ -657,9 +649,8 @@ XdsOverrideHostLb::~XdsOverrideHostLb() {
}
void XdsOverrideHostLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] shutting down";
shutting_down_ = true;
ResetState();
}
@ -726,11 +717,9 @@ class ChildEndpointIterator final : public EndpointAddressesIterator {
parent_it_->ForEach([&](const EndpointAddresses& endpoint) {
XdsHealthStatus status = GetEndpointHealthStatus(endpoint);
if (status.status() != XdsHealthStatus::kDraining) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] endpoint "
<< endpoint.ToString()
<< ": not draining, passing to child";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] endpoint "
<< endpoint.ToString() << ": not draining, passing to child";
callback(endpoint);
}
});
@ -741,9 +730,8 @@ class ChildEndpointIterator final : public EndpointAddressesIterator {
};
absl::Status XdsOverrideHostLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(xds_override_host_lb)) {
LOG(INFO) << "[xds_override_host_lb " << this << "] Received update";
}
GRPC_TRACE_LOG(xds_override_host_lb, INFO)
<< "[xds_override_host_lb " << this << "] Received update";
// Grab new LB policy config.
if (args.config == nullptr) {
return absl::InvalidArgumentError("Missing policy config");

@ -136,15 +136,13 @@ XdsWrrLocalityLb::XdsWrrLocalityLb(Args args)
: LoadBalancingPolicy(std::move(args)) {}
XdsWrrLocalityLb::~XdsWrrLocalityLb() {
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this << "] destroying";
}
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] destroying";
}
void XdsWrrLocalityLb::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] shutting down";
if (child_policy_ != nullptr) {
grpc_pollset_set_del_pollset_set(child_policy_->interested_parties(),
interested_parties());
@ -161,9 +159,8 @@ void XdsWrrLocalityLb::ResetBackoffLocked() {
}
absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this << "] Received update";
}
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] Received update";
auto config = args.config.TakeAsSubclass<XdsWrrLocalityLbConfig>();
// Scan the addresses to find the weight for each locality.
std::map<RefCountedStringValue, uint32_t> locality_weights;
@ -203,11 +200,9 @@ absl::Status XdsWrrLocalityLb::UpdateLocked(UpdateArgs args) {
})},
}),
});
if (GRPC_TRACE_FLAG_ENABLED(xds_wrr_locality_lb)) {
LOG(INFO) << "[xds_wrr_locality_lb " << this
<< "] generated child policy config: "
<< JsonDump(child_config_json, /*indent=*/1);
}
GRPC_TRACE_LOG(xds_wrr_locality_lb, INFO)
<< "[xds_wrr_locality_lb " << this << "] generated child policy config: "
<< JsonDump(child_config_json, /*indent=*/1);
// Parse config.
auto child_config =
CoreConfiguration::Get().lb_policy_registry().ParseLoadBalancingConfig(

@ -106,9 +106,10 @@ class AresClientChannelDNSResolver final : public PollingResolver {
resolver_->authority().c_str(), resolver_->name_to_resolve().c_str(),
kDefaultSecurePort, resolver_->interested_parties(),
&on_hostname_resolved_, &addresses_, resolver_->query_timeout_ms_));
GRPC_CARES_TRACE_LOG(
"resolver:%p Started resolving hostnames. hostname_request_:%p",
resolver_.get(), hostname_request_.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << resolver_.get()
<< " Started resolving hostnames. hostname_request_:"
<< hostname_request_.get();
if (resolver_->enable_srv_queries_) {
Ref(DEBUG_LOCATION, "OnSRVResolved").release();
GRPC_CLOSURE_INIT(&on_srv_resolved_, OnSRVResolved, this, nullptr);
@ -117,9 +118,10 @@ class AresClientChannelDNSResolver final : public PollingResolver {
resolver_->name_to_resolve().c_str(),
resolver_->interested_parties(), &on_srv_resolved_,
&balancer_addresses_, resolver_->query_timeout_ms_));
GRPC_CARES_TRACE_LOG(
"resolver:%p Started resolving SRV records. srv_request_:%p",
resolver_.get(), srv_request_.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << resolver_.get()
<< " Started resolving SRV records. srv_request_:"
<< srv_request_.get();
}
if (resolver_->request_service_config_) {
Ref(DEBUG_LOCATION, "OnTXTResolved").release();
@ -129,9 +131,10 @@ class AresClientChannelDNSResolver final : public PollingResolver {
resolver_->name_to_resolve().c_str(),
resolver_->interested_parties(), &on_txt_resolved_,
&service_config_json_, resolver_->query_timeout_ms_));
GRPC_CARES_TRACE_LOG(
"resolver:%p Started resolving TXT records. txt_request_:%p",
resolver_.get(), txt_request_.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << resolver_.get()
<< " Started resolving TXT records. txt_request_:"
<< txt_request_.get();
}
}
@ -219,8 +222,9 @@ AresClientChannelDNSResolver::AresClientChannelDNSResolver(
.value_or(GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS))) {}
AresClientChannelDNSResolver::~AresClientChannelDNSResolver() {
GRPC_CARES_TRACE_LOG("resolver:%p destroying AresClientChannelDNSResolver",
this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << this
<< " destroying AresClientChannelDNSResolver";
}
OrphanablePtr<Orphanable> AresClientChannelDNSResolver::StartRequest() {
@ -283,15 +287,16 @@ AresClientChannelDNSResolver::AresRequestWrapper::OnResolvedLocked(
grpc_error_handle error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(on_resolved_mu_) {
if (hostname_request_ != nullptr || srv_request_ != nullptr ||
txt_request_ != nullptr) {
GRPC_CARES_TRACE_LOG(
"resolver:%p OnResolved() waiting for results (hostname: %s, srv: %s, "
"txt: %s)",
this, hostname_request_ != nullptr ? "waiting" : "done",
srv_request_ != nullptr ? "waiting" : "done",
txt_request_ != nullptr ? "waiting" : "done");
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << this
<< " OnResolved() waiting for results (hostname: "
<< (hostname_request_ != nullptr ? "waiting" : "done")
<< ", srv: " << (srv_request_ != nullptr ? "waiting" : "done")
<< ", txt: " << (txt_request_ != nullptr ? "waiting" : "done") << ")";
return absl::nullopt;
}
GRPC_CARES_TRACE_LOG("resolver:%p OnResolved() proceeding", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << this << " OnResolved() proceeding";
Result result;
result.args = resolver_->channel_args();
// TODO(roth): Change logic to be able to report failures for addresses
@ -309,8 +314,9 @@ AresClientChannelDNSResolver::AresRequestWrapper::OnResolvedLocked(
absl::StrCat("failed to parse service config: ",
StatusToString(service_config_string.status())));
} else if (!service_config_string->empty()) {
GRPC_CARES_TRACE_LOG("resolver:%p selected service config choice: %s",
this, service_config_string->c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << this
<< " selected service config choice: " << *service_config_string;
result.service_config = ServiceConfigImpl::Create(
resolver_->channel_args(), *service_config_string);
if (!result.service_config.ok()) {
@ -325,8 +331,9 @@ AresClientChannelDNSResolver::AresRequestWrapper::OnResolvedLocked(
SetGrpcLbBalancerAddresses(result.args, *balancer_addresses_);
}
} else {
GRPC_CARES_TRACE_LOG("resolver:%p dns resolution failed: %s", this,
StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) resolver:" << this
<< " dns resolution failed: " << StatusToString(error);
std::string error_message;
grpc_error_get_str(error, StatusStrProperty::kDescription, &error_message);
absl::Status status = absl::UnavailableError(
@ -375,8 +382,9 @@ class AresDNSResolver final : public DNSResolver {
class AresRequest {
public:
virtual ~AresRequest() {
GRPC_CARES_TRACE_LOG("AresRequest:%p dtor ares_request_:%p", this,
grpc_ares_request_.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresRequest:" << this
<< " dtor ares_request_:" << grpc_ares_request_.get();
resolver_->UnregisterRequest(task_handle());
grpc_pollset_set_destroy(pollset_set_);
}
@ -397,8 +405,9 @@ class AresDNSResolver final : public DNSResolver {
bool Cancel() {
MutexLock lock(&mu_);
if (grpc_ares_request_ != nullptr) {
GRPC_CARES_TRACE_LOG("AresRequest:%p Cancel ares_request_:%p", this,
grpc_ares_request_.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresRequest:" << this
<< " Cancel ares_request_:" << grpc_ares_request_.get();
if (completed_) return false;
// OnDnsLookupDone will still be run
completed_ = true;
@ -499,7 +508,8 @@ class AresDNSResolver final : public DNSResolver {
aba_token),
default_port_(default_port),
on_resolve_address_done_(std::move(on_resolve_address_done)) {
GRPC_CARES_TRACE_LOG("AresHostnameRequest:%p ctor", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresHostnameRequest:" << this << " ctor";
}
std::unique_ptr<grpc_ares_request> MakeRequestLocked() override {
@ -508,13 +518,15 @@ class AresDNSResolver final : public DNSResolver {
name_server().c_str(), name().c_str(), default_port_.c_str(),
pollset_set(), on_dns_lookup_done(), &addresses_,
timeout().millis()));
GRPC_CARES_TRACE_LOG("AresHostnameRequest:%p Start ares_request_:%p",
this, ares_request.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresHostnameRequest:" << this
<< " Start ares_request_:" << ares_request.get();
return ares_request;
}
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresHostnameRequest:%p OnComplete", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresHostnameRequest:" << this << " OnComplete";
if (!error.ok()) {
on_resolve_address_done_(grpc_error_to_absl_status(error));
return;
@ -550,7 +562,8 @@ class AresDNSResolver final : public DNSResolver {
: AresRequest(name, name_server, timeout, interested_parties, resolver,
aba_token),
on_resolve_address_done_(std::move(on_resolve_address_done)) {
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p ctor", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresSRVRequest:" << this << " ctor";
}
std::unique_ptr<grpc_ares_request> MakeRequestLocked() override {
@ -558,13 +571,15 @@ class AresDNSResolver final : public DNSResolver {
std::unique_ptr<grpc_ares_request>(grpc_dns_lookup_srv_ares(
name_server().c_str(), name().c_str(), pollset_set(),
on_dns_lookup_done(), &balancer_addresses_, timeout().millis()));
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p Start ares_request_:%p", this,
ares_request.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresSRVRequest:" << this
<< " Start ares_request_:" << ares_request.get();
return ares_request;
}
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p OnComplete", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresSRVRequest:" << this << " OnComplete";
if (!error.ok()) {
on_resolve_address_done_(grpc_error_to_absl_status(error));
return;
@ -596,7 +611,8 @@ class AresDNSResolver final : public DNSResolver {
: AresRequest(name, name_server, timeout, interested_parties, resolver,
aba_token),
on_resolved_(std::move(on_resolved)) {
GRPC_CARES_TRACE_LOG("AresTXTRequest:%p ctor", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresTXTRequest:" << this << " ctor";
}
~AresTXTRequest() override { gpr_free(service_config_json_); }
@ -606,13 +622,15 @@ class AresDNSResolver final : public DNSResolver {
std::unique_ptr<grpc_ares_request>(grpc_dns_lookup_txt_ares(
name_server().c_str(), name().c_str(), pollset_set(),
on_dns_lookup_done(), &service_config_json_, timeout().millis()));
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p Start ares_request_:%p", this,
ares_request.get());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresSRVRequest:" << this
<< " Start ares_request_:" << ares_request.get();
return ares_request;
}
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p OnComplete", this);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresSRVRequest:" << this << " OnComplete";
if (!error.ok()) {
on_resolved_(grpc_error_to_absl_status(error));
return;
@ -684,14 +702,15 @@ class AresDNSResolver final : public DNSResolver {
MutexLock lock(&mu_);
if (!open_requests_.contains(handle)) {
// Unknown request, possibly completed already, or an invalid handle.
GRPC_CARES_TRACE_LOG(
"AresDNSResolver:%p attempt to cancel unknown TaskHandle:%s", this,
HandleToString(handle).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresDNSResolver:" << this
<< " attempt to cancel unknown TaskHandle:" << HandleToString(handle);
return false;
}
auto* request = reinterpret_cast<AresRequest*>(handle.keys[0]);
GRPC_CARES_TRACE_LOG("AresDNSResolver:%p cancel ares_request:%p", this,
request);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) AresDNSResolver:" << this
<< " cancel ares_request:" << request;
return request->Cancel();
}

@ -133,8 +133,9 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
}
~GrpcPolledFdWindows() override {
GRPC_CARES_TRACE_LOG("fd:|%s| ~GrpcPolledFdWindows shutdown_called_: %d ",
GetName(), shutdown_called_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| ~GrpcPolledFdWindows shutdown_called_: " << shutdown_called_;
CSliceUnref(read_buf_);
CSliceUnref(write_buf_);
CHECK_EQ(read_closure_, nullptr);
@ -173,10 +174,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
}
void ContinueRegisterForOnReadableLocked() {
GRPC_CARES_TRACE_LOG(
"fd:|%s| ContinueRegisterForOnReadableLocked "
"wsa_connect_error_:%d",
GetName(), wsa_connect_error_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| ContinueRegisterForOnReadableLocked "
<< "wsa_connect_error_:" << wsa_connect_error_;
CHECK(connect_done_);
if (wsa_connect_error_ != 0) {
ScheduleAndNullReadClosure(GRPC_WSA_ERROR(wsa_connect_error_, "connect"));
@ -194,10 +195,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
&winsocket_->read_info.overlapped, nullptr)) {
int wsa_last_error = WSAGetLastError();
char* msg = gpr_format_message(wsa_last_error);
GRPC_CARES_TRACE_LOG(
"fd:|%s| RegisterForOnReadableLocked WSARecvFrom error code:|%d| "
"msg:|%s|",
GetName(), wsa_last_error, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| RegisterForOnReadableLocked WSARecvFrom error code:|"
<< wsa_last_error << "| msg:|" << msg << "|";
gpr_free(msg);
if (wsa_last_error != WSA_IO_PENDING) {
ScheduleAndNullReadClosure(
@ -210,14 +211,15 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
void RegisterForOnWriteableLocked(grpc_closure* write_closure) override {
if (socket_type_ == SOCK_DGRAM) {
GRPC_CARES_TRACE_LOG("fd:|%s| RegisterForOnWriteableLocked called",
GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| RegisterForOnWriteableLocked called";
} else {
CHECK(socket_type_ == SOCK_STREAM);
GRPC_CARES_TRACE_LOG(
"fd:|%s| RegisterForOnWriteableLocked called tcp_write_state_: %d "
"connect_done_: %d",
GetName(), tcp_write_state_, connect_done_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| RegisterForOnWriteableLocked called tcp_write_state_: "
<< tcp_write_state_ << " connect_done_: " << connect_done_;
}
CHECK_EQ(write_closure_, nullptr);
write_closure_ = write_closure;
@ -234,10 +236,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
}
void ContinueRegisterForOnWriteableLocked() {
GRPC_CARES_TRACE_LOG(
"fd:|%s| ContinueRegisterForOnWriteableLocked "
"wsa_connect_error_:%d",
GetName(), wsa_connect_error_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| ContinueRegisterForOnWriteableLocked "
<< "wsa_connect_error_:" << wsa_connect_error_;
CHECK(connect_done_);
if (wsa_connect_error_ != 0) {
ScheduleAndNullWriteClosure(
@ -288,10 +290,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
ares_ssize_t RecvFrom(WSAErrorContext* wsa_error_ctx, void* data,
ares_socket_t data_len, int /* flags */,
struct sockaddr* from, ares_socklen_t* from_len) {
GRPC_CARES_TRACE_LOG(
"fd:|%s| RecvFrom called read_buf_has_data:%d Current read buf "
"length:|%d|",
GetName(), read_buf_has_data_, GRPC_SLICE_LENGTH(read_buf_));
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " RecvFrom called read_buf_has_data:" << read_buf_has_data_
<< " Current read buf length:" << GRPC_SLICE_LENGTH(read_buf_);
if (!read_buf_has_data_) {
wsa_error_ctx->SetWSAError(WSAEWOULDBLOCK);
return -1;
@ -340,20 +342,21 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int out = WSASend(grpc_winsocket_wrapped_socket(winsocket_), &buf, 1,
bytes_sent_ptr, flags, overlapped, nullptr);
*wsa_error_code = WSAGetLastError();
GRPC_CARES_TRACE_LOG(
"fd:|%s| SendWriteBuf WSASend buf.len:%d *bytes_sent_ptr:%d "
"overlapped:%p "
"return:%d *wsa_error_code:%d",
GetName(), buf.len, bytes_sent_ptr != nullptr ? *bytes_sent_ptr : 0,
overlapped, out, *wsa_error_code);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " SendWriteBuf WSASend buf.len:" << buf.len << " *bytes_sent_ptr:"
<< (bytes_sent_ptr != nullptr ? *bytes_sent_ptr : 0)
<< " overlapped:" << overlapped << " return:" << out
<< " *wsa_error_code:" << *wsa_error_code;
return out;
}
ares_ssize_t SendV(WSAErrorContext* wsa_error_ctx, const struct iovec* iov,
int iov_count) {
GRPC_CARES_TRACE_LOG(
"fd:|%s| SendV called connect_done_:%d wsa_connect_error_:%d",
GetName(), connect_done_, wsa_connect_error_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " SendV called connect_done_:" << connect_done_
<< " wsa_connect_error_:" << wsa_connect_error_;
if (!connect_done_) {
wsa_error_ctx->SetWSAError(WSAEWOULDBLOCK);
return -1;
@ -377,7 +380,8 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
// c-ares doesn't handle retryable errors on writes of UDP sockets.
// Therefore, the sendv handler for UDP sockets must only attempt
// to write everything inline.
GRPC_CARES_TRACE_LOG("fd:|%s| SendVUDP called", GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName() << " SendVUDP called";
CHECK_EQ(GRPC_SLICE_LENGTH(write_buf_), 0);
CSliceUnref(write_buf_);
write_buf_ = FlattenIovec(iov, iov_count);
@ -388,9 +392,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
write_buf_ = grpc_empty_slice();
wsa_error_ctx->SetWSAError(wsa_error_code);
char* msg = gpr_format_message(wsa_error_code);
GRPC_CARES_TRACE_LOG(
"fd:|%s| SendVUDP SendWriteBuf error code:%d msg:|%s|", GetName(),
wsa_error_code, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " SendVUDP SendWriteBuf error code:" << wsa_error_code
<< " msg:" << msg;
gpr_free(msg);
return -1;
}
@ -406,8 +411,9 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
// out in the background, and making further send progress in general, will
// happen as long as c-ares continues to show interest in writeability on
// this fd.
GRPC_CARES_TRACE_LOG("fd:|%s| SendVTCP called tcp_write_state_:%d",
GetName(), tcp_write_state_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " SendVTCP called tcp_write_state_:" << tcp_write_state_;
switch (tcp_write_state_) {
case WRITE_IDLE:
tcp_write_state_ = WRITE_REQUESTED;
@ -450,13 +456,13 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
}
void OnTcpConnectLocked(grpc_error_handle error) {
GRPC_CARES_TRACE_LOG(
"fd:%s InnerOnTcpConnectLocked error:|%s| "
"pending_register_for_readable:%d"
" pending_register_for_writeable:%d",
GetName(), StatusToString(error).c_str(),
pending_continue_register_for_on_readable_locked_,
pending_continue_register_for_on_writeable_locked_);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " InnerOnTcpConnectLocked error:" << StatusToString(error)
<< " pending_register_for_readable:"
<< pending_continue_register_for_on_readable_locked_
<< " pending_register_for_writeable:"
<< pending_continue_register_for_on_writeable_locked_;
CHECK(!connect_done_);
connect_done_ = true;
CHECK_EQ(wsa_connect_error_, 0);
@ -473,10 +479,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
if (!wsa_success) {
wsa_connect_error_ = WSAGetLastError();
char* msg = gpr_format_message(wsa_connect_error_);
GRPC_CARES_TRACE_LOG(
"fd:%s InnerOnTcpConnectLocked WSA overlapped result code:%d "
"msg:|%s|",
GetName(), wsa_connect_error_, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " InnerOnTcpConnectLocked WSA overlapped result code:"
<< wsa_connect_error_ << " msg:" << msg;
gpr_free(msg);
}
}
@ -502,7 +508,8 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int ConnectUDP(WSAErrorContext* wsa_error_ctx, const struct sockaddr* target,
ares_socklen_t target_len) {
GRPC_CARES_TRACE_LOG("fd:%s ConnectUDP", GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName() << " ConnectUDP";
CHECK(!connect_done_);
CHECK_EQ(wsa_connect_error_, 0);
SOCKET s = grpc_winsocket_wrapped_socket(winsocket_);
@ -512,8 +519,9 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
wsa_error_ctx->SetWSAError(wsa_connect_error_);
connect_done_ = true;
char* msg = gpr_format_message(wsa_connect_error_);
GRPC_CARES_TRACE_LOG("fd:%s WSAConnect error code:|%d| msg:|%s|", GetName(),
wsa_connect_error_, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName() << " WSAConnect error code:|"
<< wsa_connect_error_ << "| msg:|" << msg << "|";
gpr_free(msg);
// c-ares expects a posix-style connect API
return out == 0 ? 0 : -1;
@ -521,7 +529,8 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int ConnectTCP(WSAErrorContext* wsa_error_ctx, const struct sockaddr* target,
ares_socklen_t target_len) {
GRPC_CARES_TRACE_LOG("fd:%s ConnectTCP", GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName() << " ConnectTCP";
LPFN_CONNECTEX ConnectEx;
GUID guid = WSAID_CONNECTEX;
DWORD ioctl_num_bytes;
@ -532,10 +541,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int wsa_last_error = WSAGetLastError();
wsa_error_ctx->SetWSAError(wsa_last_error);
char* msg = gpr_format_message(wsa_last_error);
GRPC_CARES_TRACE_LOG(
"fd:%s WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER) error code:%d "
"msg:|%s|",
GetName(), wsa_last_error, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER) error code:"
<< wsa_last_error << " msg:|" << msg << "|";
gpr_free(msg);
connect_done_ = true;
wsa_connect_error_ = wsa_last_error;
@ -555,8 +564,9 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int wsa_last_error = WSAGetLastError();
wsa_error_ctx->SetWSAError(wsa_last_error);
char* msg = gpr_format_message(wsa_last_error);
GRPC_CARES_TRACE_LOG("fd:%s bind error code:%d msg:|%s|", GetName(),
wsa_last_error, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " bind error code:" << wsa_last_error << " msg:|" << msg << "|";
gpr_free(msg);
connect_done_ = true;
wsa_connect_error_ = wsa_last_error;
@ -569,8 +579,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
int wsa_last_error = WSAGetLastError();
wsa_error_ctx->SetWSAError(wsa_last_error);
char* msg = gpr_format_message(wsa_last_error);
GRPC_CARES_TRACE_LOG("fd:%s ConnectEx error code:%d msg:|%s|", GetName(),
wsa_last_error, msg);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << GetName()
<< " ConnectEx error code:" << wsa_last_error << " msg:|" << msg
<< "|";
gpr_free(msg);
if (wsa_last_error == WSA_IO_PENDING) {
// c-ares only understands WSAEINPROGRESS and EWOULDBLOCK error codes on
@ -610,11 +622,12 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
if (winsocket_->read_info.wsa_error != WSAEMSGSIZE) {
error = GRPC_WSA_ERROR(winsocket_->read_info.wsa_error,
"OnIocpReadableInner");
GRPC_CARES_TRACE_LOG(
"fd:|%s| OnIocpReadableInner winsocket_->read_info.wsa_error "
"code:|%d| msg:|%s|",
GetName(), winsocket_->read_info.wsa_error,
StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| OnIocpReadableInner winsocket_->read_info.wsa_error "
"code:|"
<< winsocket_->read_info.wsa_error << "| msg:|"
<< StatusToString(error) << "|";
}
}
}
@ -626,9 +639,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
CSliceUnref(read_buf_);
read_buf_ = grpc_empty_slice();
}
GRPC_CARES_TRACE_LOG(
"fd:|%s| OnIocpReadable finishing. read buf length now:|%d|", GetName(),
GRPC_SLICE_LENGTH(read_buf_));
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| OnIocpReadable finishing. read buf length now:|"
<< GRPC_SLICE_LENGTH(read_buf_) << "|";
ScheduleAndNullReadClosure(error);
}
@ -639,17 +653,19 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
}
void OnIocpWriteableLocked(grpc_error_handle error) {
GRPC_CARES_TRACE_LOG("OnIocpWriteableInner. fd:|%s|", GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) OnIocpWriteableInner. fd:|" << GetName() << "|";
CHECK(socket_type_ == SOCK_STREAM);
if (error.ok()) {
if (winsocket_->write_info.wsa_error != 0) {
error = GRPC_WSA_ERROR(winsocket_->write_info.wsa_error,
"OnIocpWriteableInner");
GRPC_CARES_TRACE_LOG(
"fd:|%s| OnIocpWriteableInner. winsocket_->write_info.wsa_error "
"code:|%d| msg:|%s|",
GetName(), winsocket_->write_info.wsa_error,
StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| OnIocpWriteableInner. winsocket_->write_info.wsa_error "
"code:|"
<< winsocket_->write_info.wsa_error << "| msg:|"
<< StatusToString(error) << "|";
}
}
CHECK(tcp_write_state_ == WRITE_PENDING);
@ -657,8 +673,10 @@ class GrpcPolledFdWindows final : public GrpcPolledFd {
tcp_write_state_ = WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY;
write_buf_ = grpc_slice_sub_no_ref(
write_buf_, 0, winsocket_->write_info.bytes_transferred);
GRPC_CARES_TRACE_LOG("fd:|%s| OnIocpWriteableInner. bytes transferred:%d",
GetName(), winsocket_->write_info.bytes_transferred);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:|" << GetName()
<< "| OnIocpWriteableInner. bytes transferred:"
<< winsocket_->write_info.bytes_transferred;
} else {
CSliceUnref(write_buf_);
write_buf_ = grpc_empty_slice();
@ -728,7 +746,9 @@ class GrpcPolledFdFactoryWindows final : public GrpcPolledFdFactory {
//
static ares_socket_t Socket(int af, int type, int protocol, void* user_data) {
if (type != SOCK_DGRAM && type != SOCK_STREAM) {
GRPC_CARES_TRACE_LOG("Socket called with invalid socket type:%d", type);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) Socket called with invalid socket type:"
<< type;
return INVALID_SOCKET;
}
GrpcPolledFdFactoryWindows* self =
@ -736,15 +756,16 @@ class GrpcPolledFdFactoryWindows final : public GrpcPolledFdFactory {
SOCKET s = WSASocket(af, type, protocol, nullptr, 0,
grpc_get_default_wsa_socket_flags());
if (s == INVALID_SOCKET) {
GRPC_CARES_TRACE_LOG(
"WSASocket failed with params af:%d type:%d protocol:%d", af, type,
protocol);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) WSASocket failed with params af:" << af
<< " type:" << type << " protocol:" << protocol;
return s;
}
grpc_error_handle error = grpc_tcp_set_non_block(s);
if (!error.ok()) {
GRPC_CARES_TRACE_LOG("WSAIoctl failed with error: %s",
StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) WSAIoctl failed with error: "
<< StatusToString(error);
return INVALID_SOCKET;
}
auto on_shutdown_locked = [self, s]() {
@ -755,9 +776,10 @@ class GrpcPolledFdFactoryWindows final : public GrpcPolledFdFactory {
};
auto polled_fd = new GrpcPolledFdWindows(s, self->mu_, af, type,
std::move(on_shutdown_locked));
GRPC_CARES_TRACE_LOG(
"fd:|%s| created with params af:%d type:%d protocol:%d",
polled_fd->GetName(), af, type, protocol);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) fd:" << polled_fd->GetName()
<< " created with params af:" << af << " type:" << type
<< " protocol:" << protocol;
CHECK(self->sockets_.insert({s, polled_fd}).second);
return s;
}

@ -200,8 +200,9 @@ static absl::Status AresStatusToAbslStatus(int status,
static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
grpc_ares_ev_driver* ev_driver)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
GRPC_CARES_TRACE_LOG("request:%p Ref ev_driver %p", ev_driver->request,
ev_driver);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request << " Ref ev_driver "
<< ev_driver;
gpr_ref(&ev_driver->refs);
return ev_driver;
}
@ -211,11 +212,13 @@ static void grpc_ares_complete_request_locked(grpc_ares_request* r)
static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
GRPC_CARES_TRACE_LOG("request:%p Unref ev_driver %p", ev_driver->request,
ev_driver);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " Unref ev_driver " << ev_driver;
if (gpr_unref(&ev_driver->refs)) {
GRPC_CARES_TRACE_LOG("request:%p destroy ev_driver %p", ev_driver->request,
ev_driver);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " destroy ev_driver " << ev_driver;
CHECK_EQ(ev_driver->fds, nullptr);
ares_destroy(ev_driver->channel);
grpc_ares_complete_request_locked(ev_driver->request);
@ -225,8 +228,9 @@ static void grpc_ares_ev_driver_unref(grpc_ares_ev_driver* ev_driver)
static void fd_node_destroy_locked(fd_node* fdn)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&grpc_ares_request::mu) {
GRPC_CARES_TRACE_LOG("request:%p delete fd: %s", fdn->ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << fdn->ev_driver->request
<< " delete fd: " << fdn->grpc_polled_fd->GetName();
CHECK(!fdn->readable_registered);
CHECK(!fdn->writable_registered);
CHECK(fdn->already_shutdown);
@ -292,21 +296,21 @@ static grpc_core::Timestamp calculate_next_ares_backup_poll_alarm(
// by the c-ares code comments.
grpc_core::Duration until_next_ares_backup_poll_alarm =
grpc_core::Duration::Seconds(1);
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p. next ares process poll time in "
"%" PRId64 " ms",
driver->request, driver, until_next_ares_backup_poll_alarm.millis());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << driver->request
<< " ev_driver=" << driver << ". next ares process poll time in "
<< until_next_ares_backup_poll_alarm.millis() << " ms";
return grpc_core::Timestamp::Now() + until_next_ares_backup_poll_alarm;
}
static void on_timeout(void* arg, grpc_error_handle error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
grpc_core::MutexLock lock(&driver->request->mu);
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p on_timeout_locked. driver->shutting_down=%d. "
"err=%s",
driver->request, driver, driver->shutting_down,
grpc_core::StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << driver->request
<< " ev_driver=" << driver
<< " on_timeout_locked. driver->shutting_down=" << driver->shutting_down
<< ". err=" << grpc_core::StatusToString(error);
if (!driver->shutting_down && error.ok()) {
grpc_ares_ev_driver_shutdown_locked(driver);
}
@ -327,20 +331,20 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver)
static void on_ares_backup_poll_alarm(void* arg, grpc_error_handle error) {
grpc_ares_ev_driver* driver = static_cast<grpc_ares_ev_driver*>(arg);
grpc_core::MutexLock lock(&driver->request->mu);
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p on_ares_backup_poll_alarm_locked. "
"driver->shutting_down=%d. "
"err=%s",
driver->request, driver, driver->shutting_down,
grpc_core::StatusToString(error).c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << driver->request
<< " ev_driver=" << driver
<< " on_ares_backup_poll_alarm_locked. driver->shutting_down="
<< driver->shutting_down << ". err=" << grpc_core::StatusToString(error);
if (!driver->shutting_down && error.ok()) {
fd_node* fdn = driver->fds;
while (fdn != nullptr) {
if (!fdn->already_shutdown) {
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p on_ares_backup_poll_alarm_locked; "
"ares_process_fd. fd=%s",
driver->request, driver, fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << driver->request
<< " ev_driver=" << driver
<< " on_ares_backup_poll_alarm_locked; ares_process_fd. fd="
<< fdn->grpc_polled_fd->GetName();
ares_socket_t as = fdn->grpc_polled_fd->GetWrappedAresSocketLocked();
ares_process_fd(driver->channel, as, as);
}
@ -373,8 +377,9 @@ static void on_readable(void* arg, grpc_error_handle error) {
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
const ares_socket_t as = fdn->grpc_polled_fd->GetWrappedAresSocketLocked();
fdn->readable_registered = false;
GRPC_CARES_TRACE_LOG("request:%p readable on %s", fdn->ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << fdn->ev_driver->request
<< " readable on " << fdn->grpc_polled_fd->GetName();
if (error.ok() && !ev_driver->shutting_down) {
ares_process_fd(ev_driver->channel, as, ARES_SOCKET_BAD);
} else {
@ -397,8 +402,9 @@ static void on_writable(void* arg, grpc_error_handle error) {
grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
const ares_socket_t as = fdn->grpc_polled_fd->GetWrappedAresSocketLocked();
fdn->writable_registered = false;
GRPC_CARES_TRACE_LOG("request:%p writable on %s", ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request << " writable on "
<< fdn->grpc_polled_fd->GetName();
if (error.ok() && !ev_driver->shutting_down) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, as);
} else {
@ -433,8 +439,9 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver)
fdn->grpc_polled_fd =
ev_driver->polled_fd_factory->NewGrpcPolledFdLocked(
socks[i], ev_driver->pollset_set);
GRPC_CARES_TRACE_LOG("request:%p new fd: %s", ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " new fd: " << fdn->grpc_polled_fd->GetName();
fdn->readable_registered = false;
fdn->writable_registered = false;
fdn->already_shutdown = false;
@ -449,15 +456,16 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver)
GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable, fdn,
grpc_schedule_on_exec_ctx);
if (fdn->grpc_polled_fd->IsFdStillReadableLocked()) {
GRPC_CARES_TRACE_LOG("request:%p schedule direct read on: %s",
ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " schedule direct read on: "
<< fdn->grpc_polled_fd->GetName();
grpc_core::ExecCtx::Run(DEBUG_LOCATION, &fdn->read_closure,
absl::OkStatus());
} else {
GRPC_CARES_TRACE_LOG("request:%p notify read on: %s",
ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " notify read on: " << fdn->grpc_polled_fd->GetName();
fdn->grpc_polled_fd->RegisterForOnReadableLocked(
&fdn->read_closure);
}
@ -467,9 +475,9 @@ static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver)
// has not been registered with this socket.
if (ARES_GETSOCK_WRITABLE(socks_bitmask, i) &&
!fdn->writable_registered) {
GRPC_CARES_TRACE_LOG("request:%p notify write on: %s",
ev_driver->request,
fdn->grpc_polled_fd->GetName());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " notify write on: " << fdn->grpc_polled_fd->GetName();
grpc_ares_ev_driver_ref(ev_driver);
GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable, fdn,
grpc_schedule_on_exec_ctx);
@ -505,10 +513,11 @@ void grpc_ares_ev_driver_start_locked(grpc_ares_ev_driver* ev_driver)
ev_driver->query_timeout_ms == 0
? grpc_core::Duration::Infinity()
: grpc_core::Duration::Milliseconds(ev_driver->query_timeout_ms);
GRPC_CARES_TRACE_LOG(
"request:%p ev_driver=%p grpc_ares_ev_driver_start_locked. timeout in "
"%" PRId64 " ms",
ev_driver->request, ev_driver, timeout.millis());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << ev_driver->request
<< " ev_driver=" << ev_driver
<< " grpc_ares_ev_driver_start_locked. timeout in " << timeout.millis()
<< " ms";
grpc_ares_ev_driver_ref(ev_driver);
GRPC_CLOSURE_INIT(&ev_driver->on_timeout_locked, on_timeout, ev_driver,
grpc_schedule_on_exec_ctx);
@ -547,7 +556,8 @@ grpc_error_handle grpc_ares_ev_driver_create_locked(
}
int status = ares_init_options(&(*ev_driver)->channel, &opts, ARES_OPT_FLAGS);
grpc_ares_test_only_inject_config(&(*ev_driver)->channel);
GRPC_CARES_TRACE_LOG("request:%p grpc_ares_ev_driver_create_locked", request);
GRPC_TRACE_VLOG(cares_resolver, 2) << "(c-ares resolver) request:" << request
<< " grpc_ares_ev_driver_create_locked";
if (status != ARES_SUCCESS) {
grpc_error_handle err = GRPC_ERROR_CREATE(absl::StrCat(
"Failed to init ares channel. C-ares error: ", ares_strerror(status)));
@ -645,10 +655,10 @@ static grpc_ares_hostbyname_request* create_hostbyname_request_locked(
grpc_ares_request* parent_request, const char* host, uint16_t port,
bool is_balancer, const char* qtype)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(parent_request->mu) {
GRPC_CARES_TRACE_LOG(
"request:%p create_hostbyname_request_locked host:%s port:%d "
"is_balancer:%d qtype:%s",
parent_request, host, port, is_balancer, qtype);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << parent_request
<< " create_hostbyname_request_locked host:" << host << " port:" << port
<< " is_balancer:" << is_balancer << " qtype:" << qtype;
grpc_ares_hostbyname_request* hr = new grpc_ares_hostbyname_request();
hr->parent_request = parent_request;
hr->host = gpr_strdup(host);
@ -675,9 +685,10 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
static_cast<grpc_ares_hostbyname_request*>(arg);
grpc_ares_request* r = hr->parent_request;
if (status == ARES_SUCCESS) {
GRPC_CARES_TRACE_LOG(
"request:%p on_hostbyname_done_locked qtype=%s host=%s ARES_SUCCESS", r,
hr->qtype, hr->host);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " on_hostbyname_done_locked qtype=" << hr->qtype
<< " host=" << hr->host << " ARES_SUCCESS";
std::unique_ptr<EndpointAddressesList>* address_list_ptr =
hr->is_balancer ? r->balancer_addresses_out : r->addresses_out;
if (*address_list_ptr == nullptr) {
@ -701,10 +712,11 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
addr->sin6_port = hr->port;
char output[INET6_ADDRSTRLEN];
ares_inet_ntop(AF_INET6, &addr->sin6_addr, output, INET6_ADDRSTRLEN);
GRPC_CARES_TRACE_LOG(
"request:%p c-ares resolver gets a AF_INET6 result: \n"
" addr: %s\n port: %d\n sin6_scope_id: %d\n",
r, output, ntohs(hr->port), addr->sin6_scope_id);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " c-ares resolver gets a AF_INET6 result: \n"
<< " addr: " << output << "\n port: " << ntohs(hr->port)
<< "\n sin6_scope_id: " << addr->sin6_scope_id << "\n";
break;
}
case AF_INET: {
@ -716,10 +728,10 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
addr->sin_port = hr->port;
char output[INET_ADDRSTRLEN];
ares_inet_ntop(AF_INET, &addr->sin_addr, output, INET_ADDRSTRLEN);
GRPC_CARES_TRACE_LOG(
"request:%p c-ares resolver gets a AF_INET result: \n"
" addr: %s\n port: %d\n",
r, output, ntohs(hr->port));
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " c-ares resolver gets a AF_INET result: \n addr: " << output
<< "\n port: " << ntohs(hr->port) << "\n";
break;
}
}
@ -729,8 +741,9 @@ static void on_hostbyname_done_locked(void* arg, int status, int /*timeouts*/,
std::string error_msg = absl::StrFormat(
"C-ares status is not ARES_SUCCESS qtype=%s name=%s is_balancer=%d: %s",
hr->qtype, hr->host, hr->is_balancer, ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_hostbyname_done_locked: %s", r,
error_msg.c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " on_hostbyname_done_locked: " << error_msg;
r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error);
}
@ -745,13 +758,14 @@ static void on_srv_query_done_locked(void* arg, int status, int /*timeouts*/,
GrpcAresQuery* q = static_cast<GrpcAresQuery*>(arg);
grpc_ares_request* r = q->parent_request();
if (status == ARES_SUCCESS) {
GRPC_CARES_TRACE_LOG(
"request:%p on_srv_query_done_locked name=%s ARES_SUCCESS", r,
q->name().c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " on_srv_query_done_locked name=" << q->name() << " ARES_SUCCESS";
struct ares_srv_reply* reply;
const int parse_status = ares_parse_srv_reply(abuf, alen, &reply);
GRPC_CARES_TRACE_LOG("request:%p ares_parse_srv_reply: %d", r,
parse_status);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " ares_parse_srv_reply: " << parse_status;
if (parse_status == ARES_SUCCESS) {
for (struct ares_srv_reply* srv_it = reply; srv_it != nullptr;
srv_it = srv_it->next) {
@ -775,8 +789,9 @@ static void on_srv_query_done_locked(void* arg, int status, int /*timeouts*/,
std::string error_msg = absl::StrFormat(
"C-ares status is not ARES_SUCCESS qtype=SRV name=%s: %s", q->name(),
ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_srv_query_done_locked: %s", r,
error_msg.c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " on_srv_query_done_locked: " << error_msg;
r->error = grpc_error_add_child(AresStatusToAbslStatus(status, error_msg),
r->error);
}
@ -797,8 +812,9 @@ static void on_txt_done_locked(void* arg, int status, int /*timeouts*/,
struct ares_txt_ext* result = nullptr;
struct ares_txt_ext* reply = nullptr;
if (status != ARES_SUCCESS) goto fail;
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked name=%s ARES_SUCCESS", r,
q->name().c_str());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " on_txt_done_locked name=" << q->name() << " ARES_SUCCESS";
status = ares_parse_txt_reply_ext(buf, len, &reply);
if (status != ARES_SUCCESS) goto fail;
// Find service config in TXT record.
@ -826,8 +842,9 @@ static void on_txt_done_locked(void* arg, int status, int /*timeouts*/,
service_config_len += result->length;
}
(*r->service_config_json_out)[service_config_len] = '\0';
GRPC_CARES_TRACE_LOG("request:%p found service config: %s", r,
*r->service_config_json_out);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " found service config: " << *r->service_config_json_out;
}
// Clean up.
ares_free_data(reply);
@ -837,8 +854,8 @@ fail:
std::string error_msg =
absl::StrFormat("C-ares status is not ARES_SUCCESS qtype=TXT name=%s: %s",
q->name(), ares_strerror(status));
GRPC_CARES_TRACE_LOG("request:%p on_txt_done_locked %s", r,
error_msg.c_str());
GRPC_TRACE_VLOG(cares_resolver, 2) << "(c-ares resolver) request:" << r
<< " on_txt_done_locked " << error_msg;
r->error =
grpc_error_add_child(AresStatusToAbslStatus(status, error_msg), r->error);
}
@ -847,8 +864,9 @@ grpc_error_handle set_request_dns_server(grpc_ares_request* r,
absl::string_view dns_server)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(r->mu) {
if (!dns_server.empty()) {
GRPC_CARES_TRACE_LOG("request:%p Using DNS server %s", r,
dns_server.data());
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r << " Using DNS server "
<< dns_server.data();
grpc_resolved_address addr;
if (grpc_parse_ipv4_hostport(dns_server, &addr, /*log_errors=*/false)) {
r->dns_server_addr.family = AF_INET;
@ -1043,10 +1061,10 @@ static grpc_ares_request* grpc_dns_lookup_hostname_ares_impl(
r->ev_driver = nullptr;
r->on_done = on_done;
r->addresses_out = addrs;
GRPC_CARES_TRACE_LOG(
"request:%p c-ares grpc_dns_lookup_hostname_ares_impl name=%s, "
"default_port=%s",
r, name, default_port);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " c-ares grpc_dns_lookup_hostname_ares_impl name=" << name
<< ", default_port=" << default_port;
// Early out if the target is an ipv4 or ipv6 literal.
if (resolve_as_ip_literal_locked(name, default_port, addrs)) {
grpc_ares_complete_request_locked(r);
@ -1097,8 +1115,9 @@ grpc_ares_request* grpc_dns_lookup_srv_ares_impl(
r->ev_driver = nullptr;
r->on_done = on_done;
r->balancer_addresses_out = balancer_addresses;
GRPC_CARES_TRACE_LOG(
"request:%p c-ares grpc_dns_lookup_srv_ares_impl name=%s", r, name);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " c-ares grpc_dns_lookup_srv_ares_impl name=" << name;
grpc_error_handle error;
// Don't query for SRV records if the target is "localhost"
if (target_matches_localhost(name)) {
@ -1135,8 +1154,9 @@ grpc_ares_request* grpc_dns_lookup_txt_ares_impl(
r->ev_driver = nullptr;
r->on_done = on_done;
r->service_config_json_out = service_config_json;
GRPC_CARES_TRACE_LOG(
"request:%p c-ares grpc_dns_lookup_txt_ares_impl name=%s", r, name);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " c-ares grpc_dns_lookup_txt_ares_impl name=" << name;
grpc_error_handle error;
// Don't query for TXT records if the target is "localhost"
if (target_matches_localhost(name)) {
@ -1185,8 +1205,9 @@ grpc_ares_request* (*grpc_dns_lookup_txt_ares)(
static void grpc_cancel_ares_request_impl(grpc_ares_request* r) {
CHECK_NE(r, nullptr);
grpc_core::MutexLock lock(&r->mu);
GRPC_CARES_TRACE_LOG("request:%p grpc_cancel_ares_request ev_driver:%p", r,
r->ev_driver);
GRPC_TRACE_VLOG(cares_resolver, 2)
<< "(c-ares resolver) request:" << r
<< " grpc_cancel_ares_request ev_driver:" << r->ev_driver;
if (r->ev_driver != nullptr) {
grpc_ares_ev_driver_shutdown_locked(r->ev_driver);
}

@ -39,13 +39,6 @@
#define GRPC_DNS_ARES_DEFAULT_QUERY_TIMEOUT_MS 120000
#define GRPC_CARES_TRACE_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) { \
VLOG(2) << "(c-ares resolver) " << absl::StrFormat(format, __VA_ARGS__); \
} \
} while (0)
typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
struct grpc_ares_request {

@ -374,9 +374,8 @@ XdsDependencyManager::XdsDependencyManager(
}
void XdsDependencyManager::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] shutting down";
if (listener_watcher_ != nullptr) {
XdsListenerResourceType::CancelWatch(
xds_client_.get(), listener_resource_name_, listener_watcher_,
@ -450,11 +449,9 @@ void XdsDependencyManager::OnListenerUpdate(
}
// Start watch for the new RDS resource name.
route_config_name_ = rds_name;
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] starting watch for route config "
<< route_config_name_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] starting watch for route config " << route_config_name_;
auto watcher =
MakeRefCounted<RouteConfigWatcher>(Ref(), route_config_name_);
route_config_watcher_ = watcher.get();
@ -537,11 +534,9 @@ absl::flat_hash_set<absl::string_view> GetClustersFromVirtualHost(
void XdsDependencyManager::OnRouteConfigUpdate(
const std::string& name,
std::shared_ptr<const XdsRouteConfigResource> route_config) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received RouteConfig update for "
<< (name.empty() ? "<inline>" : name);
}
GRPC_TRACE_LOG(xds_resolver, INFO) << "[XdsDependencyManager " << this
<< "] received RouteConfig update for "
<< (name.empty() ? "<inline>" : name);
if (xds_client_ == nullptr) return;
// Ignore updates for stale names.
if (name.empty()) {
@ -572,20 +567,18 @@ void XdsDependencyManager::OnRouteConfigUpdate(
}
void XdsDependencyManager::OnError(std::string context, absl::Status status) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this
<< "] received Listener or RouteConfig error: " << context << " "
<< status;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this
<< "] received Listener or RouteConfig error: " << context << " "
<< status;
if (xds_client_ == nullptr) return;
if (current_virtual_host_ != nullptr) return;
watcher_->OnError(context, std::move(status));
}
void XdsDependencyManager::OnResourceDoesNotExist(std::string context) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[XdsDependencyManager " << this << "] " << context;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[XdsDependencyManager " << this << "] " << context;
if (xds_client_ == nullptr) return;
current_virtual_host_ = nullptr;
watcher_->OnResourceDoesNotExist(std::move(context));

@ -110,17 +110,14 @@ class XdsResolver final : public Resolver {
uri_(std::move(args.uri)),
data_plane_authority_(std::move(data_plane_authority)),
channel_id_(absl::Uniform<uint64_t>(absl::BitGen())) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] created for URI "
<< uri_.ToString() << "; data plane authority is "
<< data_plane_authority_;
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] created for URI " << uri_.ToString()
<< "; data plane authority is " << data_plane_authority_;
}
~XdsResolver() override {
  // Trace-log destruction once.  The previous code logged this message twice:
  // once via a GRPC_TRACE_FLAG_ENABLED-guarded LOG(INFO) and once via the
  // equivalent GRPC_TRACE_LOG statement.
  GRPC_TRACE_LOG(xds_resolver, INFO)
      << "[xds_resolver " << this << "] destroyed";
}
void StartLocked() override;
@ -969,9 +966,8 @@ void XdsResolver::StartLocked() {
}
void XdsResolver::ShutdownLocked() {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] shutting down";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] shutting down";
if (xds_client_ != nullptr) {
dependency_mgr_.reset();
grpc_pollset_set_del_pollset_set(xds_client_->interested_parties(),
@ -982,9 +978,8 @@ void XdsResolver::ShutdownLocked() {
void XdsResolver::OnUpdate(
RefCountedPtr<const XdsDependencyManager::XdsConfig> config) {
if (GRPC_TRACE_FLAG_ENABLED(xds_resolver)) {
LOG(INFO) << "[xds_resolver " << this << "] received updated xDS config";
}
GRPC_TRACE_LOG(xds_resolver, INFO)
<< "[xds_resolver " << this << "] received updated xDS config";
if (xds_client_ == nullptr) return;
current_config_ = std::move(config);
GenerateResult();

@ -586,11 +586,9 @@ XdsServerConfigFetcher::ListenerWatcher::ListenerWatcher(
void XdsServerConfigFetcher::ListenerWatcher::OnResourceChanged(
std::shared_ptr<const XdsListenerResource> listener,
RefCountedPtr<ReadDelayHandle> /* read_delay_handle */) {
if (GRPC_TRACE_FLAG_ENABLED(xds_server_config_fetcher)) {
LOG(INFO) << "[ListenerWatcher " << this
<< "] Received LDS update from xds client " << xds_client_.get()
<< ": " << listener->ToString();
}
GRPC_TRACE_LOG(xds_server_config_fetcher, INFO)
<< "[ListenerWatcher " << this << "] Received LDS update from xds client "
<< xds_client_.get() << ": " << listener->ToString();
auto* tcp_listener =
absl::get_if<XdsListenerResource::TcpListener>(&listener->listener);
if (tcp_listener == nullptr) {

@ -642,11 +642,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;
}
if (GRPC_TRACE_FLAG_ENABLED(tsi)) {
LOG(INFO) << (impl->is_client ? "Client" : "Server") << " prepared "
<< tsi_fake_handshake_message_to_string(
impl->next_message_to_send);
}
GRPC_TRACE_LOG(tsi, INFO)
<< (impl->is_client ? "Client" : "Server") << " prepared "
<< tsi_fake_handshake_message_to_string(impl->next_message_to_send);
impl->next_message_to_send = next_message_to_send;
}
result =

@ -30,6 +30,7 @@
#include "src/core/lib/gprpp/crash.h"
extern int gpr_should_log(gpr_log_severity severity);
extern void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);

@ -38,15 +38,14 @@
#include "src/core/handshaker/handshaker.h"
#include "src/core/handshaker/handshaker_registry.h"
#include "src/core/handshaker/tcp_connect/tcp_connect_handshaker.h"
#include "src/core/lib/address_utils/sockaddr_utils.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_args_preconditioning.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/event_engine/tcp_socket_utils.h"
#include "src/core/lib/gprpp/status_helper.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
@ -59,6 +58,9 @@ namespace grpc_core {
namespace {
using grpc_event_engine::experimental::EventEngine;
using grpc_event_engine::experimental::ResolvedAddressToURI;
grpc_httpcli_get_override g_get_override;
grpc_httpcli_post_override g_post_override;
grpc_httpcli_put_override g_put_override;
@ -173,7 +175,10 @@ HttpRequest::HttpRequest(
pollent_(pollent),
pollset_set_(grpc_pollset_set_create()),
test_only_generate_response_(std::move(test_only_generate_response)),
resolver_(GetDNSResolver()) {
resolver_(
ChannelArgs::FromC(channel_args_)
.GetObjectRef<EventEngine>()
->GetDNSResolver(EventEngine::DNSResolver::ResolverOptions())) {
grpc_http_parser_init(&parser_, GRPC_HTTP_RESPONSE, response);
grpc_slice_buffer_init(&incoming_);
grpc_slice_buffer_init(&outgoing_);
@ -207,11 +212,14 @@ void HttpRequest::Start() {
test_only_generate_response_.value()();
return;
}
if (!resolver_.ok()) {
Finish(resolver_.status());
return;
}
Ref().release(); // ref held by pending DNS resolution
dns_request_handle_ = resolver_->LookupHostname(
absl::bind_front(&HttpRequest::OnResolved, this), uri_.authority(),
uri_.scheme(), kDefaultDNSRequestTimeout, pollset_set_,
/*name_server=*/"");
(*resolver_)
->LookupHostname(absl::bind_front(&HttpRequest::OnResolved, this),
uri_.authority(), uri_.scheme());
}
void HttpRequest::Orphan() {
@ -220,10 +228,8 @@ void HttpRequest::Orphan() {
CHECK(!cancelled_);
cancelled_ = true;
// cancel potentially pending DNS resolution.
if (dns_request_handle_.has_value() &&
resolver_->Cancel(dns_request_handle_.value())) {
Finish(GRPC_ERROR_CREATE("cancelled during DNS resolution"));
Unref();
if (*resolver_ != nullptr) {
resolver_->reset();
}
if (handshake_mgr_ != nullptr) {
// Shutdown will cancel any ongoing tcp connect.
@ -239,8 +245,7 @@ void HttpRequest::AppendError(grpc_error_handle error) {
if (overall_error_.ok()) {
overall_error_ = GRPC_ERROR_CREATE("Failed HTTP/1 client request");
}
const grpc_resolved_address* addr = &addresses_[next_address_ - 1];
auto addr_text = grpc_sockaddr_to_uri(addr);
auto addr_text = ResolvedAddressToURI(addresses_[next_address_ - 1]);
if (addr_text.ok()) error = AddMessagePrefix(*addr_text, std::move(error));
overall_error_ = grpc_error_add_child(overall_error_, std::move(error));
}
@ -310,7 +315,7 @@ void HttpRequest::OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result) {
StartWrite();
}
void HttpRequest::DoHandshake(const grpc_resolved_address* addr) {
void HttpRequest::DoHandshake(const EventEngine::ResolvedAddress& addr) {
// Create the security connector using the credentials and target name.
ChannelArgs args = ChannelArgs::FromC(channel_args_);
RefCountedPtr<grpc_channel_security_connector> sc =
@ -321,7 +326,7 @@ void HttpRequest::DoHandshake(const grpc_resolved_address* addr) {
&overall_error_, 1));
return;
}
absl::StatusOr<std::string> address = grpc_sockaddr_to_uri(addr);
absl::StatusOr<std::string> address = ResolvedAddressToURI(addr);
if (!address.ok()) {
Finish(GRPC_ERROR_CREATE_REFERENCING("Failed to extract URI from address",
&overall_error_, 1));
@ -354,15 +359,16 @@ void HttpRequest::NextAddress(grpc_error_handle error) {
&overall_error_, 1));
return;
}
const grpc_resolved_address* addr = &addresses_[next_address_++];
DoHandshake(addr);
DoHandshake(addresses_[next_address_++]);
}
void HttpRequest::OnResolved(
absl::StatusOr<std::vector<grpc_resolved_address>> addresses_or) {
absl::StatusOr<std::vector<EventEngine::ResolvedAddress>> addresses_or) {
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
RefCountedPtr<HttpRequest> unreffer(this);
MutexLock lock(&mu_);
dns_request_handle_.reset();
resolver_->reset();
if (cancelled_) {
Finish(GRPC_ERROR_CREATE("cancelled during DNS resolution"));
return;

@ -32,6 +32,7 @@
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include <grpc/event_engine/event_engine.h>
#include <grpc/grpc.h>
#include <grpc/slice.h>
@ -48,8 +49,6 @@
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/resolved_address.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/uri/uri_parser.h"
#include "src/core/util/http_client/parser.h"
@ -223,13 +222,16 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
void OnHandshakeDone(absl::StatusOr<HandshakerArgs*> result);
void DoHandshake(const grpc_resolved_address* addr)
void DoHandshake(
const grpc_event_engine::experimental::EventEngine::ResolvedAddress& addr)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void NextAddress(grpc_error_handle error) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void OnResolved(
absl::StatusOr<std::vector<grpc_resolved_address>> addresses_or);
absl::StatusOr<std::vector<
grpc_event_engine::experimental::EventEngine::ResolvedAddress>>
addresses_or);
const URI uri_;
const grpc_slice request_text_;
@ -250,16 +252,17 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
RefCountedPtr<HandshakeManager> handshake_mgr_ ABSL_GUARDED_BY(mu_);
bool cancelled_ ABSL_GUARDED_BY(mu_) = false;
grpc_http_parser parser_ ABSL_GUARDED_BY(mu_);
std::vector<grpc_resolved_address> addresses_ ABSL_GUARDED_BY(mu_);
std::vector<grpc_event_engine::experimental::EventEngine::ResolvedAddress>
addresses_ ABSL_GUARDED_BY(mu_);
size_t next_address_ ABSL_GUARDED_BY(mu_) = 0;
int have_read_byte_ ABSL_GUARDED_BY(mu_) = 0;
grpc_iomgr_object iomgr_obj_ ABSL_GUARDED_BY(mu_);
grpc_slice_buffer incoming_ ABSL_GUARDED_BY(mu_);
grpc_slice_buffer outgoing_ ABSL_GUARDED_BY(mu_);
grpc_error_handle overall_error_ ABSL_GUARDED_BY(mu_) = absl::OkStatus();
std::shared_ptr<DNSResolver> resolver_;
absl::optional<DNSResolver::TaskHandle> dns_request_handle_
ABSL_GUARDED_BY(mu_) = DNSResolver::kNullHandle;
absl::StatusOr<std::unique_ptr<
grpc_event_engine::experimental::EventEngine::DNSResolver>>
resolver_;
};
} // namespace grpc_core

@ -35,16 +35,25 @@ thread_local uint64_t Log::thread_id_ = Log::Get().next_thread_id_.fetch_add(1);
thread_local Bin* Log::bin_ = nullptr;
thread_local void* Log::bin_owner_ = nullptr;
std::atomic<uint64_t> Flow::next_flow_id_{1};
std::atomic<Bin*> Log::free_bins_;
std::atomic<uintptr_t> Log::free_bins_{0};
std::string Log::GenerateJson() {
std::vector<RecordedEvent> events;
RingBuffer<RecordedEvent, Log::kMaxEventsPerCpu>* other;
for (auto& fragment : fragments_) {
MutexLock lock(&fragment.mu);
for (auto it = fragment.events.begin(); it != fragment.events.end(); ++it) {
{
MutexLock lock(&fragment.mu);
other = fragment.active;
if (fragment.active == &fragment.primary) {
fragment.active = &fragment.secondary;
} else {
fragment.active = &fragment.primary;
}
}
for (auto it = other->begin(); it != other->end(); ++it) {
events.push_back(*it);
}
fragment.events.Clear();
other->Clear();
}
absl::optional<std::chrono::steady_clock::time_point> start_time;
for (auto& event : events) {
@ -113,7 +122,7 @@ void Log::FlushBin(Bin* bin) {
{
MutexLock lock(&fragment.mu);
for (auto event : bin->events) {
fragment.events.Append(RecordedEvent{thread_id, batch_id, event});
fragment.active->Append(RecordedEvent{thread_id, batch_id, event});
}
}
bin->events.clear();

@ -18,6 +18,9 @@
#include <grpc/support/port_platform.h>
#ifdef GRPC_ENABLE_LATENT_SEE
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <chrono>
#include <cstdint>
@ -27,12 +30,17 @@
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "src/core/lib/gprpp/per_cpu.h"
#include "src/core/lib/gprpp/sync.h"
#include "src/core/util/ring_buffer.h"
#define TAGGED_POINTER_SIZE_BITS 48
namespace grpc_core {
namespace latent_see {
@ -59,47 +67,59 @@ struct Bin {
}
std::vector<Event> events;
Bin* next_free;
uintptr_t next_free = 0;
};
class Log {
public:
static constexpr int kMaxEventsPerCpu = 50000;
static Bin* MaybeStartBin(void* owner) {
if (bin_ != nullptr) return bin_;
Bin* bin = free_bins_.load(std::memory_order_acquire);
static constexpr uintptr_t kTagMask = (1ULL << TAGGED_POINTER_SIZE_BITS) - 1;
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION static uintptr_t IncrementTag(
uintptr_t input) {
return input + (1UL << TAGGED_POINTER_SIZE_BITS);
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION static Bin* ToBin(uintptr_t ptr) {
return reinterpret_cast<Bin*>(ptr & kTagMask);
}
static uintptr_t StartBin(void* owner) {
uintptr_t bin_descriptor = free_bins_.load(std::memory_order_acquire);
Bin* bin;
do {
if (bin == nullptr) {
if (bin_descriptor == 0) {
bin = new Bin();
break;
}
} while (!free_bins_.compare_exchange_weak(bin, bin->next_free,
std::memory_order_acq_rel));
bin = ToBin(bin_descriptor);
} while (!free_bins_.compare_exchange_strong(bin_descriptor, bin->next_free,
std::memory_order_acq_rel));
bin_ = bin;
bin_owner_ = owner;
return bin;
return reinterpret_cast<uintptr_t>(bin);
}
static void EndBin(void* owner) {
if (bin_owner_ != owner) return;
FlushBin(bin_);
bin_->next_free = free_bins_.load(std::memory_order_acquire);
while (!free_bins_.compare_exchange_weak(bin_->next_free, bin_,
std::memory_order_acq_rel)) {
static void EndBin(uintptr_t bin_descriptor, void* owner) {
if (bin_owner_ != owner || bin_descriptor == 0) return;
FlushBin(ToBin(bin_descriptor));
uintptr_t next_free = free_bins_.load(std::memory_order_acquire);
while (!free_bins_.compare_exchange_strong(
next_free, IncrementTag(bin_descriptor), std::memory_order_acq_rel)) {
}
bin_ = nullptr;
bin_owner_ = nullptr;
}
static Bin* CurrentThreadBin() { return bin_; }
private:
Log() = default;
static void FlushBin(Bin* bin);
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION static Log& Get() {
static Log* log = []() {
atexit([] {
if (log->stats_flusher_ != nullptr) {
log->stats_flusher_(log->GenerateJson());
return;
}
LOG(INFO) << "Writing latent_see.json in " << get_current_dir_name();
FILE* f = fopen("latent_see.json", "w");
if (f == nullptr) return;
@ -113,6 +133,16 @@ class Log {
std::string GenerateJson();
void OverrideStatsFlusher(
absl::AnyInvocable<void(absl::string_view)> stats_exporter) {
stats_flusher_ = std::move(stats_exporter);
}
private:
Log() = default;
static void FlushBin(Bin* bin);
struct RecordedEvent {
uint64_t thread_id;
uint64_t batch_id;
@ -123,10 +153,15 @@ class Log {
static thread_local uint64_t thread_id_;
static thread_local Bin* bin_;
static thread_local void* bin_owner_;
static std::atomic<Bin*> free_bins_;
static std::atomic<uintptr_t> free_bins_;
absl::AnyInvocable<void(absl::string_view)> stats_flusher_ = nullptr;
struct Fragment {
Fragment() : active(&primary){};
Mutex mu;
RingBuffer<RecordedEvent, Log::kMaxEventsPerCpu> events ABSL_GUARDED_BY(mu);
RingBuffer<RecordedEvent, Log::kMaxEventsPerCpu>* active
ABSL_GUARDED_BY(mu);
RingBuffer<RecordedEvent, Log::kMaxEventsPerCpu> primary;
RingBuffer<RecordedEvent, Log::kMaxEventsPerCpu> secondary;
};
PerCpu<Fragment> fragments_{PerCpuOptions()};
};
@ -136,11 +171,17 @@ class Scope {
public:
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION explicit Scope(const Metadata* metadata)
: metadata_(metadata) {
bin_ = Log::CurrentThreadBin();
if (kParent && bin_ == nullptr) {
bin_descriptor_ = Log::StartBin(this);
bin_ = Log::ToBin(bin_descriptor_);
}
CHECK_NE(bin_, nullptr);
bin_->Append(metadata_, EventType::kBegin, 0);
}
GPR_ATTRIBUTE_ALWAYS_INLINE_FUNCTION ~Scope() {
bin_->Append(metadata_, EventType::kEnd, 0);
if (kParent) Log::EndBin(this);
if (kParent) Log::EndBin(bin_descriptor_, this);
}
Scope(const Scope&) = delete;
@ -148,8 +189,8 @@ class Scope {
private:
const Metadata* const metadata_;
Bin* const bin_ =
kParent ? Log::MaybeStartBin(this) : Log::CurrentThreadBin();
uintptr_t bin_descriptor_ = 0;
Bin* bin_ = nullptr;
};
using ParentScope = Scope<true>;

@ -47,6 +47,7 @@
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/examine_stack.h"
extern int gpr_should_log(gpr_log_severity severity);
extern void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);

@ -83,10 +83,6 @@ void gpr_log_message(const char* file, int line, gpr_log_severity severity,
}
void gpr_log_verbosity_init(void) {
// This is enabled in Github only.
// This ifndef is converted to ifdef internally by copybara.
// Internally grpc verbosity is managed using absl settings.
// So internally we avoid setting it like this.
#ifndef GRPC_VERBOSITY_MACRO
// SetMinLogLevel sets the value for the entire binary, not just gRPC.
// This setting will change things for other libraries/code that is unrelated
@ -119,10 +115,3 @@ void gpr_log_verbosity_init(void) {
}
#endif // GRPC_VERBOSITY_MACRO
}
// Deprecated setter for a custom gpr logging function.  The supplied function
// is intentionally ignored; an error is logged directing callers to install
// an absl LogSink instead (see the gRFC referenced in the message).
void gpr_set_log_function([[maybe_unused]] gpr_log_func deprecated_setting) {
  LOG(ERROR)
      << "This function is deprecated. This function will be deleted in the "
         "next gRPC release. You may create a new absl LogSink with similar "
         "functionality. gRFC: https://github.com/grpc/proposal/pull/425 ";
}

@ -38,6 +38,7 @@
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/gprpp/examine_stack.h"
extern int gpr_should_log(gpr_log_severity severity);
extern void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);

@ -33,6 +33,7 @@
#include "src/core/lib/gprpp/examine_stack.h"
#include "src/core/util/string.h"
extern int gpr_should_log(gpr_log_severity severity);
extern void gpr_log_message(const char* file, int line,
gpr_log_severity severity, const char* message);

@ -198,11 +198,10 @@ absl::StatusOr<std::string> GetBootstrapContents(const char* fallback_config) {
// First, try GRPC_XDS_BOOTSTRAP env var.
auto path = GetEnv("GRPC_XDS_BOOTSTRAP");
if (path.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: "
<< *path;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "Got bootstrap file location from GRPC_XDS_BOOTSTRAP "
"environment variable: "
<< *path;
auto contents = LoadFile(*path, /*add_null_terminator=*/true);
if (!contents.ok()) return contents.status();
return std::string(contents->as_string_view());
@ -257,9 +256,8 @@ absl::StatusOr<RefCountedPtr<GrpcXdsClient>> GrpcXdsClient::GetOrCreate(
// Find bootstrap contents.
auto bootstrap_contents = GetBootstrapContents(g_fallback_bootstrap_config);
if (!bootstrap_contents.ok()) return bootstrap_contents.status();
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "xDS bootstrap contents: " << *bootstrap_contents;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "xDS bootstrap contents: " << *bootstrap_contents;
// Parse bootstrap.
auto bootstrap = GrpcXdsBootstrap::Create(*bootstrap_contents);
if (!bootstrap.ok()) return bootstrap.status();
@ -313,10 +311,8 @@ GrpcXdsClient::GrpcXdsClient(
.certificate_providers())),
stats_plugin_group_(GetStatsPluginGroupForKey(key_)),
registered_metric_callback_(stats_plugin_group_.RegisterCallback(
[xds_client = WeakRefAsSubclass<GrpcXdsClient>(
DEBUG_LOCATION, "GrpcXdsClient Metric Callback")](
CallbackMetricReporter& reporter) {
xds_client->ReportCallbackMetrics(reporter);
[this](CallbackMetricReporter& reporter) {
ReportCallbackMetrics(reporter);
},
Duration::Seconds(5), kMetricConnected, kMetricResources)) {}

@ -599,11 +599,9 @@ void XdsClient::XdsChannel::SetHealthyLocked() {
auto channel_it = std::find(channels.begin(), channels.end(), this);
// Skip if this is not on the list
if (channel_it != channels.end()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] authority "
<< authority.first << ": Falling forward to "
<< server_.server_uri();
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] authority "
<< authority.first << ": Falling forward to " << server_.server_uri();
// Lower priority channels are no longer needed, connection is back!
channels.erase(channel_it + 1, channels.end());
}
@ -711,11 +709,10 @@ void XdsClient::XdsChannel::RetryableCall<T>::StartNewCallLocked() {
if (shutting_down_) return;
CHECK(xds_channel_->transport_ != nullptr);
CHECK(call_ == nullptr);
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_channel()->xds_client()
<< "] xds server " << xds_channel()->server_.server_uri()
<< ": start new call from retryable call " << this;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_channel()->xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": start new call from retryable call " << this;
call_ = MakeOrphanable<T>(
this->Ref(DEBUG_LOCATION, "RetryableCall+start_new_call"));
}
@ -747,11 +744,10 @@ void XdsClient::XdsChannel::RetryableCall<T>::OnRetryTimer() {
if (timer_handle_.has_value()) {
timer_handle_.reset();
if (shutting_down_) return;
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_channel()->xds_client()
<< "] xds server " << xds_channel()->server_.server_uri()
<< ": retry timer fired (retryable call: " << this << ")";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_channel()->xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": retry timer fired (retryable call: " << this << ")";
StartNewCallLocked();
}
}
@ -945,11 +941,9 @@ void XdsClient::XdsChannel::AdsCall::AdsResponseParser::ParseResource(
if (resource_state.resource != nullptr &&
result_.type->ResourcesEqual(resource_state.resource.get(),
decode_result.resource->get())) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] " << result_.type_url
<< " resource " << resource_name
<< " identical to current, ignoring.";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] " << result_.type_url
<< " resource " << resource_name << " identical to current, ignoring.";
return;
}
// Update the resource state.
@ -1490,11 +1484,10 @@ void XdsClient::XdsChannel::LrsCall::OnRecvMessage(absl::string_view payload) {
if (send_all_clusters == send_all_clusters_ &&
cluster_names_ == new_cluster_names &&
load_reporting_interval_ == new_load_reporting_interval) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": incoming LRS response identical to current, ignoring.";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client() << "] xds server "
<< xds_channel()->server_.server_uri()
<< ": incoming LRS response identical to current, ignoring.";
return;
}
// If the interval has changed, we'll need to restart the timer below.
@ -1559,9 +1552,8 @@ XdsClient::XdsClient(
work_serializer_(engine),
engine_(std::move(engine)),
metrics_reporter_(std::move(metrics_reporter)) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] creating xds client";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] creating xds client";
CHECK(bootstrap_ != nullptr);
if (bootstrap_->node() != nullptr) {
GRPC_TRACE_LOG(xds_client, INFO)
@ -1571,15 +1563,13 @@ XdsClient::XdsClient(
}
XdsClient::~XdsClient() {
  // Trace-log destruction once.  The previous code emitted this message twice
  // (a GRPC_TRACE_FLAG_ENABLED-guarded LOG(INFO) followed by an identical
  // GRPC_TRACE_LOG statement).
  GRPC_TRACE_LOG(xds_client, INFO)
      << "[xds_client " << this << "] destroying xds client";
}
void XdsClient::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] shutting down xds client";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] shutting down xds client";
MutexLock lock(&mu_);
shutting_down_ = true;
// Clear cache and any remaining watchers that may not have been cancelled.
@ -1721,11 +1711,10 @@ void XdsClient::WatchResource(const XdsResourceType* type,
DEBUG_LOCATION);
} else if (resource_state.meta.client_status ==
XdsApi::ResourceMetadata::NACKED) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] reporting cached validation failure for " << name
<< ": " << resource_state.meta.failed_details;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this
<< "] reporting cached validation failure for " << name << ": "
<< resource_state.meta.failed_details;
std::string details = resource_state.meta.failed_details;
const auto* node = bootstrap_->node();
if (node != nullptr) {
@ -1744,11 +1733,9 @@ void XdsClient::WatchResource(const XdsResourceType* type,
}
absl::Status channel_status = authority_state.xds_channels.back()->status();
if (!channel_status.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this
<< "] returning cached channel error for " << name << ": "
<< channel_status;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] returning cached channel error for "
<< name << ": " << channel_status;
work_serializer_.Schedule(
[watcher = std::move(watcher), status = std::move(channel_status)]()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(&work_serializer_) mutable {
@ -2047,9 +2034,8 @@ void XdsClient::NotifyWatchersOnResourceDoesNotExist(
XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
const XdsBootstrap::XdsServer& xds_server, bool send_all_clusters,
const std::set<std::string>& clusters) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] start building load report";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] start building load report";
XdsApi::ClusterLoadReportMap snapshot_map;
auto server_it = xds_load_report_server_map_.find(xds_server.Key());
if (server_it == xds_load_report_server_map_.end()) return snapshot_map;
@ -2074,11 +2060,10 @@ XdsApi::ClusterLoadReportMap XdsClient::BuildLoadReportSnapshotLocked(
if (load_report.drop_stats != nullptr) {
snapshot.dropped_requests +=
load_report.drop_stats->GetSnapshotAndReset();
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << this << "] cluster=" << cluster_key.first
<< " eds_service_name=" << cluster_key.second
<< " drop_stats=" << load_report.drop_stats;
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << this << "] cluster=" << cluster_key.first
<< " eds_service_name=" << cluster_key.second
<< " drop_stats=" << load_report.drop_stats;
}
// Aggregate locality stats.
for (auto it = load_report.locality_stats.begin();

@ -51,19 +51,17 @@ XdsClusterDropStats::XdsClusterDropStats(RefCountedPtr<XdsClient> xds_client,
lrs_server_(lrs_server),
cluster_name_(cluster_name),
eds_service_name_(eds_service_name) {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get() << "] created drop stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_
<< ", " << eds_service_name_ << "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] created drop stats " << this
<< " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << "}";
}
XdsClusterDropStats::~XdsClusterDropStats() {
if (GRPC_TRACE_FLAG_ENABLED(xds_client)) {
LOG(INFO) << "[xds_client " << xds_client_.get()
<< "] destroying drop stats " << this << " for {" << lrs_server_
<< ", " << cluster_name_ << ", " << eds_service_name_ << "}";
}
GRPC_TRACE_LOG(xds_client, INFO)
<< "[xds_client " << xds_client_.get() << "] destroying drop stats "
<< this << " for {" << lrs_server_ << ", " << cluster_name_ << ", "
<< eds_service_name_ << "}";
xds_client_->RemoveClusterDropStats(lrs_server_, cluster_name_,
eds_service_name_, this);
xds_client_.reset(DEBUG_LOCATION, "DropStats");

@ -0,0 +1,36 @@
// Copyright 2024 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include <grpcpp/support/global_callback_hook.h>
namespace grpc {

// Process-wide callback hook, starting out as DefaultGlobalCallbackHook.
// absl::NoDestructor keeps the shared_ptr alive through process teardown so
// the hook is never observed in a destroyed state at exit.
static absl::NoDestructor<std::shared_ptr<GlobalCallbackHook>> g_callback_hook(
    std::make_shared<DefaultGlobalCallbackHook>());

// Returns the currently installed hook (never null: it is initialized to the
// default hook and SetGlobalCallbackHook rejects null).
std::shared_ptr<GlobalCallbackHook> GetGlobalCallbackHook() {
  return *g_callback_hook;
}

// Installs `hook`, taking ownership of the raw pointer.  `hook` must be
// non-null and must differ from the hook currently installed.
void SetGlobalCallbackHook(GlobalCallbackHook* hook) {
  CHECK(hook != nullptr);
  CHECK(hook != g_callback_hook->get());
  // Adopt `hook`; the previous hook is released when its last outstanding
  // shared_ptr reference goes away.
  g_callback_hook->reset(hook);
}

}  // namespace grpc

@ -75,7 +75,6 @@ grpc_cc_library(
"//src/core:channel_fwd",
"//src/core:channel_stack_type",
"//src/core:context",
"//src/core:default_event_engine",
"//src/core:error",
"//src/core:experiments",
"//src/core:match",

@ -37,7 +37,6 @@
#include "src/core/client_channel/client_channel_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gprpp/match.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/telemetry/call_tracer.h"
@ -367,8 +366,7 @@ OpenTelemetryPluginImpl::OpenTelemetryPluginImpl(
absl::AnyInvocable<
bool(const OpenTelemetryPluginBuilder::ChannelScope& /*scope*/) const>
channel_scope_filter)
: event_engine_(grpc_event_engine::experimental::GetDefaultEventEngine()),
meter_provider_(std::move(meter_provider)),
: meter_provider_(std::move(meter_provider)),
server_selector_(std::move(server_selector)),
target_attribute_filter_(std::move(target_attribute_filter)),
generic_method_attribute_filter_(
@ -577,6 +575,7 @@ OpenTelemetryPluginImpl::~OpenTelemetryPluginImpl() {
[](const std::unique_ptr<opentelemetry::metrics::Histogram<double>>&) {
},
[](const std::unique_ptr<CallbackGaugeState<int64_t>>& state) {
CHECK(state->caches.empty());
if (state->ot_callback_registered) {
state->instrument->RemoveCallback(
&CallbackGaugeState<int64_t>::CallbackGaugeCallback,
@ -585,6 +584,7 @@ OpenTelemetryPluginImpl::~OpenTelemetryPluginImpl() {
}
},
[](const std::unique_ptr<CallbackGaugeState<double>>& state) {
CHECK(state->caches.empty());
if (state->ot_callback_registered) {
state->instrument->RemoveCallback(
&CallbackGaugeState<double>::CallbackGaugeCallback,
@ -855,9 +855,6 @@ void OpenTelemetryPluginImpl::AddCallback(
void OpenTelemetryPluginImpl::RemoveCallback(
grpc_core::RegisteredMetricCallback* callback) {
std::vector<
absl::variant<CallbackGaugeState<int64_t>*, CallbackGaugeState<double>*>>
gauges_that_need_to_remove_callback;
{
grpc_core::MutexLock lock(&mu_);
callback_timestamps_.erase(callback);
@ -902,6 +899,13 @@ void OpenTelemetryPluginImpl::RemoveCallback(
}
}
}
// Note that we are not removing the callback from OpenTelemetry immediately,
// and instead remove it when the plugin is destroyed. We just have a single
// callback per OpenTelemetry instrument which is a small number. If we decide
// to remove the callback immediately at this point, we need to make sure that
// 1) the callback is removed without holding mu_ and 2) we make sure that
// this does not race against a possible `AddCallback` operation. A potential
// way to do this is to use WorkSerializer.
}
template <typename ValueType>

@ -481,7 +481,6 @@ class OpenTelemetryPluginImpl
ABSL_EXCLUSIVE_LOCKS_REQUIRED(ot_plugin->mu_);
};
std::shared_ptr<grpc_event_engine::experimental::EventEngine> event_engine_;
// Instruments for per-call metrics.
ClientMetrics client_;
ServerMetrics server_;

@ -70,30 +70,26 @@ void ServerMetricRecorder::UpdateBackendMetricDataState(
void ServerMetricRecorder::SetCpuUtilization(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->cpu_utilization = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization set: " << value;
}
void ServerMetricRecorder::SetMemoryUtilization(double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->mem_utilization = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization set: " << value;
}
void ServerMetricRecorder::SetApplicationUtilization(double value) {
@ -105,37 +101,30 @@ void ServerMetricRecorder::SetApplicationUtilization(double value) {
UpdateBackendMetricDataState([value](BackendMetricData* data) {
data->application_utilization = value;
});
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization set: " << value;
}
void ServerMetricRecorder::SetQps(double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->qps = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this << "] QPS set: " << value;
}
void ServerMetricRecorder::SetEps(double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS rejected: " << value;
return;
}
UpdateBackendMetricDataState(
[value](BackendMetricData* data) { data->eps = value; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS set: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO) << "[" << this << "] EPS set: " << value;
}
void ServerMetricRecorder::SetNamedUtilization(string_ref name, double value) {
@ -171,39 +160,34 @@ void ServerMetricRecorder::SetAllNamedUtilization(
void ServerMetricRecorder::ClearCpuUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->cpu_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization cleared.";
}
void ServerMetricRecorder::ClearMemoryUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->mem_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization cleared.";
}
void ServerMetricRecorder::ClearApplicationUtilization() {
UpdateBackendMetricDataState(
[](BackendMetricData* data) { data->application_utilization = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization cleared.";
}
void ServerMetricRecorder::ClearQps() {
UpdateBackendMetricDataState([](BackendMetricData* data) { data->qps = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS utilization cleared.";
}
void ServerMetricRecorder::ClearEps() {
UpdateBackendMetricDataState([](BackendMetricData* data) { data->eps = -1; });
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS utilization cleared.";
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS utilization cleared.";
}
void ServerMetricRecorder::ClearNamedUtilization(string_ref name) {
@ -245,30 +229,26 @@ ServerMetricRecorder::GetMetricsIfChanged() const {
experimental::CallMetricRecorder&
BackendMetricState::RecordCpuUtilizationMetric(double value) {
if (!IsUtilizationWithSoftLimitsValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization value rejected: " << value;
return *this;
}
cpu_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] CPU utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] CPU utilization recorded: " << value;
return *this;
}
experimental::CallMetricRecorder&
BackendMetricState::RecordMemoryUtilizationMetric(double value) {
if (!IsUtilizationValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization value rejected: " << value;
return *this;
}
mem_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Mem utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Mem utilization recorded: " << value;
return *this;
}
@ -280,39 +260,34 @@ BackendMetricState::RecordApplicationUtilizationMetric(double value) {
return *this;
}
application_utilization_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] Application utilization recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] Application utilization recorded: " << value;
return *this;
}
experimental::CallMetricRecorder& BackendMetricState::RecordQpsMetric(
double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS value rejected: " << value;
return *this;
}
qps_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] QPS recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] QPS recorded: " << value;
return *this;
}
experimental::CallMetricRecorder& BackendMetricState::RecordEpsMetric(
double value) {
if (!IsRateValid(value)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS value rejected: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS value rejected: " << value;
return *this;
}
eps_.store(value, std::memory_order_relaxed);
if (GRPC_TRACE_FLAG_ENABLED(backend_metric)) {
LOG(INFO) << "[" << this << "] EPS recorded: " << value;
}
GRPC_TRACE_LOG(backend_metric, INFO)
<< "[" << this << "] EPS recorded: " << value;
return *this;
}

@ -30,7 +30,6 @@ cdef class _CallState:
cdef object call_tracer_capsule
cdef void maybe_save_registered_method(self, bytes method_name) except *
cdef void maybe_set_client_call_tracer_on_call(self, bytes method_name, bytes target) except *
cdef void maybe_delete_call_tracer(self) except *
cdef void delete_call(self) except *

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save