[logging] Centralize configuration for trace flags (#36576)

All TraceFlags are now configured in `src/core/lib/debug/trace_flags.yaml`. The format is:

```
my_flag:
  default: false                   # the flag's default value; the key itself defaults to false if omitted
  description: Some Description
  debug_only: false                # debug_only flags take effect only in debug builds; defaults to false
  internal: false                  # internal flags are omitted from the generated documentation; defaults to false
```
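
For example, a minimal entry that only sets a description (a hypothetical `my_new_tracer` flag, shown purely to illustrate the schema, assuming omitted fields take the defaults noted above) would look like:

```
my_new_tracer:
  description: Traces my hypothetical new subsystem.
```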

To regenerate the trace flag source code, run `tools/codegen/core/gen_trace_flags.py` (requires the Mako templating library). This script is also run as part of the sanity checks.

This PR also adds two new features:

### Glob-based flag configuration

Trace flag configuration now supports `?` (matches any single character) and `*` (matches zero or more characters). For example, setting `GRPC_TRACE='event_engine*'` enables every flag matching that glob; it expands to the following (see the usage sketch after the list):

* event_engine
* event_engine_client_channel_resolver
* event_engine_dns
* event_engine_endpoint
* event_engine_endpoint_data
* event_engine_poller
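
Globs compose with the existing `-` prefix for disabling individual tracers, so (as a sketch, using flag names from the generated `doc/trace_flags.md`) you could enable all EventEngine tracers except the verbose endpoint data dump with:

```
export GRPC_TRACE='event_engine*,-event_engine_endpoint_data'
```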

### A cleaner trace-logging macro in the Abseil logging format

If your goal is simply to add log statements that fire only when the `fault_injection_filter` trace flag is enabled, you can use the macro:

```
GRPC_TRACE_LOG(fault_injection_filter, INFO) << "Filtered:" << 42;
```

When the trace flag is enabled, the log output will look something like this:
```
I0000 00:00:1715733657.430042      16 file.cc:174] Filtered:42
```

----

Note: just as with the gpr_log to Abseil logging conversion, pre-existing trace-logging usages can be migrated to the new tracing macro across multiple PRs.
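
As a rough illustration of that migration (a hedged sketch of a hypothetical call site; the old-style flag object name `grpc_fault_injection_filter_trace` is representative of the pre-existing pattern, not quoted from any specific file):

```
// Pre-existing pattern: an explicit flag check guarding a gpr_log call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_fault_injection_filter_trace)) {
  gpr_log(GPR_INFO, "Filtered: %d", 42);
}

// Equivalent with the new macro: the flag check and the log statement are
// folded into a single Abseil-style streaming statement.
GRPC_TRACE_LOG(fault_injection_filter, INFO) << "Filtered: " << 42;
```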

Closes #36576

PiperOrigin-RevId: 641295215
pull/36856/head
AJ Heller authored 6 months ago; committed by Copybara-Service
parent d63dcc188b
commit c9fdef1317
Files changed in this commit (number of changed lines in parentheses):

  1. .gitattributes (4)
  2. BUILD (56)
  3. CMakeLists.txt (334)
  4. Makefile (12)
  5. Package.swift (21)
  6. build_autogenerated.yaml (401)
  7. config.m4 (12)
  8. config.w32 (12)
  9. doc/environment_variables.md (101)
  10. doc/trace_flags.md (120)
  11. gRPC-C++.podspec (18)
  12. gRPC-Core.podspec (30)
  13. grpc.gemspec (21)
  14. package.xml (21)
  15. src/core/BUILD (98)
  16. src/core/client_channel/client_channel.cc (224)
  17. src/core/client_channel/client_channel_filter.cc (554)
  18. src/core/client_channel/load_balanced_call_destination.cc (49)
  19. src/core/client_channel/retry_filter.cc (2)
  20. src/core/client_channel/retry_filter.h (3)
  21. src/core/client_channel/retry_filter_legacy_call_data.cc (109)
  22. src/core/client_channel/subchannel.cc (16)
  23. src/core/client_channel/subchannel_pool_interface.cc (2)
  24. src/core/client_channel/subchannel_pool_interface.h (4)
  25. src/core/ext/filters/backend_metrics/backend_metric_filter.cc (8)
  26. src/core/ext/filters/channel_idle/legacy_channel_idle_filter.cc (3)
  27. src/core/ext/filters/fault_injection/fault_injection_filter.cc (3)
  28. src/core/ext/filters/http/message_compress/compression_filter.cc (9)
  29. src/core/ext/filters/http/server/http_server_filter.cc (3)
  30. src/core/ext/filters/message_size/message_size_filter.cc (3)
  31. src/core/ext/filters/stateful_session/stateful_session_filter.cc (1)
  32. src/core/ext/transport/chaotic_good/chaotic_good_transport.cc (21)
  33. src/core/ext/transport/chaotic_good/chaotic_good_transport.h (8)
  34. src/core/ext/transport/chaotic_good/client/chaotic_good_connector.cc (2)
  35. src/core/ext/transport/chaotic_good/client_transport.cc (4)
  36. src/core/ext/transport/chaotic_good/server/chaotic_good_server.cc (18)
  37. src/core/ext/transport/chaotic_good/server_transport.cc (12)
  38. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (57)
  39. src/core/ext/transport/chttp2/transport/chttp2_transport.h (5)
  40. src/core/ext/transport/chttp2/transport/flow_control.cc (4)
  41. src/core/ext/transport/chttp2/transport/flow_control.h (2)
  42. src/core/ext/transport/chttp2/transport/frame_ping.cc (10)
  43. src/core/ext/transport/chttp2/transport/frame_rst_stream.cc (3)
  44. src/core/ext/transport/chttp2/transport/frame_settings.cc (7)
  45. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (3)
  46. src/core/ext/transport/chttp2/transport/hpack_parser.cc (6)
  47. src/core/ext/transport/chttp2/transport/hpack_parser_table.cc (9)
  48. src/core/ext/transport/chttp2/transport/http_trace.cc (19)
  49. src/core/ext/transport/chttp2/transport/http_trace.h (24)
  50. src/core/ext/transport/chttp2/transport/internal.h (5)
  51. src/core/ext/transport/chttp2/transport/parsing.cc (24)
  52. src/core/ext/transport/chttp2/transport/ping_callbacks.cc (2)
  53. src/core/ext/transport/chttp2/transport/ping_callbacks.h (2)
  54. src/core/ext/transport/chttp2/transport/stream_lists.cc (8)
  55. src/core/ext/transport/chttp2/transport/writing.cc (33)
  56. src/core/ext/transport/cronet/transport/cronet_transport.cc (3)
  57. src/core/ext/transport/inproc/inproc_plugin.cc (23)
  58. src/core/ext/transport/inproc/inproc_transport.h (2)
  59. src/core/ext/transport/inproc/legacy_inproc_transport.cc (6)
  60. src/core/ext/transport/inproc/legacy_inproc_transport.h (2)
  61. src/core/handshaker/handshaker.cc (13)
  62. src/core/handshaker/security/secure_endpoint.cc (13)
  63. src/core/handshaker/security/secure_endpoint.h (2)
  64. src/core/lib/channel/channel_stack.cc (5)
  65. src/core/lib/channel/channel_stack.h (4)
  66. src/core/lib/channel/channel_stack_builder_impl.cc (1)
  67. src/core/lib/channel/channel_stack_trace.cc (19)
  68. src/core/lib/channel/channel_stack_trace.h (24)
  69. src/core/lib/channel/connected_channel.cc (1)
  70. src/core/lib/channel/promise_based_filter.cc (80)
  71. src/core/lib/debug/trace.cc (103)
  72. src/core/lib/debug/trace.h (99)
  73. src/core/lib/debug/trace_flags.cc (242)
  74. src/core/lib/debug/trace_flags.h (115)
  75. src/core/lib/debug/trace_flags.yaml (363)
  76. src/core/lib/debug/trace_impl.h (115)
  77. src/core/lib/event_engine/ares_resolver.cc (7)
  78. src/core/lib/event_engine/ares_resolver.h (4)
  79. src/core/lib/event_engine/cf_engine/cf_engine.cc (2)
  80. src/core/lib/event_engine/cf_engine/dns_service_resolver.cc (39)
  81. src/core/lib/event_engine/forkable.cc (9)
  82. src/core/lib/event_engine/forkable.h (11)
  83. src/core/lib/event_engine/posix_engine/posix_engine.cc (10)
  84. src/core/lib/event_engine/posix_engine/timer_manager.cc (10)
  85. src/core/lib/event_engine/trace.cc (25)
  86. src/core/lib/event_engine/trace.h (17)
  87. src/core/lib/event_engine/windows/windows_endpoint.cc (4)
  88. src/core/lib/event_engine/windows/windows_engine.cc (8)
  89. src/core/lib/gprpp/glob.cc (70)
  90. src/core/lib/gprpp/glob.h (19)
  91. src/core/lib/gprpp/work_serializer.cc (100)
  92. src/core/lib/iomgr/call_combiner.cc (40)
  93. src/core/lib/iomgr/call_combiner.h (4)
  94. src/core/lib/iomgr/cfstream_handle.cc (10)
  95. src/core/lib/iomgr/closure.h (6)
  96. src/core/lib/iomgr/combiner.cc (6)
  97. src/core/lib/iomgr/combiner.h (2)
  98. src/core/lib/iomgr/endpoint.cc (2)
  99. src/core/lib/iomgr/endpoint_cfstream.cc (27)
  100. src/core/lib/iomgr/error.cc (4)
Some files were not shown because too many files have changed in this diff.

4
.gitattributes vendored

@ -35,3 +35,7 @@ src/core/lib/experiments/experiments.h linguist-generated=true
src/core/lib/experiments/experiments.cc linguist-generated=true
bazel/experiments.bzl linguist-generated=true
test/cpp/microbenchmarks/huffman_geometries/** linguist-generated=true
doc/trace_flags.md linguist-generated=true
src/core/lib/debug/trace_flags.h linguist-generated=true
src/core/lib/debug/trace_flags.cc linguist-generated=true
src/python/grpcio_observability/observability_lib_deps.py linguist-generated=true

56
BUILD

@ -1408,17 +1408,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "call_trace",
hdrs = [
"//src/core:lib/surface/call_trace.h",
],
language = "c++",
deps = [
"grpc_trace",
],
)
grpc_cc_library(
name = "dynamic_annotations",
hdrs = [
@ -1487,9 +1476,6 @@ grpc_cc_library(
grpc_cc_library(
name = "api_trace",
srcs = [
"//src/core:lib/surface/api_trace.cc",
],
hdrs = [
"//src/core:lib/surface/api_trace.h",
],
@ -1536,7 +1522,6 @@ grpc_cc_library(
"//src/core:lib/iomgr/ev_epoll1_linux.cc",
"//src/core:lib/iomgr/ev_poll_posix.cc",
"//src/core:lib/iomgr/ev_posix.cc",
"//src/core:lib/iomgr/ev_windows.cc",
"//src/core:lib/iomgr/fork_posix.cc",
"//src/core:lib/iomgr/fork_windows.cc",
"//src/core:lib/iomgr/gethostname_fallback.cc",
@ -1714,7 +1699,6 @@ grpc_cc_library(
"//src/core:posix_event_engine_endpoint",
"//src/core:resolved_address",
"//src/core:resource_quota",
"//src/core:resource_quota_trace",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:slice_cast",
@ -2049,7 +2033,6 @@ grpc_cc_library(
deps = [
"api_trace",
"call_combiner",
"call_trace",
"call_tracer",
"channel",
"channel_arg_names",
@ -2086,7 +2069,6 @@ grpc_cc_library(
"//src/core:channel_args_preconditioning",
"//src/core:channel_fwd",
"//src/core:channel_init",
"//src/core:channel_stack_trace",
"//src/core:channel_stack_type",
"//src/core:closure",
"//src/core:compression",
@ -2116,7 +2098,6 @@ grpc_cc_library(
"//src/core:pipe",
"//src/core:poll",
"//src/core:promise_status",
"//src/core:promise_trace",
"//src/core:race",
"//src/core:ref_counted",
"//src/core:seq",
@ -2312,7 +2293,6 @@ grpc_cc_library(
visibility = ["@grpc:public"],
deps = [
"api_trace",
"call_trace",
"channel_arg_names",
"channelz",
"config",
@ -2348,7 +2328,6 @@ grpc_cc_library(
"//src/core:poll",
"//src/core:ref_counted",
"//src/core:resource_quota",
"//src/core:resource_quota_trace",
"//src/core:seq",
"//src/core:slice",
"//src/core:slice_refcount",
@ -3006,11 +2985,19 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_trace",
srcs = ["//src/core:lib/debug/trace.cc"],
hdrs = ["//src/core:lib/debug/trace.h"],
srcs = [
"//src/core:lib/debug/trace.cc",
"//src/core:lib/debug/trace_flags.cc",
],
hdrs = [
"//src/core:lib/debug/trace.h",
"//src/core:lib/debug/trace_flags.h",
"//src/core:lib/debug/trace_impl.h",
],
external_deps = [
"absl/log:log",
"absl/log",
"absl/strings",
"absl/container:flat_hash_map",
],
language = "c++",
visibility = ["@grpc:trace"],
@ -3018,6 +3005,8 @@ grpc_cc_library(
"config_vars",
"gpr",
"grpc_public_hdrs",
"//src/core:glob",
"//src/core:no_destruct",
],
)
@ -4314,7 +4303,6 @@ grpc_cc_library(
language = "c++",
visibility = ["@grpc:http"],
deps = [
"call_trace",
"call_tracer",
"channel_arg_names",
"config",
@ -4531,20 +4519,6 @@ grpc_cc_library(
deps = ["gpr"],
)
grpc_cc_library(
name = "http_trace",
srcs = [
"//src/core:ext/transport/chttp2/transport/http_trace.cc",
],
hdrs = [
"//src/core:ext/transport/chttp2/transport/http_trace.h",
],
deps = [
"gpr_platform",
"grpc_trace",
],
)
grpc_cc_library(
name = "hpack_parser_table",
srcs = [
@ -4565,7 +4539,6 @@ grpc_cc_library(
"gpr_platform",
"grpc_trace",
"hpack_parse_result",
"http_trace",
"//src/core:hpack_constants",
"//src/core:metadata_batch",
"//src/core:no_destruct",
@ -4668,7 +4641,6 @@ grpc_cc_library(
"grpc_base",
"grpc_public_hdrs",
"grpc_trace",
"http_trace",
"//src/core:hpack_constants",
"//src/core:hpack_encoder_table",
"//src/core:metadata_batch",
@ -4801,7 +4773,6 @@ grpc_cc_library(
"hpack_encoder",
"hpack_parser",
"hpack_parser_table",
"http_trace",
"httpcli",
"iomgr",
"iomgr_buffer_list",
@ -4838,7 +4809,6 @@ grpc_cc_library(
"//src/core:random_early_detection",
"//src/core:ref_counted",
"//src/core:resource_quota",
"//src/core:resource_quota_trace",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:slice_refcount",

334
CMakeLists.txt generated

File diff suppressed because it is too large.

12
Makefile generated

@ -724,7 +724,6 @@ LIBGRPC_SRC = \
src/core/ext/transport/chttp2/transport/hpack_parser.cc \
src/core/ext/transport/chttp2/transport/hpack_parser_table.cc \
src/core/ext/transport/chttp2/transport/http2_settings.cc \
src/core/ext/transport/chttp2/transport/http_trace.cc \
src/core/ext/transport/chttp2/transport/huffsyms.cc \
src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc \
src/core/ext/transport/chttp2/transport/parsing.cc \
@ -735,7 +734,6 @@ LIBGRPC_SRC = \
src/core/ext/transport/chttp2/transport/varint.cc \
src/core/ext/transport/chttp2/transport/write_size_policy.cc \
src/core/ext/transport/chttp2/transport/writing.cc \
src/core/ext/transport/inproc/inproc_plugin.cc \
src/core/ext/transport/inproc/inproc_transport.cc \
src/core/ext/transport/inproc/legacy_inproc_transport.cc \
src/core/ext/upb-gen/envoy/admin/v3/certs.upb_minitable.c \
@ -1073,7 +1071,6 @@ LIBGRPC_SRC = \
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
src/core/lib/channel/channel_stack_builder_impl.cc \
src/core/lib/channel/channel_stack_trace.cc \
src/core/lib/channel/connected_channel.cc \
src/core/lib/channel/promise_based_filter.cc \
src/core/lib/channel/status_util.cc \
@ -1086,6 +1083,7 @@ LIBGRPC_SRC = \
src/core/lib/config/load_config.cc \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/trace.cc \
src/core/lib/debug/trace_flags.cc \
src/core/lib/event_engine/ares_resolver.cc \
src/core/lib/event_engine/cf_engine/cf_engine.cc \
src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc \
@ -1124,7 +1122,6 @@ LIBGRPC_SRC = \
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/trace.cc \
src/core/lib/event_engine/utils.cc \
src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc \
src/core/lib/event_engine/windows/iocp.cc \
@ -1140,6 +1137,7 @@ LIBGRPC_SRC = \
src/core/lib/gprpp/dump_args.cc \
src/core/lib/gprpp/examine_stack.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/glob.cc \
src/core/lib/gprpp/host_port.cc \
src/core/lib/gprpp/linux/env.cc \
src/core/lib/gprpp/load_file.cc \
@ -1179,7 +1177,6 @@ LIBGRPC_SRC = \
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
src/core/lib/iomgr/ev_posix.cc \
src/core/lib/iomgr/ev_windows.cc \
src/core/lib/iomgr/event_engine_shims/closure.cc \
src/core/lib/iomgr/event_engine_shims/endpoint.cc \
src/core/lib/iomgr/event_engine_shims/tcp_client.cc \
@ -1244,7 +1241,6 @@ LIBGRPC_SRC = \
src/core/lib/promise/activity.cc \
src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resource_quota/api.cc \
src/core/lib/resource_quota/arena.cc \
src/core/lib/resource_quota/connection_quota.cc \
@ -1252,7 +1248,6 @@ LIBGRPC_SRC = \
src/core/lib/resource_quota/periodic_update.cc \
src/core/lib/resource_quota/resource_quota.cc \
src/core/lib/resource_quota/thread_quota.cc \
src/core/lib/resource_quota/trace.cc \
src/core/lib/security/authorization/audit_logging.cc \
src/core/lib/security/authorization/authorization_policy_provider_vtable.cc \
src/core/lib/security/authorization/evaluate_args.cc \
@ -1318,9 +1313,7 @@ LIBGRPC_SRC = \
src/core/lib/slice/percent_encoding.cc \
src/core/lib/slice/slice.cc \
src/core/lib/slice/slice_buffer.cc \
src/core/lib/slice/slice_refcount.cc \
src/core/lib/slice/slice_string_helpers.cc \
src/core/lib/surface/api_trace.cc \
src/core/lib/surface/byte_buffer.cc \
src/core/lib/surface/byte_buffer_reader.cc \
src/core/lib/surface/call.cc \
@ -1411,7 +1404,6 @@ LIBGRPC_SRC = \
src/core/resolver/sockaddr/sockaddr_resolver.cc \
src/core/resolver/xds/xds_dependency_manager.cc \
src/core/resolver/xds/xds_resolver.cc \
src/core/resolver/xds/xds_resolver_trace.cc \
src/core/server/server.cc \
src/core/server/server_call_tracer_filter.cc \
src/core/server/server_config_selector_filter.cc \

21
Package.swift generated

@ -237,8 +237,6 @@ let package = Package(
"src/core/ext/transport/chttp2/transport/hpack_parser_table.h",
"src/core/ext/transport/chttp2/transport/http2_settings.cc",
"src/core/ext/transport/chttp2/transport/http2_settings.h",
"src/core/ext/transport/chttp2/transport/http_trace.cc",
"src/core/ext/transport/chttp2/transport/http_trace.h",
"src/core/ext/transport/chttp2/transport/huffsyms.cc",
"src/core/ext/transport/chttp2/transport/huffsyms.h",
"src/core/ext/transport/chttp2/transport/internal.h",
@ -258,7 +256,6 @@ let package = Package(
"src/core/ext/transport/chttp2/transport/write_size_policy.cc",
"src/core/ext/transport/chttp2/transport/write_size_policy.h",
"src/core/ext/transport/chttp2/transport/writing.cc",
"src/core/ext/transport/inproc/inproc_plugin.cc",
"src/core/ext/transport/inproc/inproc_transport.cc",
"src/core/ext/transport/inproc/inproc_transport.h",
"src/core/ext/transport/inproc/legacy_inproc_transport.cc",
@ -1103,8 +1100,6 @@ let package = Package(
"src/core/lib/channel/channel_stack_builder.h",
"src/core/lib/channel/channel_stack_builder_impl.cc",
"src/core/lib/channel/channel_stack_builder_impl.h",
"src/core/lib/channel/channel_stack_trace.cc",
"src/core/lib/channel/channel_stack_trace.h",
"src/core/lib/channel/connected_channel.cc",
"src/core/lib/channel/connected_channel.h",
"src/core/lib/channel/promise_based_filter.cc",
@ -1127,6 +1122,9 @@ let package = Package(
"src/core/lib/debug/event_log.h",
"src/core/lib/debug/trace.cc",
"src/core/lib/debug/trace.h",
"src/core/lib/debug/trace_flags.cc",
"src/core/lib/debug/trace_flags.h",
"src/core/lib/debug/trace_impl.h",
"src/core/lib/event_engine/ares_resolver.cc",
"src/core/lib/event_engine/ares_resolver.h",
"src/core/lib/event_engine/cf_engine/cf_engine.cc",
@ -1219,7 +1217,6 @@ let package = Package(
"src/core/lib/event_engine/thready_event_engine/thready_event_engine.h",
"src/core/lib/event_engine/time_util.cc",
"src/core/lib/event_engine/time_util.h",
"src/core/lib/event_engine/trace.cc",
"src/core/lib/event_engine/trace.h",
"src/core/lib/event_engine/utils.cc",
"src/core/lib/event_engine/utils.h",
@ -1262,6 +1259,8 @@ let package = Package(
"src/core/lib/gprpp/examine_stack.h",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/fork.h",
"src/core/lib/gprpp/glob.cc",
"src/core/lib/gprpp/glob.h",
"src/core/lib/gprpp/host_port.cc",
"src/core/lib/gprpp/host_port.h",
"src/core/lib/gprpp/if_list.h",
@ -1351,7 +1350,6 @@ let package = Package(
"src/core/lib/iomgr/ev_poll_posix.h",
"src/core/lib/iomgr/ev_posix.cc",
"src/core/lib/iomgr/ev_posix.h",
"src/core/lib/iomgr/ev_windows.cc",
"src/core/lib/iomgr/event_engine_shims/closure.cc",
"src/core/lib/iomgr/event_engine_shims/closure.h",
"src/core/lib/iomgr/event_engine_shims/endpoint.cc",
@ -1494,8 +1492,6 @@ let package = Package(
"src/core/lib/promise/sleep.cc",
"src/core/lib/promise/sleep.h",
"src/core/lib/promise/status_flag.h",
"src/core/lib/promise/trace.cc",
"src/core/lib/promise/trace.h",
"src/core/lib/promise/try_join.h",
"src/core/lib/promise/try_seq.h",
"src/core/lib/resource_quota/api.cc",
@ -1512,8 +1508,6 @@ let package = Package(
"src/core/lib/resource_quota/resource_quota.h",
"src/core/lib/resource_quota/thread_quota.cc",
"src/core/lib/resource_quota/thread_quota.h",
"src/core/lib/resource_quota/trace.cc",
"src/core/lib/resource_quota/trace.h",
"src/core/lib/security/authorization/audit_logging.cc",
"src/core/lib/security/authorization/audit_logging.h",
"src/core/lib/security/authorization/authorization_engine.h",
@ -1642,11 +1636,9 @@ let package = Package(
"src/core/lib/slice/slice_buffer.cc",
"src/core/lib/slice/slice_buffer.h",
"src/core/lib/slice/slice_internal.h",
"src/core/lib/slice/slice_refcount.cc",
"src/core/lib/slice/slice_refcount.h",
"src/core/lib/slice/slice_string_helpers.cc",
"src/core/lib/slice/slice_string_helpers.h",
"src/core/lib/surface/api_trace.cc",
"src/core/lib/surface/api_trace.h",
"src/core/lib/surface/byte_buffer.cc",
"src/core/lib/surface/byte_buffer_reader.cc",
@ -1655,7 +1647,6 @@ let package = Package(
"src/core/lib/surface/call_details.cc",
"src/core/lib/surface/call_log_batch.cc",
"src/core/lib/surface/call_test_only.h",
"src/core/lib/surface/call_trace.h",
"src/core/lib/surface/call_utils.cc",
"src/core/lib/surface/call_utils.h",
"src/core/lib/surface/channel.cc",
@ -1823,8 +1814,6 @@ let package = Package(
"src/core/resolver/xds/xds_dependency_manager.h",
"src/core/resolver/xds/xds_resolver.cc",
"src/core/resolver/xds/xds_resolver_attributes.h",
"src/core/resolver/xds/xds_resolver_trace.cc",
"src/core/resolver/xds/xds_resolver_trace.h",
"src/core/server/server.cc",
"src/core/server/server.h",
"src/core/server/server_call_tracer_filter.cc",

401
build_autogenerated.yaml

File diff suppressed because it is too large.

12
config.m4 generated

@ -99,7 +99,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/transport/chttp2/transport/hpack_parser.cc \
src/core/ext/transport/chttp2/transport/hpack_parser_table.cc \
src/core/ext/transport/chttp2/transport/http2_settings.cc \
src/core/ext/transport/chttp2/transport/http_trace.cc \
src/core/ext/transport/chttp2/transport/huffsyms.cc \
src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.cc \
src/core/ext/transport/chttp2/transport/parsing.cc \
@ -110,7 +109,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/ext/transport/chttp2/transport/varint.cc \
src/core/ext/transport/chttp2/transport/write_size_policy.cc \
src/core/ext/transport/chttp2/transport/writing.cc \
src/core/ext/transport/inproc/inproc_plugin.cc \
src/core/ext/transport/inproc/inproc_transport.cc \
src/core/ext/transport/inproc/legacy_inproc_transport.cc \
src/core/ext/upb-gen/envoy/admin/v3/certs.upb_minitable.c \
@ -448,7 +446,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/channel/channel_stack.cc \
src/core/lib/channel/channel_stack_builder.cc \
src/core/lib/channel/channel_stack_builder_impl.cc \
src/core/lib/channel/channel_stack_trace.cc \
src/core/lib/channel/connected_channel.cc \
src/core/lib/channel/promise_based_filter.cc \
src/core/lib/channel/status_util.cc \
@ -461,6 +458,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/config/load_config.cc \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/trace.cc \
src/core/lib/debug/trace_flags.cc \
src/core/lib/event_engine/ares_resolver.cc \
src/core/lib/event_engine/cf_engine/cf_engine.cc \
src/core/lib/event_engine/cf_engine/cfstream_endpoint.cc \
@ -499,7 +497,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/event_engine/thread_pool/work_stealing_thread_pool.cc \
src/core/lib/event_engine/thready_event_engine/thready_event_engine.cc \
src/core/lib/event_engine/time_util.cc \
src/core/lib/event_engine/trace.cc \
src/core/lib/event_engine/utils.cc \
src/core/lib/event_engine/windows/grpc_polled_fd_windows.cc \
src/core/lib/event_engine/windows/iocp.cc \
@ -515,6 +512,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/gprpp/dump_args.cc \
src/core/lib/gprpp/examine_stack.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/glob.cc \
src/core/lib/gprpp/host_port.cc \
src/core/lib/gprpp/linux/env.cc \
src/core/lib/gprpp/load_file.cc \
@ -554,7 +552,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/iomgr/ev_epoll1_linux.cc \
src/core/lib/iomgr/ev_poll_posix.cc \
src/core/lib/iomgr/ev_posix.cc \
src/core/lib/iomgr/ev_windows.cc \
src/core/lib/iomgr/event_engine_shims/closure.cc \
src/core/lib/iomgr/event_engine_shims/endpoint.cc \
src/core/lib/iomgr/event_engine_shims/tcp_client.cc \
@ -619,7 +616,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/promise/activity.cc \
src/core/lib/promise/party.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/promise/trace.cc \
src/core/lib/resource_quota/api.cc \
src/core/lib/resource_quota/arena.cc \
src/core/lib/resource_quota/connection_quota.cc \
@ -627,7 +623,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/resource_quota/periodic_update.cc \
src/core/lib/resource_quota/resource_quota.cc \
src/core/lib/resource_quota/thread_quota.cc \
src/core/lib/resource_quota/trace.cc \
src/core/lib/security/authorization/audit_logging.cc \
src/core/lib/security/authorization/authorization_policy_provider_vtable.cc \
src/core/lib/security/authorization/evaluate_args.cc \
@ -693,9 +688,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/slice/percent_encoding.cc \
src/core/lib/slice/slice.cc \
src/core/lib/slice/slice_buffer.cc \
src/core/lib/slice/slice_refcount.cc \
src/core/lib/slice/slice_string_helpers.cc \
src/core/lib/surface/api_trace.cc \
src/core/lib/surface/byte_buffer.cc \
src/core/lib/surface/byte_buffer_reader.cc \
src/core/lib/surface/call.cc \
@ -786,7 +779,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/resolver/sockaddr/sockaddr_resolver.cc \
src/core/resolver/xds/xds_dependency_manager.cc \
src/core/resolver/xds/xds_resolver.cc \
src/core/resolver/xds/xds_resolver_trace.cc \
src/core/server/server.cc \
src/core/server/server_call_tracer_filter.cc \
src/core/server/server_config_selector_filter.cc \

12
config.w32 generated

@ -64,7 +64,6 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\transport\\chttp2\\transport\\hpack_parser.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\hpack_parser_table.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\http2_settings.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\http_trace.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\huffsyms.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\max_concurrent_streams_policy.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\parsing.cc " +
@ -75,7 +74,6 @@ if (PHP_GRPC != "no") {
"src\\core\\ext\\transport\\chttp2\\transport\\varint.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\write_size_policy.cc " +
"src\\core\\ext\\transport\\chttp2\\transport\\writing.cc " +
"src\\core\\ext\\transport\\inproc\\inproc_plugin.cc " +
"src\\core\\ext\\transport\\inproc\\inproc_transport.cc " +
"src\\core\\ext\\transport\\inproc\\legacy_inproc_transport.cc " +
"src\\core\\ext\\upb-gen\\envoy\\admin\\v3\\certs.upb_minitable.c " +
@ -413,7 +411,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\channel\\channel_stack.cc " +
"src\\core\\lib\\channel\\channel_stack_builder.cc " +
"src\\core\\lib\\channel\\channel_stack_builder_impl.cc " +
"src\\core\\lib\\channel\\channel_stack_trace.cc " +
"src\\core\\lib\\channel\\connected_channel.cc " +
"src\\core\\lib\\channel\\promise_based_filter.cc " +
"src\\core\\lib\\channel\\status_util.cc " +
@ -426,6 +423,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\config\\load_config.cc " +
"src\\core\\lib\\debug\\event_log.cc " +
"src\\core\\lib\\debug\\trace.cc " +
"src\\core\\lib\\debug\\trace_flags.cc " +
"src\\core\\lib\\event_engine\\ares_resolver.cc " +
"src\\core\\lib\\event_engine\\cf_engine\\cf_engine.cc " +
"src\\core\\lib\\event_engine\\cf_engine\\cfstream_endpoint.cc " +
@ -464,7 +462,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\event_engine\\thread_pool\\work_stealing_thread_pool.cc " +
"src\\core\\lib\\event_engine\\thready_event_engine\\thready_event_engine.cc " +
"src\\core\\lib\\event_engine\\time_util.cc " +
"src\\core\\lib\\event_engine\\trace.cc " +
"src\\core\\lib\\event_engine\\utils.cc " +
"src\\core\\lib\\event_engine\\windows\\grpc_polled_fd_windows.cc " +
"src\\core\\lib\\event_engine\\windows\\iocp.cc " +
@ -480,6 +477,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\gprpp\\dump_args.cc " +
"src\\core\\lib\\gprpp\\examine_stack.cc " +
"src\\core\\lib\\gprpp\\fork.cc " +
"src\\core\\lib\\gprpp\\glob.cc " +
"src\\core\\lib\\gprpp\\host_port.cc " +
"src\\core\\lib\\gprpp\\linux\\env.cc " +
"src\\core\\lib\\gprpp\\load_file.cc " +
@ -519,7 +517,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\iomgr\\ev_epoll1_linux.cc " +
"src\\core\\lib\\iomgr\\ev_poll_posix.cc " +
"src\\core\\lib\\iomgr\\ev_posix.cc " +
"src\\core\\lib\\iomgr\\ev_windows.cc " +
"src\\core\\lib\\iomgr\\event_engine_shims\\closure.cc " +
"src\\core\\lib\\iomgr\\event_engine_shims\\endpoint.cc " +
"src\\core\\lib\\iomgr\\event_engine_shims\\tcp_client.cc " +
@ -584,7 +581,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\promise\\activity.cc " +
"src\\core\\lib\\promise\\party.cc " +
"src\\core\\lib\\promise\\sleep.cc " +
"src\\core\\lib\\promise\\trace.cc " +
"src\\core\\lib\\resource_quota\\api.cc " +
"src\\core\\lib\\resource_quota\\arena.cc " +
"src\\core\\lib\\resource_quota\\connection_quota.cc " +
@ -592,7 +588,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\resource_quota\\periodic_update.cc " +
"src\\core\\lib\\resource_quota\\resource_quota.cc " +
"src\\core\\lib\\resource_quota\\thread_quota.cc " +
"src\\core\\lib\\resource_quota\\trace.cc " +
"src\\core\\lib\\security\\authorization\\audit_logging.cc " +
"src\\core\\lib\\security\\authorization\\authorization_policy_provider_vtable.cc " +
"src\\core\\lib\\security\\authorization\\evaluate_args.cc " +
@ -658,9 +653,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\slice\\percent_encoding.cc " +
"src\\core\\lib\\slice\\slice.cc " +
"src\\core\\lib\\slice\\slice_buffer.cc " +
"src\\core\\lib\\slice\\slice_refcount.cc " +
"src\\core\\lib\\slice\\slice_string_helpers.cc " +
"src\\core\\lib\\surface\\api_trace.cc " +
"src\\core\\lib\\surface\\byte_buffer.cc " +
"src\\core\\lib\\surface\\byte_buffer_reader.cc " +
"src\\core\\lib\\surface\\call.cc " +
@ -751,7 +744,6 @@ if (PHP_GRPC != "no") {
"src\\core\\resolver\\sockaddr\\sockaddr_resolver.cc " +
"src\\core\\resolver\\xds\\xds_dependency_manager.cc " +
"src\\core\\resolver\\xds\\xds_resolver.cc " +
"src\\core\\resolver\\xds\\xds_resolver_trace.cc " +
"src\\core\\server\\server.cc " +
"src\\core\\server\\server_call_tracer_filter.cc " +
"src\\core\\server\\server_config_selector_filter.cc " +

101
doc/environment_variables.md

@ -41,103 +41,10 @@ some configuration as environment variables that can be set.
- legacy - the (deprecated) original polling engine for gRPC
* GRPC_TRACE
A comma separated list of tracers that provide additional insight into how
gRPC C core is processing requests via debug logs. Available tracers include:
- api - traces api calls to the C core
- bdp_estimator - traces behavior of bdp estimation logic
- call_error - traces the possible errors contributing to final call status
- cares_resolver - traces operations of the c-ares based DNS resolver
- cares_address_sorting - traces operations of the c-ares based DNS
resolver's resolved address sorter
- cds_lb - traces cds LB policy
- channel - traces operations on the C core channel stack
- channel_stack - traces the set of filters in a channel stack upon
construction
- client_channel - traces client channel control plane activity, including
resolver and load balancing policy interaction
- client_channel_call - traces client channel call activity related to name
resolution
- client_channel_lb_call - traces client channel call activity related
to load balancing picking
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- cronet - traces state in the cronet transport engine
- dns_resolver - traces state in the native DNS resolver
- executor - traces grpc's internal thread pool ('the executor')
- glb - traces the grpclb load balancer
- handshaker - traces handshaking state
- health_check_client - traces health checking client code
- http - traces state in the http2 transport engine
- http2_stream_state - traces all http2 stream state mutations.
- http2_ping - traces pings/ping acks/antagonist writes in http2 stack.
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
- http_keepalive - traces gRPC keepalive pings
- flowctl - traces http2 flow control
- op_failure - traces error information when failure is pushed onto a
completion queue
- pick_first - traces the pick first load balancing policy
- plugin_credentials - traces plugin credentials
- pollable_refcount - traces reference counting of 'pollable' objects (only
in DEBUG)
- priority_lb - traces priority LB policy
- resource_quota - trace resource quota objects internals
- ring_hash_lb - traces the ring hash load balancing policy
- rls_lb - traces the RLS load balancing policy
- round_robin - traces the round_robin load balancing policy
- weighted_round_robin_lb - traces the weighted_round_robin load balancing
policy
- queue_pluck
- grpc_authz_api - traces gRPC authorization
- server_channel - lightweight trace of significant server channel events
- secure_endpoint - traces bytes flowing through encrypted channels
- subchannel - traces the connectivity state of subchannel
- subchannel_pool - traces subchannel pool
- timer - timers (alarms) in the grpc internals
- timer_check - more detailed trace of timer logic in grpc internals
- transport_security - traces metadata about secure channel establishment
- tcp - traces bytes in and out of a channel
- tsi - traces tsi transport security
- weighted_target_lb - traces weighted_target LB policy
- xds_client - traces xds client
- xds_cluster_manager_lb - traces cluster manager LB policy
- xds_cluster_impl_lb - traces cluster impl LB policy
- xds_resolver - traces xds resolver
The following tracers will only run in binaries built in DEBUG mode. This is
accomplished by invoking `CONFIG=dbg make <target>`
- metadata - tracks creation and mutation of metadata
- combiner - traces combiner lock state
- call_combiner - traces call combiner state
- closure - tracks closure creation, scheduling, and completion
- fd_trace - traces fd create(), shutdown() and close() calls for channel fds.
- pending_tags - traces still-in-progress tags on completion queues
- polling - traces the selected polling engine
- polling_api - traces the api calls to polling engine
- subchannel_refcount
- queue_refcount
- error_refcount
- stream_refcount
- slice_refcount
- workqueue_refcount
- fd_refcount
- cq_refcount
- auth_context_refcount
- security_connector_refcount
- resolver_refcount
- lb_policy_refcount
- chttp2_refcount
'all' can additionally be used to turn all traces on.
Individual traces can be disabled by prefixing them with '-'.
'refcount' will turn on all of the tracers for refcount debugging.
if 'list_tracers' is present, then all of the available tracers will be
printed when the program starts up.
Example:
export GRPC_TRACE=all,-pending_tags
A comma-separated list of tracer names or glob patterns that provide
additional insight into how gRPC C core is processing requests via debug logs.
Available tracers and their usage can be found in
[gRPC Trace Flags](trace_flags.md)
* GRPC_VERBOSITY
<!-- BEGIN_GOOGLE_INTERNAL_DOCUMENTATION"

120
doc/trace_flags.md generated

@ -0,0 +1,120 @@
<!---
Automatically generated by tools/codegen/core/gen_trace_flags.py
--->
gRPC Trace Flags
----------------
The `GRPC_TRACE` environment variable supports a comma-separated list of tracer
names or glob patterns that provide additional insight into how gRPC C core is
processing requests via debug logs. Available tracers include:
- api - API calls to the C core.
- backend_metric - C++ backend metric recorder APIs.
- backend_metric_filter - Filter that populates backend metric data in server trailing metadata.
- bdp_estimator - Behavior of bdp estimation logic.
- call - Traces operations on a call through the gRPC stack.
- call_error - Possible errors contributing to final call statuses.
- cares_address_sorting - Operations of the c-ares based DNS resolver's address sorter.
- cares_resolver - Operations of the c-ares based DNS resolver.
- cds_lb - CDS LB policy.
- channel - Operations on the C core channel stack.
- channel_stack - Construction of the set of filters in a channel stack.
- chaotic_good - Chaotic good transport.
- chttp2_hpack_parser - HTTP/2 HPACK parser.
- chttp2_new_stream - HTTP/2 incoming stream creation.
- client_channel - Client channel control plane activity, including resolver and load balancing policy interaction.
- client_channel_call - Client channel call activity related to name resolution.
- client_channel_lb_call - Client channel call activity related to load balancing picking.
- client_idle_filter - Client idleness filter.
- compression - Compression operations.
- connectivity_state - Connectivity state changes to channels.
- cronet - Cronet transport engine.
- dns_resolver - The active DNS resolver.
- environment_autodetect - GCP environment auto-detection.
- event_engine - High-level EventEngine operations.
- event_engine_client_channel_resolver - EventEngine-based client channel resolver state and events.
- event_engine_dns - EventEngine DNS resolver.
- event_engine_endpoint - EventEngine Endpoint operations.
- event_engine_endpoint_data - Detailed dump of EventEngine endpoint TCP data.
- event_engine_poller - EventEngine Poller events.
- executor - gRPC's legacy thread pool ('the executor').
- fault_injection_filter - Fault injection.
- flowctl - Http2 flow control.
- fork - Fork support.
- glb - gRPClb load balancer.
- grpc_authz_api - gRPC authorization.
- handshaker - Handshaking state.
- health_check_client - Health checking client code.
- http - Http2 transport engine.
- http1 - HTTP/1.x operations performed by gRPC.
- http2_ping - Pings/ping acks/antagonist writes in http2 stack.
- http2_stream_state - Http2 stream state mutations.
- http_keepalive - gRPC keepalive pings.
- inproc - In-process transport.
- metadata_query - GCP metadata queries.
- op_failure - Error information when failure is pushed onto a completion queue. The `api` tracer must be enabled for this flag to have any effect.
- orca_client - Out-of-band backend metric reporting client.
- outlier_detection_lb - Outlier detection.
- pick_first - Pick first load balancing policy.
- plugin_credentials - Plugin credentials.
- priority_lb - Priority LB policy.
- queue_pluck - Completion queue plucking. The `api` tracer must be enabled for this flag to have any effect.
- resource_quota - Resource quota objects internals.
- retry - Call retries.
- ring_hash_lb - Ring hash load balancing policy.
- rls_lb - RLS load balancing policy.
- round_robin - Round robin load balancing policy.
- secure_endpoint - Bytes flowing through encrypted channels.
- server_channel - Lightweight trace of significant server channel events.
- stateful_session_filter - Stateful session affinity.
- subchannel - Connectivity state of subchannels.
- subchannel_pool - Subchannel pool.
- tcp - Bytes in and out of a channel.
- timer - Timers (alarms) in the grpc internals.
- timer_check - more detailed trace of timer logic in grpc internals.
- tsi - TSI transport security.
- weighted_round_robin_lb - Weighted round robin load balancing policy.
- weighted_target_lb - Weighted target LB policy.
- xds_client - XDS client.
- xds_client_refcount - Refcount of XDS client.
- xds_cluster_impl_lb - XDS Cluster impl LB policy.
- xds_cluster_manager_lb - XDS Cluster manager LB policy.
- xds_override_host_lb - XDS Override host LB.
- xds_resolver - XDS Resolver.
- xds_server_config_fetcher - XDS Server config fetcher.
- xds_wrr_locality_lb - XDS WRR locality LB policy.
The following tracers will only run in binaries built in DEBUG mode. This is
accomplished by invoking `bazel build --config=dbg <target>`
- auth_context_refcount - Auth context refcounting.
- call_combiner - Call combiner state.
- call_refcount - Refcount on call.
- closure - Legacy closure creation, scheduling, and completion.
- combiner - Combiner lock state.
- cq_refcount - Completion queue refcounting.
- error_refcount - Error refcounting.
- fd_refcount - File descriptor refcounting.
- fd_trace - Legacy file descriptor create(), shutdown() and close() calls for channel fds.
- lb_policy_refcount - LB policy refcounting.
- party_state - Coordination of activities related to a call.
- pending_tags - Still-in-progress tags on completion queues. The `api` tracer must be enabled for this flag to have any effect.
- polling - The active polling engine.
- polling_api - API calls to polling engine.
- promise_primitives - Low-level primitives in the promise library.
- resolver_refcount - Resolver refcouting.
- security_connector_refcount - Refcounting for security connectors (part of channel credentials).
- slice_refcount - Slice refcounting.
- stream_refcount - Stream refcounting.
- subchannel_refcount - Subchannel refcounting.
- work_serializer - A synchronization mechanism used to ensure that only one thread is executing at a given time.
Glob patterns and special cases:
- `*` can be used to turn all traces on.
- Individual traces can be disabled by prefixing them with `-`.
- `*refcount*` will turn on all of the tracers for refcount debugging.
- if `list_tracers` is present, then all of the available tracers will be
printed when the program starts up.
Example:
export GRPC_TRACE=*,-pending_tags

18
gRPC-C++.podspec generated

@ -365,7 +365,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser.h',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http_trace.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
@ -882,7 +881,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
'src/core/lib/channel/channel_stack_builder_impl.h',
'src/core/lib/channel/channel_stack_trace.h',
'src/core/lib/channel/connected_channel.h',
'src/core/lib/channel/promise_based_filter.h',
'src/core/lib/channel/status_util.h',
@ -893,6 +891,8 @@ Pod::Spec.new do |s|
'src/core/lib/config/load_config.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/trace.h',
'src/core/lib/debug/trace_flags.h',
'src/core/lib/debug/trace_impl.h',
'src/core/lib/event_engine/ares_resolver.h',
'src/core/lib/event_engine/cf_engine/cf_engine.h',
'src/core/lib/event_engine/cf_engine/cfstream_endpoint.h',
@ -974,6 +974,7 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/env.h',
'src/core/lib/gprpp/examine_stack.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/glob.h',
'src/core/lib/gprpp/host_port.h',
'src/core/lib/gprpp/if_list.h',
'src/core/lib/gprpp/load_file.h',
@ -1102,7 +1103,6 @@ Pod::Spec.new do |s|
'src/core/lib/promise/seq.h',
'src/core/lib/promise/sleep.h',
'src/core/lib/promise/status_flag.h',
'src/core/lib/promise/trace.h',
'src/core/lib/promise/try_join.h',
'src/core/lib/promise/try_seq.h',
'src/core/lib/resource_quota/api.h',
@ -1112,7 +1112,6 @@ Pod::Spec.new do |s|
'src/core/lib/resource_quota/periodic_update.h',
'src/core/lib/resource_quota/resource_quota.h',
'src/core/lib/resource_quota/thread_quota.h',
'src/core/lib/resource_quota/trace.h',
'src/core/lib/security/authorization/audit_logging.h',
'src/core/lib/security/authorization/authorization_engine.h',
'src/core/lib/security/authorization/authorization_policy_provider.h',
@ -1177,7 +1176,6 @@ Pod::Spec.new do |s|
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
@ -1260,7 +1258,6 @@ Pod::Spec.new do |s|
'src/core/resolver/server_address.h',
'src/core/resolver/xds/xds_dependency_manager.h',
'src/core/resolver/xds/xds_resolver_attributes.h',
'src/core/resolver/xds/xds_resolver_trace.h',
'src/core/server/server.h',
'src/core/server/server_call_tracer_filter.h',
'src/core/server/server_config_selector.h',
@ -1641,7 +1638,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser.h',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http_trace.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
@ -2158,7 +2154,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
'src/core/lib/channel/channel_stack_builder_impl.h',
'src/core/lib/channel/channel_stack_trace.h',
'src/core/lib/channel/connected_channel.h',
'src/core/lib/channel/promise_based_filter.h',
'src/core/lib/channel/status_util.h',
@ -2169,6 +2164,8 @@ Pod::Spec.new do |s|
'src/core/lib/config/load_config.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/trace.h',
'src/core/lib/debug/trace_flags.h',
'src/core/lib/debug/trace_impl.h',
'src/core/lib/event_engine/ares_resolver.h',
'src/core/lib/event_engine/cf_engine/cf_engine.h',
'src/core/lib/event_engine/cf_engine/cfstream_endpoint.h',
@ -2250,6 +2247,7 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/env.h',
'src/core/lib/gprpp/examine_stack.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/glob.h',
'src/core/lib/gprpp/host_port.h',
'src/core/lib/gprpp/if_list.h',
'src/core/lib/gprpp/load_file.h',
@ -2378,7 +2376,6 @@ Pod::Spec.new do |s|
'src/core/lib/promise/seq.h',
'src/core/lib/promise/sleep.h',
'src/core/lib/promise/status_flag.h',
'src/core/lib/promise/trace.h',
'src/core/lib/promise/try_join.h',
'src/core/lib/promise/try_seq.h',
'src/core/lib/resource_quota/api.h',
@ -2388,7 +2385,6 @@ Pod::Spec.new do |s|
'src/core/lib/resource_quota/periodic_update.h',
'src/core/lib/resource_quota/resource_quota.h',
'src/core/lib/resource_quota/thread_quota.h',
'src/core/lib/resource_quota/trace.h',
'src/core/lib/security/authorization/audit_logging.h',
'src/core/lib/security/authorization/authorization_engine.h',
'src/core/lib/security/authorization/authorization_policy_provider.h',
@ -2453,7 +2449,6 @@ Pod::Spec.new do |s|
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
@ -2536,7 +2531,6 @@ Pod::Spec.new do |s|
'src/core/resolver/server_address.h',
'src/core/resolver/xds/xds_dependency_manager.h',
'src/core/resolver/xds/xds_resolver_attributes.h',
'src/core/resolver/xds/xds_resolver_trace.h',
'src/core/server/server.h',
'src/core/server/server_call_tracer_filter.h',
'src/core/server/server_config_selector.h',

30
gRPC-Core.podspec generated

@ -356,8 +356,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http_trace.cc',
'src/core/ext/transport/chttp2/transport/http_trace.h',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
@ -377,7 +375,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/write_size_policy.cc',
'src/core/ext/transport/chttp2/transport/write_size_policy.h',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/transport/inproc/inproc_transport.h',
'src/core/ext/transport/inproc/legacy_inproc_transport.cc',
@ -1222,8 +1219,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/channel_stack_builder.h',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_stack_builder_impl.h',
'src/core/lib/channel/channel_stack_trace.cc',
'src/core/lib/channel/channel_stack_trace.h',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/connected_channel.h',
'src/core/lib/channel/promise_based_filter.cc',
@ -1246,6 +1241,9 @@ Pod::Spec.new do |s|
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/trace.cc',
'src/core/lib/debug/trace.h',
'src/core/lib/debug/trace_flags.cc',
'src/core/lib/debug/trace_flags.h',
'src/core/lib/debug/trace_impl.h',
'src/core/lib/event_engine/ares_resolver.cc',
'src/core/lib/event_engine/ares_resolver.h',
'src/core/lib/event_engine/cf_engine/cf_engine.cc',
@ -1338,7 +1336,6 @@ Pod::Spec.new do |s|
'src/core/lib/event_engine/thready_event_engine/thready_event_engine.h',
'src/core/lib/event_engine/time_util.cc',
'src/core/lib/event_engine/time_util.h',
'src/core/lib/event_engine/trace.cc',
'src/core/lib/event_engine/trace.h',
'src/core/lib/event_engine/utils.cc',
'src/core/lib/event_engine/utils.h',
@ -1381,6 +1378,8 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/examine_stack.h',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/glob.cc',
'src/core/lib/gprpp/glob.h',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/host_port.h',
'src/core/lib/gprpp/if_list.h',
@ -1470,7 +1469,6 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/ev_poll_posix.h',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_posix.h',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine_shims/closure.cc',
'src/core/lib/iomgr/event_engine_shims/closure.h',
'src/core/lib/iomgr/event_engine_shims/endpoint.cc',
@ -1613,8 +1611,6 @@ Pod::Spec.new do |s|
'src/core/lib/promise/sleep.cc',
'src/core/lib/promise/sleep.h',
'src/core/lib/promise/status_flag.h',
'src/core/lib/promise/trace.cc',
'src/core/lib/promise/trace.h',
'src/core/lib/promise/try_join.h',
'src/core/lib/promise/try_seq.h',
'src/core/lib/resource_quota/api.cc',
@ -1631,8 +1627,6 @@ Pod::Spec.new do |s|
'src/core/lib/resource_quota/resource_quota.h',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/thread_quota.h',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/resource_quota/trace.h',
'src/core/lib/security/authorization/audit_logging.cc',
'src/core/lib/security/authorization/audit_logging.h',
'src/core/lib/security/authorization/authorization_engine.h',
@ -1757,11 +1751,9 @@ Pod::Spec.new do |s|
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer.h',
'src/core/lib/slice/slice_internal.h',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_refcount.h',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/slice/slice_string_helpers.h',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
@ -1770,7 +1762,6 @@ Pod::Spec.new do |s|
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.cc',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.cc',
@ -1938,8 +1929,6 @@ Pod::Spec.new do |s|
'src/core/resolver/xds/xds_dependency_manager.h',
'src/core/resolver/xds/xds_resolver.cc',
'src/core/resolver/xds/xds_resolver_attributes.h',
'src/core/resolver/xds/xds_resolver_trace.cc',
'src/core/resolver/xds/xds_resolver_trace.h',
'src/core/server/server.cc',
'src/core/server/server.h',
'src/core/server/server_call_tracer_filter.cc',
@ -2423,7 +2412,6 @@ Pod::Spec.new do |s|
'src/core/ext/transport/chttp2/transport/hpack_parser.h',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.h',
'src/core/ext/transport/chttp2/transport/http2_settings.h',
'src/core/ext/transport/chttp2/transport/http_trace.h',
'src/core/ext/transport/chttp2/transport/huffsyms.h',
'src/core/ext/transport/chttp2/transport/internal.h',
'src/core/ext/transport/chttp2/transport/legacy_frame.h',
@ -2940,7 +2928,6 @@ Pod::Spec.new do |s|
'src/core/lib/channel/channel_stack.h',
'src/core/lib/channel/channel_stack_builder.h',
'src/core/lib/channel/channel_stack_builder_impl.h',
'src/core/lib/channel/channel_stack_trace.h',
'src/core/lib/channel/connected_channel.h',
'src/core/lib/channel/promise_based_filter.h',
'src/core/lib/channel/status_util.h',
@ -2951,6 +2938,8 @@ Pod::Spec.new do |s|
'src/core/lib/config/load_config.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/trace.h',
'src/core/lib/debug/trace_flags.h',
'src/core/lib/debug/trace_impl.h',
'src/core/lib/event_engine/ares_resolver.h',
'src/core/lib/event_engine/cf_engine/cf_engine.h',
'src/core/lib/event_engine/cf_engine/cfstream_endpoint.h',
@ -3032,6 +3021,7 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/env.h',
'src/core/lib/gprpp/examine_stack.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/glob.h',
'src/core/lib/gprpp/host_port.h',
'src/core/lib/gprpp/if_list.h',
'src/core/lib/gprpp/load_file.h',
@ -3160,7 +3150,6 @@ Pod::Spec.new do |s|
'src/core/lib/promise/seq.h',
'src/core/lib/promise/sleep.h',
'src/core/lib/promise/status_flag.h',
'src/core/lib/promise/trace.h',
'src/core/lib/promise/try_join.h',
'src/core/lib/promise/try_seq.h',
'src/core/lib/resource_quota/api.h',
@ -3170,7 +3159,6 @@ Pod::Spec.new do |s|
'src/core/lib/resource_quota/periodic_update.h',
'src/core/lib/resource_quota/resource_quota.h',
'src/core/lib/resource_quota/thread_quota.h',
'src/core/lib/resource_quota/trace.h',
'src/core/lib/security/authorization/audit_logging.h',
'src/core/lib/security/authorization/authorization_engine.h',
'src/core/lib/security/authorization/authorization_policy_provider.h',
@ -3235,7 +3223,6 @@ Pod::Spec.new do |s|
'src/core/lib/surface/api_trace.h',
'src/core/lib/surface/call.h',
'src/core/lib/surface/call_test_only.h',
'src/core/lib/surface/call_trace.h',
'src/core/lib/surface/call_utils.h',
'src/core/lib/surface/channel.h',
'src/core/lib/surface/channel_create.h',
@ -3318,7 +3305,6 @@ Pod::Spec.new do |s|
'src/core/resolver/server_address.h',
'src/core/resolver/xds/xds_dependency_manager.h',
'src/core/resolver/xds/xds_resolver_attributes.h',
'src/core/resolver/xds/xds_resolver_trace.h',
'src/core/server/server.h',
'src/core/server/server_call_tracer_filter.h',
'src/core/server/server_config_selector.h',

21
grpc.gemspec generated

@ -243,8 +243,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/transport/chttp2/transport/hpack_parser_table.h )
s.files += %w( src/core/ext/transport/chttp2/transport/http2_settings.cc )
s.files += %w( src/core/ext/transport/chttp2/transport/http2_settings.h )
s.files += %w( src/core/ext/transport/chttp2/transport/http_trace.cc )
s.files += %w( src/core/ext/transport/chttp2/transport/http_trace.h )
s.files += %w( src/core/ext/transport/chttp2/transport/huffsyms.cc )
s.files += %w( src/core/ext/transport/chttp2/transport/huffsyms.h )
s.files += %w( src/core/ext/transport/chttp2/transport/internal.h )
@ -264,7 +262,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/ext/transport/chttp2/transport/write_size_policy.cc )
s.files += %w( src/core/ext/transport/chttp2/transport/write_size_policy.h )
s.files += %w( src/core/ext/transport/chttp2/transport/writing.cc )
s.files += %w( src/core/ext/transport/inproc/inproc_plugin.cc )
s.files += %w( src/core/ext/transport/inproc/inproc_transport.cc )
s.files += %w( src/core/ext/transport/inproc/inproc_transport.h )
s.files += %w( src/core/ext/transport/inproc/legacy_inproc_transport.cc )
@ -1109,8 +1106,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/channel/channel_stack_builder.h )
s.files += %w( src/core/lib/channel/channel_stack_builder_impl.cc )
s.files += %w( src/core/lib/channel/channel_stack_builder_impl.h )
s.files += %w( src/core/lib/channel/channel_stack_trace.cc )
s.files += %w( src/core/lib/channel/channel_stack_trace.h )
s.files += %w( src/core/lib/channel/connected_channel.cc )
s.files += %w( src/core/lib/channel/connected_channel.h )
s.files += %w( src/core/lib/channel/promise_based_filter.cc )
@ -1133,6 +1128,9 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/debug/event_log.h )
s.files += %w( src/core/lib/debug/trace.cc )
s.files += %w( src/core/lib/debug/trace.h )
s.files += %w( src/core/lib/debug/trace_flags.cc )
s.files += %w( src/core/lib/debug/trace_flags.h )
s.files += %w( src/core/lib/debug/trace_impl.h )
s.files += %w( src/core/lib/event_engine/ares_resolver.cc )
s.files += %w( src/core/lib/event_engine/ares_resolver.h )
s.files += %w( src/core/lib/event_engine/cf_engine/cf_engine.cc )
@ -1225,7 +1223,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/event_engine/thready_event_engine/thready_event_engine.h )
s.files += %w( src/core/lib/event_engine/time_util.cc )
s.files += %w( src/core/lib/event_engine/time_util.h )
s.files += %w( src/core/lib/event_engine/trace.cc )
s.files += %w( src/core/lib/event_engine/trace.h )
s.files += %w( src/core/lib/event_engine/utils.cc )
s.files += %w( src/core/lib/event_engine/utils.h )
@ -1268,6 +1265,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gprpp/examine_stack.h )
s.files += %w( src/core/lib/gprpp/fork.cc )
s.files += %w( src/core/lib/gprpp/fork.h )
s.files += %w( src/core/lib/gprpp/glob.cc )
s.files += %w( src/core/lib/gprpp/glob.h )
s.files += %w( src/core/lib/gprpp/host_port.cc )
s.files += %w( src/core/lib/gprpp/host_port.h )
s.files += %w( src/core/lib/gprpp/if_list.h )
@ -1357,7 +1356,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/ev_poll_posix.h )
s.files += %w( src/core/lib/iomgr/ev_posix.cc )
s.files += %w( src/core/lib/iomgr/ev_posix.h )
s.files += %w( src/core/lib/iomgr/ev_windows.cc )
s.files += %w( src/core/lib/iomgr/event_engine_shims/closure.cc )
s.files += %w( src/core/lib/iomgr/event_engine_shims/closure.h )
s.files += %w( src/core/lib/iomgr/event_engine_shims/endpoint.cc )
@ -1500,8 +1498,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/promise/sleep.cc )
s.files += %w( src/core/lib/promise/sleep.h )
s.files += %w( src/core/lib/promise/status_flag.h )
s.files += %w( src/core/lib/promise/trace.cc )
s.files += %w( src/core/lib/promise/trace.h )
s.files += %w( src/core/lib/promise/try_join.h )
s.files += %w( src/core/lib/promise/try_seq.h )
s.files += %w( src/core/lib/resource_quota/api.cc )
@ -1518,8 +1514,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/resource_quota/resource_quota.h )
s.files += %w( src/core/lib/resource_quota/thread_quota.cc )
s.files += %w( src/core/lib/resource_quota/thread_quota.h )
s.files += %w( src/core/lib/resource_quota/trace.cc )
s.files += %w( src/core/lib/resource_quota/trace.h )
s.files += %w( src/core/lib/security/authorization/audit_logging.cc )
s.files += %w( src/core/lib/security/authorization/audit_logging.h )
s.files += %w( src/core/lib/security/authorization/authorization_engine.h )
@ -1644,11 +1638,9 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/slice/slice_buffer.cc )
s.files += %w( src/core/lib/slice/slice_buffer.h )
s.files += %w( src/core/lib/slice/slice_internal.h )
s.files += %w( src/core/lib/slice/slice_refcount.cc )
s.files += %w( src/core/lib/slice/slice_refcount.h )
s.files += %w( src/core/lib/slice/slice_string_helpers.cc )
s.files += %w( src/core/lib/slice/slice_string_helpers.h )
s.files += %w( src/core/lib/surface/api_trace.cc )
s.files += %w( src/core/lib/surface/api_trace.h )
s.files += %w( src/core/lib/surface/byte_buffer.cc )
s.files += %w( src/core/lib/surface/byte_buffer_reader.cc )
@ -1657,7 +1649,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/surface/call_details.cc )
s.files += %w( src/core/lib/surface/call_log_batch.cc )
s.files += %w( src/core/lib/surface/call_test_only.h )
s.files += %w( src/core/lib/surface/call_trace.h )
s.files += %w( src/core/lib/surface/call_utils.cc )
s.files += %w( src/core/lib/surface/call_utils.h )
s.files += %w( src/core/lib/surface/channel.cc )
@ -1825,8 +1816,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/resolver/xds/xds_dependency_manager.h )
s.files += %w( src/core/resolver/xds/xds_resolver.cc )
s.files += %w( src/core/resolver/xds/xds_resolver_attributes.h )
s.files += %w( src/core/resolver/xds/xds_resolver_trace.cc )
s.files += %w( src/core/resolver/xds/xds_resolver_trace.h )
s.files += %w( src/core/server/server.cc )
s.files += %w( src/core/server/server.h )
s.files += %w( src/core/server/server_call_tracer_filter.cc )

21 package.xml generated

@ -225,8 +225,6 @@
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/hpack_parser_table.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/http2_settings.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/http2_settings.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/http_trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/http_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/huffsyms.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/huffsyms.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/internal.h" role="src" />
@ -246,7 +244,6 @@
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/write_size_policy.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/write_size_policy.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/chttp2/transport/writing.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_plugin.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_transport.cc" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/inproc_transport.h" role="src" />
<file baseinstalldir="/" name="src/core/ext/transport/inproc/legacy_inproc_transport.cc" role="src" />
@ -1091,8 +1088,6 @@
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder_impl.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_builder_impl.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/channel_stack_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/connected_channel.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/connected_channel.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/channel/promise_based_filter.cc" role="src" />
@ -1115,6 +1110,9 @@
<file baseinstalldir="/" name="src/core/lib/debug/event_log.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace_flags.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace_flags.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/trace_impl.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/ares_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/ares_resolver.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/cf_engine/cf_engine.cc" role="src" />
@ -1207,7 +1205,6 @@
<file baseinstalldir="/" name="src/core/lib/event_engine/thready_event_engine/thready_event_engine.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/time_util.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/time_util.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/utils.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/event_engine/utils.h" role="src" />
@ -1250,6 +1247,8 @@
<file baseinstalldir="/" name="src/core/lib/gprpp/examine_stack.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/glob.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/glob.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/host_port.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/host_port.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/if_list.h" role="src" />
@ -1339,7 +1338,6 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_poll_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/ev_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/event_engine_shims/closure.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/event_engine_shims/closure.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/event_engine_shims/endpoint.cc" role="src" />
@ -1482,8 +1480,6 @@
<file baseinstalldir="/" name="src/core/lib/promise/sleep.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/sleep.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/status_flag.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/try_join.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/try_seq.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/resource_quota/api.cc" role="src" />
@ -1500,8 +1496,6 @@
<file baseinstalldir="/" name="src/core/lib/resource_quota/resource_quota.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/resource_quota/thread_quota.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/resource_quota/thread_quota.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/resource_quota/trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/resource_quota/trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/authorization/audit_logging.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/authorization/audit_logging.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/security/authorization/authorization_engine.h" role="src" />
@ -1626,11 +1620,9 @@
<file baseinstalldir="/" name="src/core/lib/slice/slice_buffer.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_buffer.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_refcount.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_refcount.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_string_helpers.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/slice/slice_string_helpers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/api_trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/api_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/byte_buffer.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/byte_buffer_reader.cc" role="src" />
@ -1639,7 +1631,6 @@
<file baseinstalldir="/" name="src/core/lib/surface/call_details.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_log_batch.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_test_only.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_utils.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/call_utils.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/surface/channel.cc" role="src" />
@ -1807,8 +1798,6 @@
<file baseinstalldir="/" name="src/core/resolver/xds/xds_dependency_manager.h" role="src" />
<file baseinstalldir="/" name="src/core/resolver/xds/xds_resolver.cc" role="src" />
<file baseinstalldir="/" name="src/core/resolver/xds/xds_resolver_attributes.h" role="src" />
<file baseinstalldir="/" name="src/core/resolver/xds/xds_resolver_trace.cc" role="src" />
<file baseinstalldir="/" name="src/core/resolver/xds/xds_resolver_trace.h" role="src" />
<file baseinstalldir="/" name="src/core/server/server.cc" role="src" />
<file baseinstalldir="/" name="src/core/server/server.h" role="src" />
<file baseinstalldir="/" name="src/core/server/server_call_tracer_filter.cc" role="src" />

src/core/BUILD

@ -547,7 +547,6 @@ grpc_cc_library(
"pipe",
"poll",
"promise_factory",
"promise_trace",
"try_seq",
"//:gpr",
"//:gpr_platform",
@ -577,11 +576,11 @@ grpc_cc_library(
"context",
"poll",
"promise_factory",
"promise_trace",
"ref_counted",
"//:event_engine_base_hdrs",
"//:exec_ctx",
"//:gpr",
"//:grpc_trace",
"//:ref_counted_ptr",
],
)
@ -796,9 +795,8 @@ grpc_cc_library(
"construct_destruct",
"poll",
"promise_like",
"promise_trace",
"//:gpr",
"//:gpr_platform",
"//:grpc_trace",
],
)
@ -887,9 +885,9 @@ grpc_cc_library(
"poll",
"promise_factory",
"promise_like",
"promise_trace",
"//:debug_location",
"//:gpr",
"//:grpc_trace",
],
)
@ -1025,8 +1023,8 @@ grpc_cc_library(
deps = [
"activity",
"poll",
"promise_trace",
"//:gpr",
"//:grpc_trace",
],
)
@ -1044,9 +1042,9 @@ grpc_cc_library(
deps = [
"activity",
"poll",
"promise_trace",
"wait_set",
"//:gpr",
"//:grpc_trace",
],
)
@ -1068,7 +1066,6 @@ grpc_cc_library(
"context",
"poll",
"promise_factory",
"promise_trace",
"//:debug_location",
"//:gpr",
],
@ -1095,7 +1092,6 @@ grpc_cc_library(
"interceptor_list",
"map",
"poll",
"promise_trace",
"seq",
"//:debug_location",
"//:gpr",
@ -1136,21 +1132,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "promise_trace",
srcs = [
"lib/promise/trace.cc",
],
hdrs = [
"lib/promise/trace.h",
],
language = "c++",
deps = [
"//:gpr_platform",
"//:grpc_trace",
],
)
grpc_cc_library(
name = "mpsc",
hdrs = [
@ -1205,10 +1186,10 @@ grpc_cc_library(
"poll",
"promise_factory",
"promise_status",
"promise_trace",
"status_flag",
"//:gpr",
"//:gpr_platform",
"//:grpc_trace",
],
)
@ -1445,7 +1426,6 @@ grpc_cc_library(
"periodic_update",
"poll",
"race",
"resource_quota_trace",
"seq",
"slice_refcount",
"time",
@ -1536,20 +1516,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "resource_quota_trace",
srcs = [
"lib/resource_quota/trace.cc",
],
hdrs = [
"lib/resource_quota/trace.h",
],
deps = [
"//:gpr_platform",
"//:grpc_trace",
],
)
grpc_cc_library(
name = "resource_quota",
srcs = [
@ -1578,9 +1544,6 @@ grpc_cc_library(
grpc_cc_library(
name = "slice_refcount",
srcs = [
"lib/slice/slice_refcount.cc",
],
hdrs = [
"lib/slice/slice_refcount.h",
],
@ -2688,11 +2651,9 @@ grpc_cc_library(
],
)
# TODO(hork): delete this target when refactoring to GRPC_TRACE_LOG(flag, level)
grpc_cc_library(
name = "event_engine_trace",
srcs = [
"lib/event_engine/trace.cc",
],
hdrs = [
"lib/event_engine/trace.h",
],
@ -3107,21 +3068,6 @@ grpc_cc_library(
deps = ["//:gpr_platform"],
)
grpc_cc_library(
name = "channel_stack_trace",
srcs = [
"lib/channel/channel_stack_trace.cc",
],
hdrs = [
"lib/channel/channel_stack_trace.h",
],
language = "c++",
deps = [
"//:gpr_platform",
"//:grpc_trace",
],
)
grpc_cc_library(
name = "channel_init",
srcs = [
@ -3142,7 +3088,6 @@ grpc_cc_library(
"call_filters",
"channel_args",
"channel_fwd",
"channel_stack_trace",
"channel_stack_type",
"interception_chain",
"//:channel_stack_builder",
@ -4767,7 +4712,6 @@ grpc_cc_library(
"slice",
"slice_buffer",
"validation_errors",
"//:call_trace",
"//:channel_arg_names",
"//:config",
"//:gpr",
@ -5805,6 +5749,13 @@ grpc_cc_library(
deps = ["//:gpr"],
)
grpc_cc_library(
name = "glob",
srcs = ["lib/gprpp/glob.cc"],
hdrs = ["lib/gprpp/glob.h"],
external_deps = ["absl/strings"],
)
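The `glob` target added just above is the helper that wildcard-style `GRPC_TRACE` values are matched with. As a rough sketch only (the function name `GlobMatchSketch` is invented here, and the shipped code in `lib/gprpp/glob.cc` may handle `*` differently), one way to write that kind of matcher recursively:
```
// Illustrative sketch only -- not the implementation in
// src/core/lib/gprpp/glob.cc. Assumes '?' matches exactly one character and
// '*' matches any run of characters (possibly empty).
#include "absl/strings/string_view.h"

bool GlobMatchSketch(absl::string_view name, absl::string_view pattern) {
  if (pattern.empty()) return name.empty();
  if (pattern[0] == '*') {
    // Let '*' absorb any prefix of `name`, then match the rest of the pattern.
    for (size_t i = 0; i <= name.size(); ++i) {
      if (GlobMatchSketch(name.substr(i), pattern.substr(1))) return true;
    }
    return false;
  }
  if (name.empty()) return false;
  if (pattern[0] != '?' && pattern[0] != name[0]) return false;
  return GlobMatchSketch(name.substr(1), pattern.substr(1));
}
```
For instance, under this sketch `GlobMatchSketch("event_engine_dns", "event_engine*")` evaluates to true.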
grpc_cc_library(
name = "status_conversion",
srcs = ["lib/transport/status_conversion.cc"],
@ -6581,21 +6532,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc_resolver_xds_trace",
srcs = [
"resolver/xds/xds_resolver_trace.cc",
],
hdrs = [
"resolver/xds/xds_resolver_trace.h",
],
language = "c++",
deps = [
"//:gpr_platform",
"//:grpc_trace",
],
)
grpc_cc_library(
name = "xds_dependency_manager",
srcs = [
@ -6614,7 +6550,6 @@ grpc_cc_library(
language = "c++",
deps = [
"grpc_lb_xds_channel_args",
"grpc_resolver_xds_trace",
"grpc_xds_client",
"match",
"ref_counted",
@ -6656,7 +6591,6 @@ grpc_cc_library(
"experiments",
"grpc_lb_policy_ring_hash",
"grpc_resolver_xds_attributes",
"grpc_resolver_xds_trace",
"grpc_service_config",
"grpc_xds_client",
"iomgr_fwd",
@ -7066,7 +7000,6 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc_transport_inproc",
srcs = [
"ext/transport/inproc/inproc_plugin.cc",
"ext/transport/inproc/inproc_transport.cc",
"ext/transport/inproc/legacy_inproc_transport.cc",
],
@ -7329,9 +7262,6 @@ grpc_cc_library(
grpc_cc_library(
name = "chaotic_good_transport",
srcs = [
"ext/transport/chaotic_good/chaotic_good_transport.cc",
],
hdrs = [
"ext/transport/chaotic_good/chaotic_good_transport.h",
],

src/core/client_channel/client_channel.cc

@ -100,12 +100,6 @@ using grpc_event_engine::experimental::EventEngine;
using internal::ClientChannelMethodParsedConfig;
// Defined in legacy client channel filter.
// TODO(roth): Move these here when we remove the legacy filter.
extern TraceFlag grpc_client_channel_trace;
extern TraceFlag grpc_client_channel_call_trace;
extern TraceFlag grpc_client_channel_lb_call_trace;
//
// ClientChannel::ResolverResultHandler
//
@ -117,10 +111,9 @@ class ClientChannel::ResolverResultHandler : public Resolver::ResultHandler {
: client_channel_(std::move(client_channel)) {}
~ResolverResultHandler() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: resolver shutdown complete",
client_channel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << client_channel_.get()
<< ": resolver shutdown complete";
}
void ReportResult(Resolver::Result result) override
@ -236,14 +229,12 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
void OnConnectivityStateChange(
RefCountedPtr<ConnectivityStateWatcherInterface> self,
grpc_connectivity_state state, const absl::Status& status) override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: connectivity change for subchannel "
"wrapper %p subchannel %p; hopping into work_serializer",
subchannel_wrapper_->client_channel_.get(),
subchannel_wrapper_.get(),
subchannel_wrapper_->subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << subchannel_wrapper_->client_channel_.get()
<< ": connectivity change for subchannel wrapper "
<< subchannel_wrapper_.get() << " subchannel "
<< subchannel_wrapper_->subchannel_.get()
<< "; hopping into work_serializer";
self.release(); // Held by callback.
subchannel_wrapper_->client_channel_->work_serializer_->Run(
[this, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
@ -261,16 +252,14 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
const absl::Status& status)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(
*subchannel_wrapper_->client_channel_->work_serializer_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: processing connectivity change in work "
"serializer for subchannel wrapper %p subchannel %p watcher=%p "
"state=%s status=%s",
subchannel_wrapper_->client_channel_.get(),
subchannel_wrapper_.get(), subchannel_wrapper_->subchannel_.get(),
watcher_.get(), ConnectivityStateName(state),
status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << subchannel_wrapper_->client_channel_.get()
<< ": processing connectivity change in work serializer for subchannel "
"wrapper "
<< subchannel_wrapper_.get() << " subchannel "
<< subchannel_wrapper_->subchannel_.get()
<< " watcher=" << watcher_.get()
<< "state=" << ConnectivityStateName(state) << " status=" << status;
absl::optional<absl::Cord> keepalive_throttling =
status.GetPayload(kKeepaliveThrottlingKey);
if (keepalive_throttling.has_value()) {
@ -281,12 +270,10 @@ class ClientChannel::SubchannelWrapper::WatcherWrapper
subchannel_wrapper_->client_channel_->keepalive_time_) {
subchannel_wrapper_->client_channel_->keepalive_time_ =
new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: throttling keepalive time to %d",
subchannel_wrapper_->client_channel_.get(),
subchannel_wrapper_->client_channel_->keepalive_time_);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << subchannel_wrapper_->client_channel_.get()
<< ": throttling keepalive time to "
<< subchannel_wrapper_->client_channel_->keepalive_time_;
// Propagate the new keepalive time to all subchannels. This is so
// that new transports created by any subchannel (and not just the
// subchannel that received the GOAWAY), use the new keepalive time.
@ -320,17 +307,14 @@ ClientChannel::SubchannelWrapper::SubchannelWrapper(
WeakRefCountedPtr<ClientChannel> client_channel,
RefCountedPtr<Subchannel> subchannel)
: SubchannelInterfaceWithCallDestination(
GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)
? "SubchannelWrapper"
GRPC_TRACE_FLAG_ENABLED(client_channel) ? "SubchannelWrapper"
: nullptr),
client_channel_(std::move(client_channel)),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(
GPR_INFO,
"client_channel=%p: creating subchannel wrapper %p for subchannel %p",
client_channel_.get(), this, subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << client_channel_.get()
<< ": creating subchannel wrapper " << this << " for subchannel "
<< subchannel_.get();
#ifndef NDEBUG
DCHECK(client_channel_->work_serializer_->RunningInWorkSerializer());
#endif
@ -353,12 +337,10 @@ ClientChannel::SubchannelWrapper::SubchannelWrapper(
}
ClientChannel::SubchannelWrapper::~SubchannelWrapper() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: destroying subchannel wrapper %p "
"for subchannel %p",
client_channel_.get(), this, subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << client_channel_.get()
<< ": destroying subchannel wrapper " << this << " for subchannel "
<< subchannel_.get();
}
void ClientChannel::SubchannelWrapper::Orphaned() {
@ -467,7 +449,7 @@ class ClientChannel::ClientChannelControlHelper
RefCountedPtr<LoadBalancingPolicy::SubchannelPicker> picker) override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*client_channel_->work_serializer_) {
if (client_channel_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel)) {
const char* extra = client_channel_->disconnect_error_.ok()
? ""
: " (ignoring -- channel shutting down)";
@ -486,10 +468,9 @@ class ClientChannel::ClientChannelControlHelper
void RequestReresolution() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*client_channel_->work_serializer_) {
if (client_channel_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: started name re-resolving",
client_channel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << client_channel_.get()
<< ": started name re-resolving";
client_channel_->resolver_->RequestReresolutionLocked();
}
@ -644,9 +625,8 @@ ClientChannel::ClientChannel(
work_serializer_(std::make_shared<WorkSerializer>(event_engine_)),
state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
subchannel_pool_(GetSubchannelPool(channel_args_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: creating client_channel", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": creating client_channel";
// Set initial keepalive time.
auto keepalive_arg = channel_args_.GetInt(GRPC_ARG_KEEPALIVE_TIME_MS);
if (keepalive_arg.has_value()) {
@ -662,15 +642,13 @@ ClientChannel::ClientChannel(
}
ClientChannel::~ClientChannel() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: destroying", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": destroying";
}
void ClientChannel::Orphaned() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: shutting down", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": shutting down";
// Weird capture then copy needed to satisfy thread safety analysis,
// otherwise it seems to fail to recognize the correct lock is taken in the
// lambda.
@ -858,10 +836,9 @@ void ClientChannel::StartCall(UnstartedCallHandler unstarted_handler) {
}
void ClientChannel::CreateResolverLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: starting name resolution for %s",
this, uri_to_resolve_.c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": starting name resolution for "
<< uri_to_resolve_;
resolver_ = CoreConfiguration::Get().resolver_registry().CreateResolver(
uri_to_resolve_, channel_args_, nullptr, work_serializer_,
std::make_unique<ResolverResultHandler>(
@ -872,28 +849,24 @@ void ClientChannel::CreateResolverLocked() {
UpdateStateLocked(GRPC_CHANNEL_CONNECTING, absl::Status(),
"started resolving");
resolver_->StartLocked();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: created resolver=%p", this,
resolver_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": created resolver=" << resolver_.get();
}
void ClientChannel::DestroyResolverAndLbPolicyLocked() {
if (resolver_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: shutting down resolver=%p", this,
resolver_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": shutting down resolver=" << resolver_.get();
resolver_.reset();
saved_service_config_.reset();
saved_config_selector_.reset();
resolver_data_for_calls_.Set(ResolverDataForCalls{nullptr, nullptr});
// Clear LB policy if set.
if (lb_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": shutting down lb_policy=" << lb_policy_.get();
lb_policy_.reset();
picker_.Set(MakeRefCounted<LoadBalancingPolicy::DropPicker>(
absl::UnavailableError("Channel shutdown")));
@ -975,9 +948,8 @@ RefCountedPtr<LoadBalancingPolicy::Config> ChooseLbPolicy(
void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
// Handle race conditions.
if (resolver_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: got resolver result", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": got resolver result";
// Grab resolver result health callback.
auto resolver_callback = std::move(result.result_health_callback);
absl::Status resolver_result_status;
@ -1011,20 +983,17 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
RefCountedPtr<ServiceConfig> service_config;
RefCountedPtr<ConfigSelector> config_selector;
if (!result.service_config.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: resolver returned service config error: %s",
this, result.service_config.status().ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": resolver returned service config error: "
<< result.service_config.status();
// If the service config was invalid, then fallback to the
// previously returned service config, if any.
if (saved_service_config_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: resolver returned invalid service config; "
"continuing to use previous service config",
this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": resolver returned invalid service config; "
"continuing to use previous service config";
service_config = saved_service_config_;
config_selector = saved_config_selector_;
} else {
@ -1038,12 +1007,10 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
}
} else if (*result.service_config == nullptr) {
// Resolver did not return any service config.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"client_channel=%p: resolver returned no service config; "
"using default service config for channel",
this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": resolver returned no service config; using default service "
"config for channel";
service_config = default_service_config_;
} else {
// Use ServiceConfig and ConfigSelector returned by resolver.
@ -1078,8 +1045,9 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
// TODO(ncteisen): might be worth somehow including a snippet of the
// config in the trace, at the risk of bloating the trace logs.
trace_strings.push_back("Service config changed");
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: service config not changed", this);
} else {
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": service config not changed";
}
// Create or update LB policy, as needed.
resolver_result_status = CreateOrUpdateLbPolicyLocked(
@ -1110,10 +1078,9 @@ void ClientChannel::OnResolverResultChangedLocked(Resolver::Result result) {
void ClientChannel::OnResolverErrorLocked(absl::Status status) {
if (resolver_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: resolver transient failure: %s", this,
status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this
<< ": resolver transient failure: " << status;
// If we already have an LB policy from a previous resolution
// result, then we continue to let it set the connectivity state.
// Otherwise, we go into TRANSIENT_FAILURE.
@ -1155,10 +1122,9 @@ absl::Status ClientChannel::CreateOrUpdateLbPolicyLocked(
lb_policy_ = CreateLbPolicyLocked(update_args.args);
}
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: Updating child policy %p", this,
lb_policy_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": Updating child policy "
<< lb_policy_.get();
return lb_policy_->UpdateLocked(std::move(update_args));
}
@ -1181,11 +1147,10 @@ OrphanablePtr<LoadBalancingPolicy> ClientChannel::CreateLbPolicyLocked(
lb_policy_args.args = args;
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_client_channel_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: created new LB policy %p", this,
lb_policy.get());
}
&client_channel_trace);
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": created new LB policy "
<< lb_policy.get();
return lb_policy;
}
@ -1194,16 +1159,14 @@ void ClientChannel::UpdateServiceConfigInControlPlaneLocked(
RefCountedPtr<ConfigSelector> config_selector, std::string lb_policy_name) {
std::string service_config_json(service_config->json_string());
// Update service config.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: using service config: \"%s\"", this,
service_config_json.c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": using service config: \""
<< service_config_json << "\"";
saved_service_config_ = std::move(service_config);
// Update config selector.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: using ConfigSelector %p", this,
config_selector.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": using ConfigSelector "
<< config_selector.get();
saved_config_selector_ = std::move(config_selector);
// Update the data used by GetChannelInfo().
{
@ -1214,10 +1177,9 @@ void ClientChannel::UpdateServiceConfigInControlPlaneLocked(
}
void ClientChannel::UpdateServiceConfigInDataPlaneLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: switching to ConfigSelector %p", this,
saved_config_selector_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": switching to ConfigSelector "
<< saved_config_selector_.get();
// Use default config selector if resolver didn't supply one.
RefCountedPtr<ConfigSelector> config_selector = saved_config_selector_;
if (config_selector == nullptr) {
@ -1279,9 +1241,8 @@ void ClientChannel::UpdateStateAndPickerLocked(
}
void ClientChannel::StartIdleTimer() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: idle timer started", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "client_channel=" << this << ": idle timer started";
auto self = WeakRefAsSubclass<ClientChannel>();
auto promise = Loop([self]() {
return TrySeq(Sleep(Timestamp::Now() + self->idle_timeout_),
@ -1316,10 +1277,9 @@ void ClientChannel::StartIdleTimer() {
absl::Status ClientChannel::ApplyServiceConfigToCall(
ConfigSelector& config_selector,
ClientMetadata& client_initial_metadata) const {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "client_channel=%p: %sapplying service config to call",
this, GetContext<Activity>()->DebugTag().c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "client_channel=" << this << ": " << GetContext<Activity>()->DebugTag()
<< " service config to call";
// Create a ClientChannelServiceConfigCallData for the call. This stores
// a ref to the ServiceConfig and caches the right set of parsed configs
// to use for the call. The ClientChannelServiceConfigCallData will store

src/core/client_channel/client_channel_filter.cc

@ -113,10 +113,6 @@ namespace grpc_core {
using internal::ClientChannelMethodParsedConfig;
TraceFlag grpc_client_channel_trace(false, "client_channel");
TraceFlag grpc_client_channel_call_trace(false, "client_channel_call");
TraceFlag grpc_client_channel_lb_call_trace(false, "client_channel_lb_call");
//
// ClientChannelFilter::CallData definition
//
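The hand-written flag objects removed above now come from the centrally generated sources, so call sites reference the generated flags directly. A minimal sketch of the two access patterns visible in the hunks below, assuming the generated declarations are reachable through `src/core/lib/debug/trace.h` (the function name `ExampleTraceUsage` is invented for illustration):
```
// Sketch only: mirrors the usage shown in this diff, not new API surface.
#include "absl/log/log.h"

#include "src/core/lib/debug/trace.h"

namespace grpc_core {

void ExampleTraceUsage(void* chand) {
  // Guarded block, useful when the logging work spans several statements.
  if (GRPC_TRACE_FLAG_ENABLED(client_channel)) {
    LOG(INFO) << "chand=" << chand << ": client_channel tracing is on";
  }
  // Passing the flag object itself (e.g. to ChildPolicyHandler) uses the
  // generated object name directly.
  TraceFlag* flag_for_child_policy = &client_channel_trace;
  (void)flag_for_child_policy;
}

}  // namespace grpc_core
```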
@ -411,11 +407,9 @@ class DynamicTerminationFilter::CallData final {
args, pollent, nullptr,
[service_config_call_data]() { service_config_call_data->Commit(); },
/*is_transparent_retry=*/false);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p dynamic_termination_calld=%p: create lb_call=%p", chand,
client_channel, calld->lb_call_.get());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " dynamic_termination_calld=" << client_channel
<< ": create lb_call=" << calld->lb_call_.get();
}
private:
@ -466,9 +460,8 @@ class ClientChannelFilter::ResolverResultHandler final
}
~ResolverResultHandler() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver shutdown complete", chand_);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << chand_ << ": resolver shutdown complete";
GRPC_CHANNEL_STACK_UNREF(chand_->owning_stack_, "ResolverResultHandler");
}
@ -498,16 +491,14 @@ class ClientChannelFilter::SubchannelWrapper final
public:
SubchannelWrapper(ClientChannelFilter* chand,
RefCountedPtr<Subchannel> subchannel)
: SubchannelInterface(GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)
: SubchannelInterface(GRPC_TRACE_FLAG_ENABLED(client_channel)
? "SubchannelWrapper"
: nullptr),
chand_(chand),
subchannel_(std::move(subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: creating subchannel wrapper %p for subchannel %p",
chand, this, subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << chand << ": creating subchannel wrapper " << this
<< " for subchannel " << subchannel_.get();
GRPC_CHANNEL_STACK_REF(chand_->owning_stack_, "SubchannelWrapper");
#ifndef NDEBUG
DCHECK(chand_->work_serializer_->RunningInWorkSerializer());
@ -528,11 +519,9 @@ class ClientChannelFilter::SubchannelWrapper final
}
~SubchannelWrapper() override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: destroying subchannel wrapper %p for subchannel %p",
chand_, this, subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << chand_ << ": destroying subchannel wrapper " << this
<< "for subchannel " << subchannel_.get();
if (!IsWorkSerializerDispatchEnabled()) {
chand_->subchannel_wrappers_.erase(this);
if (chand_->channelz_node_ != nullptr) {
@ -665,12 +654,11 @@ class ClientChannelFilter::SubchannelWrapper final
void OnConnectivityStateChange(
RefCountedPtr<ConnectivityStateWatcherInterface> self,
grpc_connectivity_state state, const absl::Status& status) override {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: connectivity change for subchannel wrapper %p "
"subchannel %p; hopping into work_serializer",
parent_->chand_, parent_.get(), parent_->subchannel_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << parent_->chand_
<< ": connectivity change for subchannel wrapper " << parent_.get()
<< " subchannel " << parent_->subchannel_.get()
<< "hopping into work_serializer";
self.release(); // Held by callback.
parent_->chand_->work_serializer_->Run(
[this, state, status]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(
@ -689,15 +677,13 @@ class ClientChannelFilter::SubchannelWrapper final
void ApplyUpdateInControlPlaneWorkSerializer(grpc_connectivity_state state,
const absl::Status& status)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*parent_->chand_->work_serializer_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: processing connectivity change in work serializer "
"for subchannel wrapper %p subchannel %p watcher=%p "
"state=%s status=%s",
parent_->chand_, parent_.get(), parent_->subchannel_.get(),
watcher_.get(), ConnectivityStateName(state),
status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << parent_->chand_
<< ": processing connectivity change in work serializer for "
"subchannel wrapper "
<< parent_.get() << " subchannel " << parent_->subchannel_.get()
<< " watcher=" << watcher_.get()
<< " state=" << ConnectivityStateName(state) << " status=" << status;
absl::optional<absl::Cord> keepalive_throttling =
status.GetPayload(kKeepaliveThrottlingKey);
if (keepalive_throttling.has_value()) {
@ -706,10 +692,10 @@ class ClientChannelFilter::SubchannelWrapper final
&new_keepalive_time)) {
if (new_keepalive_time > parent_->chand_->keepalive_time_) {
parent_->chand_->keepalive_time_ = new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: throttling keepalive time to %d",
parent_->chand_, parent_->chand_->keepalive_time_);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << parent_->chand_
<< ": throttling keepalive time to "
<< parent_->chand_->keepalive_time_;
// Propagate the new keepalive time to all subchannels. This is so
// that new transports created by any subchannel (and not just the
// subchannel that received the GOAWAY), use the new keepalive time.
@ -992,14 +978,13 @@ class ClientChannelFilter::ClientChannelControlHelper final
RefCountedPtr<LoadBalancingPolicy::SubchannelPicker> picker)
override ABSL_EXCLUSIVE_LOCKS_REQUIRED(*chand_->work_serializer_) {
if (chand_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
const char* extra = chand_->disconnect_error_.ok()
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << chand_
<< ": update: state=" << ConnectivityStateName(state) << " status=("
<< status << ") picker=" << picker.get()
<< (chand_->disconnect_error_.ok()
? ""
: " (ignoring -- channel shutting down)";
gpr_log(GPR_INFO, "chand=%p: update: state=%s status=(%s) picker=%p%s",
chand_, ConnectivityStateName(state), status.ToString().c_str(),
picker.get(), extra);
}
: " (ignoring -- channel shutting down)");
// Do update only if not shutting down.
if (chand_->disconnect_error_.ok()) {
chand_->UpdateStateAndPickerLocked(state, status, "helper",
@ -1010,9 +995,8 @@ class ClientChannelFilter::ClientChannelControlHelper final
void RequestReresolution() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*chand_->work_serializer_) {
if (chand_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand_);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << chand_ << ": started name re-resolving";
chand_->resolver_->RequestReresolutionLocked();
}
@ -1104,10 +1088,9 @@ ClientChannelFilter::ClientChannelFilter(grpc_channel_element_args* args,
std::make_shared<WorkSerializer>(*args->channel_stack->event_engine)),
state_tracker_("client_channel", GRPC_CHANNEL_IDLE),
subchannel_pool_(GetSubchannelPool(channel_args_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: creating client_channel for channel stack %p",
this, owning_stack_);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": creating client_channel for channel stack "
<< owning_stack_;
// Start backup polling.
grpc_client_channel_start_backup_polling(interested_parties_);
// Check client channel factory.
@ -1176,9 +1159,8 @@ ClientChannelFilter::ClientChannelFilter(grpc_channel_element_args* args,
}
ClientChannelFilter::~ClientChannelFilter() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: destroying channel", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": destroying channel";
DestroyResolverAndLbPolicyLocked();
// Stop backup polling.
grpc_client_channel_stop_backup_polling(interested_parties_);
@ -1270,9 +1252,8 @@ void ClientChannelFilter::OnResolverResultChangedLocked(
Resolver::Result result) {
// Handle race conditions.
if (resolver_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: got resolver result", this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": got resolver result";
// Grab resolver result health callback.
auto resolver_callback = std::move(result.result_health_callback);
absl::Status resolver_result_status;
@ -1306,19 +1287,16 @@ void ClientChannelFilter::OnResolverResultChangedLocked(
RefCountedPtr<ServiceConfig> service_config;
RefCountedPtr<ConfigSelector> config_selector;
if (!result.service_config.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver returned service config error: %s",
this, result.service_config.status().ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": resolver returned service config error: "
<< result.service_config.status();
// If the service config was invalid, then fallback to the
// previously returned service config.
if (saved_service_config_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: resolver returned invalid service config. "
"Continuing to use previous service config.",
this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this
<< ": resolver returned invalid service config. "
"Continuing to use previous service config.";
service_config = saved_service_config_;
config_selector = saved_config_selector_;
} else {
@ -1332,12 +1310,10 @@ void ClientChannelFilter::OnResolverResultChangedLocked(
}
} else if (*result.service_config == nullptr) {
// Resolver did not return any service config.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO,
"chand=%p: resolver returned no service config. Using default "
"service config for channel.",
this);
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this
<< ": resolver returned no service config. Using default service "
"config for channel.";
service_config = default_service_config_;
} else {
// Use ServiceConfig and ConfigSelector returned by resolver.
@ -1369,8 +1345,9 @@ void ClientChannelFilter::OnResolverResultChangedLocked(
UpdateServiceConfigInControlPlaneLocked(
std::move(service_config), std::move(config_selector),
std::string(lb_policy_config->name()));
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: service config not changed", this);
} else {
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": service config not changed";
}
// Create or update LB policy, as needed.
resolver_result_status = CreateOrUpdateLbPolicyLocked(
@ -1404,10 +1381,8 @@ void ClientChannelFilter::OnResolverResultChangedLocked(
void ClientChannelFilter::OnResolverErrorLocked(absl::Status status) {
if (resolver_ == nullptr) return;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: resolver transient failure: %s", this,
status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": resolver transient failure: " << status;
// If we already have an LB policy from a previous resolution
// result, then we continue to let it set the connectivity state.
// Otherwise, we go into TRANSIENT_FAILURE.
@ -1453,10 +1428,8 @@ absl::Status ClientChannelFilter::CreateOrUpdateLbPolicyLocked(
lb_policy_ = CreateLbPolicyLocked(update_args.args);
}
// Update the policy.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: Updating child policy %p", this,
lb_policy_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": Updating child policy " << lb_policy_.get();
return lb_policy_->UpdateLocked(std::move(update_args));
}
@ -1478,11 +1451,9 @@ OrphanablePtr<LoadBalancingPolicy> ClientChannelFilter::CreateLbPolicyLocked(
lb_policy_args.args = args;
OrphanablePtr<LoadBalancingPolicy> lb_policy =
MakeOrphanable<ChildPolicyHandler>(std::move(lb_policy_args),
&grpc_client_channel_trace);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: created new LB policy %p", this,
lb_policy.get());
}
&client_channel_trace);
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": created new LB policy " << lb_policy.get();
grpc_pollset_set_add_pollset_set(lb_policy->interested_parties(),
interested_parties_);
return lb_policy;
@ -1492,10 +1463,9 @@ void ClientChannelFilter::UpdateServiceConfigInControlPlaneLocked(
RefCountedPtr<ServiceConfig> service_config,
RefCountedPtr<ConfigSelector> config_selector, std::string lb_policy_name) {
std::string service_config_json(service_config->json_string());
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: using service config: \"%s\"", this,
service_config_json.c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": using service config: \"" << service_config_json
<< "\"";
// Save service config.
saved_service_config_ = std::move(service_config);
// Swap out the data used by GetChannelInfo().
@ -1506,10 +1476,9 @@ void ClientChannelFilter::UpdateServiceConfigInControlPlaneLocked(
}
// Save config selector.
saved_config_selector_ = std::move(config_selector);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: using ConfigSelector %p", this,
saved_config_selector_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": using ConfigSelector "
<< saved_config_selector_.get();
}
void ClientChannelFilter::UpdateServiceConfigInDataPlaneLocked() {
@ -1517,10 +1486,9 @@ void ClientChannelFilter::UpdateServiceConfigInDataPlaneLocked() {
RefCountedPtr<ServiceConfig> service_config = saved_service_config_;
// Grab ref to config selector. Use default if resolver didn't supply one.
RefCountedPtr<ConfigSelector> config_selector = saved_config_selector_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: switching to ConfigSelector %p", this,
saved_config_selector_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": switching to ConfigSelector "
<< saved_config_selector_.get();
if (config_selector == nullptr) {
config_selector =
MakeRefCounted<DefaultConfigSelector>(saved_service_config_);
@ -1562,10 +1530,9 @@ void ClientChannelFilter::UpdateServiceConfigInDataPlaneLocked() {
}
void ClientChannelFilter::CreateResolverLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: starting name resolution for %s", this,
uri_to_resolve_.c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": starting name resolution for "
<< uri_to_resolve_;
resolver_ = CoreConfiguration::Get().resolver_registry().CreateResolver(
uri_to_resolve_, channel_args_, interested_parties_, work_serializer_,
std::make_unique<ResolverResultHandler>(this));
@ -1575,17 +1542,14 @@ void ClientChannelFilter::CreateResolverLocked() {
UpdateStateLocked(GRPC_CHANNEL_CONNECTING, absl::Status(),
"started resolving");
resolver_->StartLocked();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: created resolver=%p", this, resolver_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": created resolver=" << resolver_.get();
}
void ClientChannelFilter::DestroyResolverAndLbPolicyLocked() {
if (resolver_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: shutting down resolver=%p", this,
resolver_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": shutting down resolver=" << resolver_.get();
resolver_.reset();
// Clear resolution state.
saved_service_config_.reset();
@ -1605,10 +1569,9 @@ void ClientChannelFilter::DestroyResolverAndLbPolicyLocked() {
}
// Clear LB policy if set.
if (lb_policy_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: shutting down lb_policy=%p", this,
lb_policy_.get());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this
<< ": shutting down lb_policy=" << lb_policy_.get();
grpc_pollset_set_del_pollset_set(lb_policy_->interested_parties(),
interested_parties_);
lb_policy_.reset();
@ -1754,10 +1717,9 @@ void ClientChannelFilter::StartTransportOpLocked(grpc_transport_op* op) {
}
// Disconnect or enter IDLE.
if (!op->disconnect_with_error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
StatusToString(op->disconnect_with_error).c_str());
}
GRPC_TRACE_LOG(client_channel, INFO)
<< "chand=" << this << ": disconnect_with_error: "
<< StatusToString(op->disconnect_with_error);
DestroyResolverAndLbPolicyLocked();
intptr_t value;
if (grpc_error_get_int(op->disconnect_with_error,
@ -1862,11 +1824,9 @@ void ClientChannelFilter::RemoveConnectivityWatcher(
//
void ClientChannelFilter::CallData::RemoveCallFromResolverQueuedCallsLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: removing from resolver queued picks list",
chand(), this);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": removing from resolver queued picks list";
// Remove call's pollent from channel's interested_parties.
grpc_polling_entity_del_from_pollset_set(pollent(),
chand()->interested_parties_);
@ -1877,12 +1837,10 @@ void ClientChannelFilter::CallData::RemoveCallFromResolverQueuedCallsLocked() {
}
void ClientChannelFilter::CallData::AddCallToResolverQueuedCallsLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p: adding to resolver queued picks list; pollent=%s",
chand(), this, grpc_polling_entity_string(pollent()).c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": adding to resolver queued picks list; pollent="
<< grpc_polling_entity_string(pollent());
// Add call's pollent to channel's interested_parties, so that I/O
// can be done under the call's CQ.
grpc_polling_entity_add_to_pollset_set(pollent(),
@ -1894,10 +1852,9 @@ void ClientChannelFilter::CallData::AddCallToResolverQueuedCallsLocked() {
grpc_error_handle ClientChannelFilter::CallData::ApplyServiceConfigToCallLocked(
const absl::StatusOr<RefCountedPtr<ConfigSelector>>& config_selector) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand(), this);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": applying service config to call";
if (!config_selector.ok()) return config_selector.status();
// Create a ClientChannelServiceConfigCallData for the call. This stores
// a ref to the ServiceConfig and caches the right set of parsed configs
@ -1962,11 +1919,9 @@ absl::optional<absl::Status> ClientChannelFilter::CallData::CheckResolution(
}
// Handle errors.
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: error applying config to call: error=%s",
chand(), this, StatusToString(error).c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": error applying config to call: error=" << StatusToString(error);
return error;
}
// If the call was queued, add trace annotation.
@ -1989,20 +1944,18 @@ bool ClientChannelFilter::CallData::CheckResolutionLocked(
absl::Status resolver_error = chand()->resolver_transient_failure_error_;
if (!resolver_error.ok() &&
!send_initial_metadata()->GetOrCreatePointer(WaitForReady())->value) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: resolution failed, failing call",
chand(), this);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": resolution failed, failing call";
*config_selector = absl_status_to_grpc_error(resolver_error);
return true;
}
// Either the resolver has not yet returned a result, or it has
// returned transient failure but the call is wait_for_ready. In
// either case, queue the call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: no resolver result yet", chand(),
this);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": no resolver result yet";
return false;
}
// Result found.
@ -2024,9 +1977,8 @@ ClientChannelFilter::FilterBasedCallData::FilterBasedCallData(
elem_(elem),
owning_call_(args.call_stack),
call_combiner_(args.call_combiner) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", chand(), this);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this << ": created call";
}
ClientChannelFilter::FilterBasedCallData::~FilterBasedCallData() {
@ -2062,8 +2014,8 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
auto* calld = static_cast<FilterBasedCallData*>(elem->call_data);
auto* chand = static_cast<ClientChannelFilter*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace) &&
!GRPC_TRACE_FLAG_ENABLED(grpc_trace_channel)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_call) &&
!GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from above: %s", chand,
calld, grpc_transport_stream_op_batch_string(batch, false).c_str());
}
@ -2082,10 +2034,9 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
// Note that once we have done so, we do not need to acquire the channel's
// resolution mutex, which is more efficient (especially for streaming calls).
if (calld->dynamic_call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on dynamic_call=%p",
chand, calld, calld->dynamic_call_.get());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": starting batch on dynamic_call=" << calld->dynamic_call_.get();
calld->dynamic_call_->StartTransportStreamOpBatch(batch);
return;
}
@ -2093,10 +2044,10 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(!calld->cancel_error_.ok())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, StatusToString(calld->cancel_error_).c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": failing batch with error: "
<< StatusToString(calld->cancel_error_);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, calld->cancel_error_, calld->call_combiner());
@ -2110,10 +2061,9 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
calld->cancel_error_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, StatusToString(calld->cancel_error_).c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": recording cancel_error=" << StatusToString(calld->cancel_error_);
// Fail all pending batches.
calld->PendingBatchesFail(calld->cancel_error_, NoYieldCallCombiner);
// Note: This will release the call combiner.
@ -2127,19 +2077,15 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
// channel's resolution mutex to apply the service config to the call,
// after which we will create a dynamic call.
if (GPR_LIKELY(batch->send_initial_metadata)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: grabbing resolution mutex to apply service "
"config",
chand, calld);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": grabbing resolution mutex to apply service ";
// If we're still in IDLE, we need to start resolving.
if (GPR_UNLIKELY(chand->CheckConnectivityState(false) ==
GRPC_CHANNEL_IDLE)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: triggering exit idle", chand,
calld);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": triggering exit idle";
// Bounce into the control plane work serializer to start resolving.
GRPC_CHANNEL_STACK_REF(chand->owning_stack_, "ExitIdle");
chand->work_serializer_->Run(
@ -2152,11 +2098,9 @@ void ClientChannelFilter::FilterBasedCallData::StartTransportStreamOpBatch(
calld->TryCheckResolution(/*was_queued=*/false);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yielding call combiner", chand,
calld);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": saved batch, yielding call combiner";
GRPC_CALL_COMBINER_STOP(calld->call_combiner(),
"batch does not include send_initial_metadata");
}
@ -2185,11 +2129,9 @@ size_t ClientChannelFilter::FilterBasedCallData::GetBatchIndex(
void ClientChannelFilter::FilterBasedCallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand(), this, idx);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": adding pending batch at index " << idx;
grpc_transport_stream_op_batch*& pending = pending_batches_[idx];
CHECK_EQ(pending, nullptr);
pending = batch;
@ -2212,7 +2154,7 @@ void ClientChannelFilter::FilterBasedCallData::PendingBatchesFail(
grpc_error_handle error,
YieldCallCombinerPredicate yield_call_combiner_predicate) {
CHECK(!error.ok());
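// Explicit flag check kept: the pending-batch count below is computed only
// when tracing is enabled.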
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_call)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
@ -2255,7 +2197,7 @@ void ClientChannelFilter::FilterBasedCallData::ResumePendingBatchInCallCombiner(
// This is called via the call combiner, so access to calld is synchronized.
void ClientChannelFilter::FilterBasedCallData::PendingBatchesResume() {
// Retries not enabled; send down batches as-is.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_call)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
@ -2301,13 +2243,13 @@ class ClientChannelFilter::FilterBasedCallData::ResolverQueuedCallCanceller
auto* chand = calld->chand();
{
MutexLock lock(&chand->resolution_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling resolver queued pick: "
"error=%s self=%p calld->resolver_pick_canceller=%p",
chand, calld, StatusToString(error).c_str(), self,
calld->resolver_call_canceller_);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": cancelling resolver queued pick: "
"error="
<< StatusToString(error) << " self=" << self
<< " calld->resolver_pick_canceller="
<< calld->resolver_call_canceller_;
if (calld->resolver_call_canceller_ == self && !error.ok()) {
// Remove pick from list of queued picks.
calld->RemoveCallFromResolverQueuedCallsLocked();
@ -2360,19 +2302,14 @@ void ClientChannelFilter::FilterBasedCallData::CreateDynamicCall() {
call_combiner()};
grpc_error_handle error;
DynamicFilters* channel_stack = args.channel_stack.get();
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p: creating dynamic call stack on channel_stack=%p",
chand(), this, channel_stack);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": creating dynamic call stack on channel_stack=" << channel_stack;
dynamic_call_ = channel_stack->CreateCall(std::move(args), &error);
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create dynamic call: error=%s",
chand(), this, StatusToString(error).c_str());
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand() << " calld=" << this
<< ": failed to create dynamic call: error=" << StatusToString(error);
PendingBatchesFail(error, YieldCallCombiner);
return;
}
@ -2385,13 +2322,10 @@ void ClientChannelFilter::FilterBasedCallData::
auto* calld = static_cast<FilterBasedCallData*>(arg);
auto* chand = calld->chand();
auto* service_config_call_data = GetServiceConfigCallData(calld->arena());
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_trailing_metadata_ready: error=%s "
"service_config_call_data=%p",
chand, calld, StatusToString(error).c_str(),
service_config_call_data);
}
GRPC_TRACE_LOG(client_channel_call, INFO)
<< "chand=" << chand << " calld=" << calld
<< ": got recv_trailing_metadata_ready: error=" << StatusToString(error)
<< " service_config_call_data=" << service_config_call_data;
if (service_config_call_data != nullptr) {
service_config_call_data->Commit();
}
@ -2578,17 +2512,15 @@ void CreateCallAttemptTracer(Arena* arena, bool is_transparent_retry) {
ClientChannelFilter::LoadBalancedCall::LoadBalancedCall(
ClientChannelFilter* chand, Arena* arena,
absl::AnyInvocable<void()> on_commit, bool is_transparent_retry)
: InternallyRefCounted(
GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)
: InternallyRefCounted(GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call)
? "LoadBalancedCall"
: nullptr),
chand_(chand),
on_commit_(std::move(on_commit)),
arena_(arena) {
CreateCallAttemptTracer(arena, is_transparent_retry);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: created", chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this << ": created";
}
ClientChannelFilter::LoadBalancedCall::~LoadBalancedCall() {
@ -2629,10 +2561,9 @@ void ClientChannelFilter::LoadBalancedCall::RecordLatency() {
void ClientChannelFilter::LoadBalancedCall::
RemoveCallFromLbQueuedCallsLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: removing from queued picks list",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": removing from queued picks list";
// Remove pollset_set linkage.
grpc_polling_entity_del_from_pollset_set(pollent(),
chand_->interested_parties_);
@ -2643,10 +2574,9 @@ void ClientChannelFilter::LoadBalancedCall::
}
void ClientChannelFilter::LoadBalancedCall::AddCallToLbQueuedCallsLocked() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: adding to queued picks list",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": adding to queued picks list";
// Add call's pollent to channel's interested_parties, so that I/O
// can be done under the call's CQ.
grpc_polling_entity_add_to_pollset_set(pollent(),
@ -2688,10 +2618,9 @@ ClientChannelFilter::LoadBalancedCall::PickSubchannel(bool was_queued) {
};
}
// Grab mutex and take a ref to the picker.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: grabbing LB mutex to get picker",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": grabbing LB mutex to get picker";
RefCountedPtr<LoadBalancingPolicy::SubchannelPicker> picker;
{
MutexLock lock(&chand_->lb_mu_);
@ -2701,17 +2630,15 @@ ClientChannelFilter::LoadBalancedCall::PickSubchannel(bool was_queued) {
// TODO(roth): Fix race condition in channel_idle filter and any
// other possible causes of this.
if (pickers.back() == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_ERROR, "chand=%p lb_call=%p: picker is null, failing call",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": picker is null, failing call";
return absl::InternalError("picker is null -- shouldn't happen");
}
// Do pick.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: performing pick with picker=%p",
chand_, this, pickers.back().get());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": performing pick with picker=" << pickers.back().get();
grpc_error_handle error;
bool pick_complete = PickSubchannelImpl(pickers.back().get(), &error);
if (!pick_complete) {
@ -2719,11 +2646,9 @@ ClientChannelFilter::LoadBalancedCall::PickSubchannel(bool was_queued) {
MutexLock lock(&chand_->lb_mu_);
// If picker has been swapped out since we grabbed it, try again.
if (pickers.back() != chand_->picker_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: pick not complete, but picker changed",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": pick not complete, but picker changed";
if (IsWorkSerializerDispatchEnabled()) {
// Don't unref until after we release the mutex.
old_picker = std::move(pickers.back());
@ -2742,11 +2667,9 @@ ClientChannelFilter::LoadBalancedCall::PickSubchannel(bool was_queued) {
}
// If the pick failed, fail the call.
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: failed to pick subchannel: error=%s",
chand_, this, StatusToString(error).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": failed to pick subchannel: error=" << StatusToString(error);
return error;
}
// Pick succeeded.
@ -2772,11 +2695,10 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
&result,
// CompletePick
[this](LoadBalancingPolicy::PickResult::Complete* complete_pick) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: LB pick succeeded: subchannel=%p",
chand_, this, complete_pick->subchannel.get());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": LB pick succeeded: subchannel="
<< complete_pick->subchannel.get();
CHECK(complete_pick->subchannel != nullptr);
// Grab a ref to the connected subchannel while we're still
// holding the data plane mutex.
@ -2788,12 +2710,10 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
// yet seen that change and given us a new picker), then just
// queue the pick. We'll try again as soon as we get a new picker.
if (connected_subchannel_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: subchannel returned by LB picker "
"has no connected subchannel; queueing pick",
chand_, this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": subchannel returned by LB picker "
"has no connected subchannel; queueing pick";
return false;
}
lb_subchannel_call_tracker_ =
@ -2805,18 +2725,15 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
},
// QueuePick
[this](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick queued", chand_,
this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this << ": LB pick queued";
return false;
},
// FailPick
[this, &error](LoadBalancingPolicy::PickResult::Fail* fail_pick) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick failed: %s", chand_,
this, fail_pick->status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": LB pick failed: " << fail_pick->status;
// If wait_for_ready is false, then the error indicates the RPC
// attempt's final status.
if (!send_initial_metadata()
@ -2832,10 +2749,9 @@ bool ClientChannelFilter::LoadBalancedCall::PickSubchannelImpl(
},
// DropPick
[this, &error](LoadBalancingPolicy::PickResult::Drop* drop_pick) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: LB pick dropped: %s", chand_,
this, drop_pick->status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand_ << " lb_call=" << this
<< ": LB pick dropped: " << drop_pick->status;
*error = grpc_error_set_int(
absl_status_to_grpc_error(MaybeRewriteIllegalStatusCode(
std::move(drop_pick->status), "LB drop")),
@ -2901,11 +2817,9 @@ size_t ClientChannelFilter::FilterBasedLoadBalancedCall::GetBatchIndex(
void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: adding pending batch at index %" PRIuPTR,
chand(), this, idx);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": adding pending batch at index " << idx;
CHECK_EQ(pending_batches_[idx], nullptr);
pending_batches_[idx] = batch;
}
@ -2928,7 +2842,7 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesFail(
YieldCallCombinerPredicate yield_call_combiner_predicate) {
CHECK(!error.ok());
failure_error_ = error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
@ -2970,7 +2884,7 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
// This is called via the call combiner, so access to calld is synchronized.
void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesResume() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i] != nullptr) ++num_batches;
@ -2999,8 +2913,8 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::PendingBatchesResume() {
void ClientChannelFilter::FilterBasedLoadBalancedCall::
StartTransportStreamOpBatch(grpc_transport_stream_op_batch* batch) {
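// Here the batch is logged when either LB-call tracing or generic channel
// tracing is enabled, so the two-flag check with gpr_log is retained.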
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_trace_channel)) {
if (GRPC_TRACE_FLAG_ENABLED(client_channel_lb_call) ||
GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: batch started from above: %s, "
"call_attempt_tracer()=%p",
@ -3054,11 +2968,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
// the channel's data plane mutex, which is more efficient (especially for
// streaming calls).
if (subchannel_call_ != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: starting batch on subchannel_call=%p",
chand(), this, subchannel_call_.get());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": starting batch on subchannel_call=" << subchannel_call_.get();
subchannel_call_->StartTransportStreamOpBatch(batch);
return;
}
@ -3066,10 +2978,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(!cancel_error_.ok())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: failing batch with error: %s",
chand(), this, StatusToString(cancel_error_).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": failing batch with error: " << StatusToString(cancel_error_);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
call_combiner_);
@ -3083,10 +2994,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
cancel_error_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
chand(), this, StatusToString(cancel_error_).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": recording cancel_error=" << StatusToString(cancel_error_).c_str();
// Fail all pending batches.
PendingBatchesFail(cancel_error_, NoYieldCallCombiner);
// Note: This will release the call combiner.
@ -3102,11 +3012,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
TryPick(/*was_queued=*/false);
} else {
// For all other batches, release the call combiner.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: saved batch, yielding call combiner",
chand(), this);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": saved batch, yielding call combiner";
GRPC_CALL_COMBINER_STOP(call_combiner_,
"batch does not include send_initial_metadata");
}
@ -3115,11 +3023,9 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::
void ClientChannelFilter::FilterBasedLoadBalancedCall::RecvInitialMetadataReady(
void* arg, grpc_error_handle error) {
auto* self = static_cast<FilterBasedLoadBalancedCall*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: got recv_initial_metadata_ready: error=%s",
self->chand(), self, StatusToString(error).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << self->chand() << " lb_call=" << self
<< ": got recv_initial_metadata_ready: error=" << StatusToString(error);
if (error.ok()) {
// recv_initial_metadata_flags is not populated for clients
self->call_attempt_tracer()->RecordReceivedInitialMetadata(
@ -3134,15 +3040,12 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::RecvInitialMetadataReady(
void ClientChannelFilter::FilterBasedLoadBalancedCall::
RecvTrailingMetadataReady(void* arg, grpc_error_handle error) {
auto* self = static_cast<FilterBasedLoadBalancedCall*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: got recv_trailing_metadata_ready: error=%s "
"call_attempt_tracer()=%p lb_subchannel_call_tracker_=%p "
"failure_error_=%s",
self->chand(), self, StatusToString(error).c_str(),
self->call_attempt_tracer(), self->lb_subchannel_call_tracker(),
StatusToString(self->failure_error_).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << self->chand() << " lb_call=" << self
<< ": got recv_trailing_metadata_ready: error=" << StatusToString(error)
<< " call_attempt_tracer()=" << self->call_attempt_tracer()
<< " lb_subchannel_call_tracker_=" << self->lb_subchannel_call_tracker()
<< " failure_error_=" << StatusToString(self->failure_error_);
// Check if we have a tracer or an LB callback to invoke.
if (self->call_attempt_tracer() != nullptr ||
self->lb_subchannel_call_tracker() != nullptr) {
@ -3210,13 +3113,11 @@ class ClientChannelFilter::FilterBasedLoadBalancedCall::LbQueuedCallCanceller
auto* chand = lb_call->chand();
{
MutexLock lock(&chand->lb_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: cancelling queued pick: "
"error=%s self=%p calld->pick_canceller=%p",
chand, lb_call, StatusToString(error).c_str(), self,
lb_call->lb_call_canceller_);
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand << " lb_call=" << lb_call
<< ": cancelling queued pick: error=" << StatusToString(error)
<< " self=" << self
<< " calld->pick_canceller=" << lb_call->lb_call_canceller_;
if (lb_call->lb_call_canceller_ == self && !error.ok()) {
lb_call->Commit();
// Remove pick from list of queued picks.
@ -3298,11 +3199,10 @@ void ClientChannelFilter::FilterBasedLoadBalancedCall::CreateSubchannelCall() {
arena(), call_combiner_};
grpc_error_handle error;
subchannel_call_ = SubchannelCall::Create(std::move(call_args), &error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: create subchannel_call=%p: error=%s", chand(),
this, subchannel_call_.get(), StatusToString(error).c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "chand=" << chand() << " lb_call=" << this
<< ": create subchannel_call=" << subchannel_call_.get()
<< ": error=" << StatusToString(error);
if (on_call_destruction_complete_ != nullptr) {
subchannel_call_->SetAfterCallStackDestroy(on_call_destruction_complete_);
on_call_destruction_complete_ = nullptr;

@ -24,12 +24,6 @@
namespace grpc_core {
// Defined in legacy client channel filter.
// TODO(roth): Move these here when we remove the legacy filter.
extern TraceFlag grpc_client_channel_trace;
extern TraceFlag grpc_client_channel_call_trace;
extern TraceFlag grpc_client_channel_lb_call_trace;
namespace {
class LbMetadata : public LoadBalancingPolicy::MetadataInterface {
@ -185,12 +179,10 @@ LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> PickSubchannel(
// CompletePick
[&](LoadBalancingPolicy::PickResult::Complete* complete_pick)
-> LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"client_channel: %sLB pick succeeded: subchannel=%p",
GetContext<Activity>()->DebugTag().c_str(),
complete_pick->subchannel.get());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "client_channel: " << GetContext<Activity>()->DebugTag()
<< " pick succeeded: subchannel="
<< complete_pick->subchannel.get();
CHECK(complete_pick->subchannel != nullptr);
// Grab a ref to the call destination while we're still
// holding the data plane mutex.
@ -203,12 +195,10 @@ LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> PickSubchannel(
// yet seen that change and given us a new picker), then just
// queue the pick. We'll try again as soon as we get a new picker.
if (call_destination == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"client_channel: %ssubchannel returned by LB picker "
"has no connected subchannel; queueing pick",
GetContext<Activity>()->DebugTag().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "client_channel: " << GetContext<Activity>()->DebugTag()
<< " returned by LB picker has no connected subchannel; queueing "
"pick";
return Continue{};
}
// If the LB policy returned a call tracker, inform it that the
@ -223,20 +213,17 @@ LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> PickSubchannel(
},
// QueuePick
[&](LoadBalancingPolicy::PickResult::Queue* /*queue_pick*/) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "client_channel: %sLB pick queued",
GetContext<Activity>()->DebugTag().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "client_channel: " << GetContext<Activity>()->DebugTag()
<< " pick queued";
return Continue{};
},
// FailPick
[&](LoadBalancingPolicy::PickResult::Fail* fail_pick)
-> LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "client_channel: %sLB pick failed: %s",
GetContext<Activity>()->DebugTag().c_str(),
fail_pick->status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "client_channel: " << GetContext<Activity>()->DebugTag()
<< " pick failed: " << fail_pick->status;
// If wait_for_ready is false, then the error indicates the RPC
// attempt's final status.
if (!unstarted_handler.UnprocessedClientInitialMetadata()
@ -252,11 +239,9 @@ LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> PickSubchannel(
// DropPick
[&](LoadBalancingPolicy::PickResult::Drop* drop_pick)
-> LoopCtl<absl::StatusOr<RefCountedPtr<UnstartedCallDestination>>> {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "client_channel: %sLB pick dropped: %s",
GetContext<Activity>()->DebugTag().c_str(),
drop_pick->status.ToString().c_str());
}
GRPC_TRACE_LOG(client_channel_lb_call, INFO)
<< "client_channel: " << GetContext<Activity>()->DebugTag()
<< " pick dropped: " << drop_pick->status;
return grpc_error_set_int(MaybeRewriteIllegalStatusCode(
std::move(drop_pick->status), "LB drop"),
StatusIntProperty::kLbPolicyDrop, 1);

@ -89,8 +89,6 @@ using grpc_core::internal::RetryMethodConfig;
using grpc_core::internal::RetryServiceConfigParser;
using grpc_event_engine::experimental::EventEngine;
grpc_core::TraceFlag grpc_retry_trace(false, "retry");
namespace grpc_core {
//

@ -38,14 +38,11 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_fwd.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/util/useful.h"
extern grpc_core::TraceFlag grpc_retry_trace;
namespace grpc_core {
class RetryFilter final {

@ -116,8 +116,7 @@ class RetryFilter::LegacyCallData::CallStackDestructionBarrier final
RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
RetryFilter::LegacyCallData* calld, bool is_transparent_retry)
: RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace) ? "CallAttempt"
: nullptr),
: RefCounted(GRPC_TRACE_FLAG_ENABLED(retry) ? "CallAttempt" : nullptr),
calld_(calld),
started_send_initial_metadata_(false),
completed_send_initial_metadata_(false),
@ -141,7 +140,7 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
}
},
is_transparent_retry);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: created attempt, lb_call=%p",
calld->chand_, calld, this, lb_call_.get());
@ -151,7 +150,7 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
calld->retry_policy_->per_attempt_recv_timeout().has_value()) {
const Duration per_attempt_recv_timeout =
*calld->retry_policy_->per_attempt_recv_timeout();
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: per-attempt timeout in %" PRId64
" ms",
@ -170,7 +169,7 @@ RetryFilter::LegacyCallData::CallAttempt::CallAttempt(
}
RetryFilter::LegacyCallData::CallAttempt::~CallAttempt() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying call attempt",
calld_->chand_, calld_, this);
}
@ -237,7 +236,7 @@ void RetryFilter::LegacyCallData::CallAttempt::MaybeSwitchToFastPath() {
// yet seen that op from the surface, we can't switch yet.
if (recv_trailing_metadata_internal_batch_ != nullptr) return;
// Switch to fast path.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retry state no longer needed; "
"moving LB call to parent and unreffing the call attempt",
@ -256,7 +255,7 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
// send_initial_metadata.
if (calld_->seen_send_initial_metadata_ && !started_send_initial_metadata_ &&
!calld_->pending_send_initial_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_initial_metadata op",
@ -270,7 +269,7 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
if (started_send_message_count_ < calld_->send_messages_.size() &&
started_send_message_count_ == completed_send_message_count_ &&
!calld_->pending_send_message_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_message op",
@ -289,7 +288,7 @@ RetryFilter::LegacyCallData::CallAttempt::MaybeCreateBatchForReplay() {
started_send_message_count_ == calld_->send_messages_.size() &&
!started_send_trailing_metadata_ &&
!calld_->pending_send_trailing_metadata_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: replaying previously completed "
"send_trailing_metadata op",
@ -320,7 +319,7 @@ void StartBatchInCallCombiner(void* arg, grpc_error_handle /*ignored*/) {
void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
grpc_transport_stream_op_batch* batch, const char* reason,
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: adding batch (%s): %s",
calld_->chand_, calld_, this, reason,
grpc_transport_stream_op_batch_string(batch, false).c_str());
@ -333,7 +332,7 @@ void RetryFilter::LegacyCallData::CallAttempt::AddClosureForBatch(
void RetryFilter::LegacyCallData::CallAttempt::
AddBatchForInternalRecvTrailingMetadata(CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call failed but "
"recv_trailing_metadata not started; starting it internally",
@ -523,7 +522,7 @@ void RetryFilter::LegacyCallData::CallAttempt::AddRetriableBatches(
}
void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: constructing retriable batches",
calld_->chand_, calld_, this);
@ -533,7 +532,7 @@ void RetryFilter::LegacyCallData::CallAttempt::StartRetriableBatches() {
AddRetriableBatches(&closures);
// Note: This will yield the call combiner.
// Start batches on LB call.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting %" PRIuPTR
" retriable batches on lb_call=%p",
@ -561,7 +560,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
if (calld_->retry_throttle_data_ != nullptr) {
calld_->retry_throttle_data_->RecordSuccess();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: call succeeded",
calld_->chand_, calld_, this);
}
@ -569,7 +568,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
}
// Status is not OK. Check whether the status is retryable.
if (!calld_->retry_policy_->retryable_status_codes().Contains(*status)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: status %s not configured as "
"retryable",
@ -588,7 +587,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
// checks, so that we don't fail to record failures due to other factors.
if (calld_->retry_throttle_data_ != nullptr &&
!calld_->retry_throttle_data_->RecordFailure()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: retries throttled",
calld_->chand_, calld_, this);
}
@ -596,7 +595,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
}
// Check whether the call is committed.
if (calld_->retry_committed_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: retries already committed",
calld_->chand_, calld_, this);
@ -607,7 +606,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
++calld_->num_attempts_completed_;
if (calld_->num_attempts_completed_ >=
calld_->retry_policy_->max_attempts()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO, "chand=%p calld=%p attempt=%p: exceeded %d retry attempts",
calld_->chand_, calld_, this, calld_->retry_policy_->max_attempts());
@ -617,7 +616,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
// Check server push-back.
if (server_pushback.has_value()) {
if (*server_pushback < Duration::Zero()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: not retrying due to server "
"push-back",
@ -625,7 +624,7 @@ bool RetryFilter::LegacyCallData::CallAttempt::ShouldRetry(
}
return false;
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: server push-back: retry in %" PRIu64
@ -675,7 +674,7 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void* arg, grpc_error_handle error) {
auto* call_attempt = static_cast<CallAttempt*>(arg);
auto* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: perAttemptRecvTimeout timer fired: "
"error=%s, per_attempt_recv_timer_handle_.has_value()=%d",
@ -714,7 +713,7 @@ void RetryFilter::LegacyCallData::CallAttempt::OnPerAttemptRecvTimerLocked(
void RetryFilter::LegacyCallData::CallAttempt::
MaybeCancelPerAttemptRecvTimer() {
if (per_attempt_recv_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: cancelling "
"perAttemptRecvTimeout timer",
@ -735,11 +734,10 @@ void RetryFilter::LegacyCallData::CallAttempt::
RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
RefCountedPtr<CallAttempt> attempt, int refcount, bool set_on_complete)
: RefCounted(
GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace) ? "BatchData" : nullptr,
: RefCounted(GRPC_TRACE_FLAG_ENABLED(retry) ? "BatchData" : nullptr,
refcount),
call_attempt_(attempt.release()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: creating batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
this);
@ -760,7 +758,7 @@ RetryFilter::LegacyCallData::CallAttempt::BatchData::BatchData(
}
RetryFilter::LegacyCallData::CallAttempt::BatchData::~BatchData() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: destroying batch %p",
call_attempt_->calld_->chand_, call_attempt_->calld_, call_attempt_,
this);
@ -833,7 +831,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_initial_metadata_ready, error=%s",
@ -860,7 +858,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
if (GPR_UNLIKELY(
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring "
"recv_initial_metadata_ready (Trailers-Only)",
@ -932,7 +930,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_message_ready, error=%s",
@ -962,7 +960,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::RecvMessageReady(
if (GPR_UNLIKELY(
(!call_attempt->recv_message_.has_value() || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
"(nullptr message and recv_trailing_metadata pending)",
@ -1126,7 +1124,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got recv_trailing_metadata_ready, error=%s",
@ -1153,7 +1151,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
batch_data->batch_.payload->recv_trailing_metadata.recv_trailing_metadata;
GetCallStatus(calld->deadline_, md_batch, error, &status, &server_pushback,
&is_lb_drop, &stream_network_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call finished, status=%s "
"server_pushback=%s is_lb_drop=%d stream_network_state=%s",
@ -1274,7 +1272,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
}
}
if (have_pending_send_ops) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: starting next batch for pending "
"send op(s)",
@ -1289,7 +1287,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete, error=%s, batch=%s",
@ -1311,7 +1309,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnComplete(
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
calld->chand_, calld, call_attempt);
}
@ -1365,7 +1363,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::OnCompleteForCancelOp(
RefCountedPtr<BatchData> batch_data(static_cast<BatchData*>(arg));
CallAttempt* call_attempt = batch_data->call_attempt_;
RetryFilter::LegacyCallData* calld = call_attempt->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p batch_data=%p: "
"got on_complete for cancel_stream batch, error=%s, batch=%s",
@ -1409,7 +1407,7 @@ void RetryFilter::LegacyCallData::CallAttempt::BatchData::
void RetryFilter::LegacyCallData::CallAttempt::BatchData::
AddRetriableSendMessageOp() {
auto* calld = call_attempt_->calld_;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(
GPR_INFO,
"chand=%p calld=%p attempt=%p: starting calld->send_messages[%" PRIuPTR
@ -1498,7 +1496,7 @@ grpc_error_handle RetryFilter::LegacyCallData::Init(
grpc_call_element* elem, const grpc_call_element_args* args) {
auto* chand = static_cast<RetryFilter*>(elem->channel_data);
new (elem->call_data) RetryFilter::LegacyCallData(chand, *args);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: created call", chand,
elem->call_data);
}
@ -1580,8 +1578,7 @@ RetryFilter::LegacyCallData::~LegacyCallData() {
void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
grpc_transport_stream_op_batch* batch) {
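// Surface batches are logged only when retry tracing is on and generic
// channel tracing is off, hence the explicit two-flag check below.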
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace) &&
!GRPC_TRACE_FLAG_ENABLED(grpc_trace_channel)) {
if (GRPC_TRACE_FLAG_ENABLED(retry) && !GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: batch started from surface: %s",
chand_, this,
grpc_transport_stream_op_batch_string(batch, false).c_str());
@ -1604,7 +1601,7 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (GPR_UNLIKELY(batch->cancel_stream)) {
// Save cancel_error in case subsequent batches are started.
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_,
this, StatusToString(cancelled_from_surface_).c_str());
}
@ -1627,7 +1624,7 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
}
// Cancel retry timer if needed.
if (retry_timer_handle_.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling retry timer", chand_,
this);
}
@ -1673,7 +1670,7 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
if (!retry_codepath_started_ && retry_committed_ &&
(retry_policy_ == nullptr ||
!retry_policy_->per_attempt_recv_timeout().has_value())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retry committed before first attempt; "
"creating LB call",
@ -1692,7 +1689,7 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
// Otherwise, create a call attempt.
// The attempt will automatically start any necessary replays or
// pending batches.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: creating call attempt", chand_,
this);
}
@ -1701,7 +1698,7 @@ void RetryFilter::LegacyCallData::StartTransportStreamOpBatch(
return;
}
// Send batches to call attempt.
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: starting batch on attempt=%p", chand_,
this, call_attempt_.get());
}
@ -1759,7 +1756,7 @@ void RetryFilter::LegacyCallData::MaybeCacheSendOpsForBatch(
}
void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_initial_metadata",
chand_, this);
}
@ -1768,7 +1765,7 @@ void RetryFilter::LegacyCallData::FreeCachedSendInitialMetadata() {
void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
if (send_messages_[idx].slices != nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying send_messages[%" PRIuPTR "]",
chand_, this, idx);
@ -1778,7 +1775,7 @@ void RetryFilter::LegacyCallData::FreeCachedSendMessage(size_t idx) {
}
void RetryFilter::LegacyCallData::FreeCachedSendTrailingMetadata() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: destroying send_trailing_metadata",
chand_, this);
}
@ -1817,7 +1814,7 @@ RetryFilter::LegacyCallData::PendingBatch*
RetryFilter::LegacyCallData::PendingBatchesAdd(
grpc_transport_stream_op_batch* batch) {
const size_t idx = GetBatchIndex(batch);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR,
chand_, this, idx);
@ -1848,7 +1845,7 @@ RetryFilter::LegacyCallData::PendingBatchesAdd(
// ops have already been sent, and we commit to that attempt.
if (GPR_UNLIKELY(bytes_buffered_for_retry_ >
chand_->per_rpc_retry_buffer_size())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing",
chand_, this);
@ -1885,7 +1882,7 @@ void RetryFilter::LegacyCallData::MaybeClearPendingBatch(
(!batch->recv_trailing_metadata ||
batch->payload->recv_trailing_metadata.recv_trailing_metadata_ready ==
nullptr)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand_,
this);
}
@ -1908,7 +1905,7 @@ void RetryFilter::LegacyCallData::FailPendingBatchInCallCombiner(
// This is called via the call combiner, so access to calld is synchronized.
void RetryFilter::LegacyCallData::PendingBatchesFail(grpc_error_handle error) {
CHECK(!error.ok());
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
if (pending_batches_[i].batch != nullptr) ++num_batches;
@ -1942,7 +1939,7 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
PendingBatch* pending = &pending_batches_[i];
grpc_transport_stream_op_batch* batch = pending->batch;
if (batch != nullptr && predicate(batch)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: %s pending batch at index %" PRIuPTR,
chand_, this, log_message, i);
@ -1960,7 +1957,7 @@ RetryFilter::LegacyCallData::PendingBatchFind(const char* log_message,
void RetryFilter::LegacyCallData::RetryCommit(CallAttempt* call_attempt) {
if (retry_committed_) return;
retry_committed_ = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand_, this);
}
if (call_attempt != nullptr) {
@ -1994,7 +1991,7 @@ void RetryFilter::LegacyCallData::StartRetryTimer(
} else {
next_attempt_timeout = retry_backoff_.NextAttemptTime() - Timestamp::Now();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRId64 " ms", chand_,
this, next_attempt_timeout.millis());
@ -2027,7 +2024,7 @@ void RetryFilter::LegacyCallData::OnRetryTimerLocked(
void RetryFilter::LegacyCallData::AddClosureToStartTransparentRetry(
CallCombinerClosureList* closures) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(retry)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: scheduling transparent retry", chand_,
this);
}

@ -94,9 +94,6 @@ namespace grpc_core {
using ::grpc_event_engine::experimental::EventEngine;
TraceFlag grpc_trace_subchannel(false, "subchannel");
DebugOnlyTraceFlag grpc_trace_subchannel_refcount(false, "subchannel_refcount");
//
// ConnectedSubchannel
//
@ -105,8 +102,7 @@ ConnectedSubchannel::ConnectedSubchannel(
const ChannelArgs& args,
RefCountedPtr<channelz::SubchannelNode> channelz_subchannel)
: RefCounted<ConnectedSubchannel>(
GRPC_TRACE_FLAG_ENABLED(grpc_trace_subchannel_refcount)
? "ConnectedSubchannel"
GRPC_TRACE_FLAG_ENABLED(subchannel_refcount) ? "ConnectedSubchannel"
: nullptr),
args_(args),
channelz_subchannel_(std::move(channelz_subchannel)) {}
@ -421,7 +417,7 @@ class Subchannel::ConnectedSubchannelStateWatcher final
if (c->connected_subchannel_ == nullptr) return;
if (new_state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
new_state == GRPC_CHANNEL_SHUTDOWN) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO,
"subchannel %p %s: Connected subchannel %p reports %s: %s", c,
c->key_.ToString().c_str(), c->connected_subchannel_.get(),
@ -520,8 +516,8 @@ BackOff::Options ParseArgsForBackoffValues(const ChannelArgs& args,
Subchannel::Subchannel(SubchannelKey key,
OrphanablePtr<SubchannelConnector> connector,
const ChannelArgs& args)
: DualRefCounted<Subchannel>(
GRPC_TRACE_FLAG_ENABLED(grpc_trace_subchannel_refcount) ? "Subchannel"
: DualRefCounted<Subchannel>(GRPC_TRACE_FLAG_ENABLED(subchannel_refcount)
? "Subchannel"
: nullptr),
key_(std::move(key)),
args_(args),
@ -606,7 +602,7 @@ void Subchannel::ThrottleKeepaliveTime(int new_keepalive_time) {
// Only update the value if the new keepalive time is larger.
if (new_keepalive_time > keepalive_time_) {
keepalive_time_ = new_keepalive_time;
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO, "subchannel %p %s: throttling keepalive time to %d",
this, key_.ToString().c_str(), new_keepalive_time);
}
@ -881,7 +877,7 @@ bool Subchannel::PublishTransportLocked() {
}
connecting_result_.Reset();
// Publish.
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_subchannel)) {
if (GRPC_TRACE_FLAG_ENABLED(subchannel)) {
gpr_log(GPR_INFO, "subchannel %p %s: new connected subchannel at %p", this,
key_.ToString().c_str(), connected_subchannel_.get());
}

@ -36,8 +36,6 @@
namespace grpc_core {
TraceFlag grpc_subchannel_pool_trace(false, "subchannel_pool");
SubchannelKey::SubchannelKey(const grpc_resolved_address& address,
const ChannelArgs& args)
: address_(address), args_(args) {}

@ -36,8 +36,6 @@ namespace grpc_core {
class Subchannel;
extern TraceFlag grpc_subchannel_pool_trace;
// A key that can uniquely identify a subchannel.
class SubchannelKey final {
public:
@ -68,7 +66,7 @@ class SubchannelKey final {
class SubchannelPoolInterface : public RefCounted<SubchannelPoolInterface> {
public:
SubchannelPoolInterface()
: RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_subchannel_pool_trace)
: RefCounted(GRPC_TRACE_FLAG_ENABLED(subchannel_pool)
? "SubchannelPoolInterface"
: nullptr) {}
~SubchannelPoolInterface() override {}

@ -46,8 +46,6 @@
namespace grpc_core {
TraceFlag grpc_backend_metric_filter_trace(false, "backend_metric_filter");
const NoInterceptor BackendMetricFilter::Call::OnClientInitialMetadata;
const NoInterceptor BackendMetricFilter::Call::OnServerInitialMetadata;
const NoInterceptor BackendMetricFilter::Call::OnClientToServerMessage;
@ -131,20 +129,20 @@ void BackendMetricFilter::Call::OnServerTrailingMetadata(ServerMetadata& md) {
if (md.get(GrpcCallWasCancelled()).value_or(false)) return;
auto* ctx = MaybeGetContext<BackendMetricProvider>();
if (ctx == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_backend_metric_filter_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
gpr_log(GPR_INFO, "[%p] No BackendMetricProvider.", this);
}
return;
}
absl::optional<std::string> serialized = MaybeSerializeBackendMetrics(ctx);
if (serialized.has_value() && !serialized->empty()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_backend_metric_filter_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
gpr_log(GPR_INFO, "[%p] Backend metrics serialized. size: %" PRIuPTR,
this, serialized->size());
}
md.Set(EndpointLoadMetricsBinMetadata(),
Slice::FromCopiedString(std::move(*serialized)));
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_backend_metric_filter_trace)) {
} else if (GRPC_TRACE_FLAG_ENABLED(backend_metric_filter)) {
gpr_log(GPR_INFO, "[%p] No backend metrics.", this);
}
}

@ -68,12 +68,11 @@ const auto kDefaultMaxConnectionAgeGrace = Duration::Infinity();
const auto kDefaultMaxConnectionIdle = Duration::Infinity();
const auto kMaxConnectionAgeJitter = 0.1;
TraceFlag grpc_trace_client_idle_filter(false, "client_idle_filter");
} // namespace
#define GRPC_IDLE_FILTER_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_client_idle_filter)) { \
if (GRPC_TRACE_FLAG_ENABLED(client_idle_filter)) { \
gpr_log(GPR_INFO, "(client idle filter) " format, ##__VA_ARGS__); \
} \
} while (0)
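// Hypothetical usage example: GRPC_IDLE_FILTER_LOG("timer fired after %d ms", delay_ms);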

@ -53,7 +53,6 @@
namespace grpc_core {
TraceFlag grpc_fault_injection_filter_trace(false, "fault_injection_filter");
const NoInterceptor FaultInjectionFilter::Call::OnServerInitialMetadata;
const NoInterceptor FaultInjectionFilter::Call::OnServerTrailingMetadata;
const NoInterceptor FaultInjectionFilter::Call::OnClientToServerMessage;
@ -151,7 +150,7 @@ FaultInjectionFilter::FaultInjectionFilter(ChannelFilter::Args filter_args)
ArenaPromise<absl::Status> FaultInjectionFilter::Call::OnClientInitialMetadata(
ClientMetadata& md, FaultInjectionFilter* filter) {
auto decision = filter->MakeInjectionDecision(md);
if (GRPC_TRACE_FLAG_ENABLED(grpc_fault_injection_filter_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(fault_injection_filter)) {
gpr_log(GPR_INFO, "chand=%p: Fault injection triggered %s", this,
decision.ToString().c_str());
}

@ -49,7 +49,6 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/telemetry/call_tracer.h"
@ -114,7 +113,7 @@ ChannelCompression::ChannelCompression(const ChannelArgs& args)
MessageHandle ChannelCompression::CompressMessage(
MessageHandle message, grpc_compression_algorithm algorithm) const {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
gpr_log(GPR_INFO, "CompressMessage: len=%" PRIdPTR " alg=%d flags=%d",
message->payload()->Length(), algorithm, message->flags());
}
@ -138,7 +137,7 @@ MessageHandle ChannelCompression::CompressMessage(
// If we achieved compression send it as compressed, otherwise send it as (to
// avoid spending cycles on the receiver decompressing).
if (did_compress) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
const char* algo_name;
const size_t before_size = payload->Length();
const size_t after_size = tmp.Length();
@ -156,7 +155,7 @@ MessageHandle ChannelCompression::CompressMessage(
call_tracer->RecordSendCompressedMessage(*message->payload());
}
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
const char* algo_name;
CHECK(grpc_compression_algorithm_name(algorithm, &algo_name));
gpr_log(GPR_INFO,
@ -170,7 +169,7 @@ MessageHandle ChannelCompression::CompressMessage(
absl::StatusOr<MessageHandle> ChannelCompression::DecompressMessage(
bool is_client, MessageHandle message, DecompressArgs args) const {
if (GRPC_TRACE_FLAG_ENABLED(grpc_compression_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(compression)) {
gpr_log(GPR_INFO, "DecompressMessage: len=%" PRIdPTR " max=%d alg=%d",
message->payload()->Length(),
args.max_recv_message_length.value_or(-1), args.algorithm);

@ -44,7 +44,6 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
namespace grpc_core {
@ -140,7 +139,7 @@ ServerMetadataHandle HttpServerFilter::Call::OnClientInitialMetadata(
}
void HttpServerFilter::Call::OnServerInitialMetadata(ServerMetadata& md) {
if (grpc_call_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(call)) {
gpr_log(GPR_INFO, "%s[http-server] Write metadata",
GetContext<Activity>()->DebugTag().c_str());
}

@ -40,7 +40,6 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@ -160,7 +159,7 @@ ServerMetadataHandle CheckPayload(const Message& msg,
absl::optional<uint32_t> max_length,
bool is_client, bool is_send) {
if (!max_length.has_value()) return nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call)) {
gpr_log(GPR_INFO, "%s[message_size] %s len:%" PRIdPTR " max:%d",
GetContext<Activity>()->DebugTag().c_str(),
is_send ? "send" : "recv", msg.payload()->Length(), *max_length);

@ -57,7 +57,6 @@
namespace grpc_core {
TraceFlag grpc_stateful_session_filter_trace(false, "stateful_session_filter");
const NoInterceptor StatefulSessionFilter::Call::OnClientToServerMessage;
const NoInterceptor StatefulSessionFilter::Call::OnClientToServerHalfClose;
const NoInterceptor StatefulSessionFilter::Call::OnServerToClientMessage;

@ -1,21 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/ext/transport/chaotic_good/chaotic_good_transport.h"
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_chaotic_good_trace(false, "chaotic_good");
namespace grpc_core {} // namespace grpc_core

@ -33,8 +33,6 @@
#include "src/core/lib/promise/try_seq.h"
#include "src/core/lib/transport/promise_endpoint.h"
extern grpc_core::TraceFlag grpc_chaotic_good_trace;
namespace grpc_core {
namespace chaotic_good {
@ -55,7 +53,7 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
auto WriteFrame(const FrameInterface& frame) {
auto buffers = frame.Serialize(&encoder_);
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: WriteFrame to:%s %s",
ResolvedAddressToString(control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
@ -76,7 +74,7 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
auto frame_header =
FrameHeader::Parse(reinterpret_cast<const uint8_t*>(
GRPC_SLICE_START_PTR(read_buffer.c_slice())));
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: ReadHeader from:%s %s",
ResolvedAddressToString(control_endpoint_.GetPeerAddress())
.value_or("<<unknown peer address>>")
@ -125,7 +123,7 @@ class ChaoticGoodTransport : public RefCounted<ChaoticGoodTransport> {
FrameLimits limits) {
auto s = frame.Deserialize(&parser_, header, bitgen_, arena,
std::move(buffers), limits);
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: DeserializeFrame %s",
s.ok() ? frame.ToString().c_str() : s.ToString().c_str());
}

@ -313,7 +313,7 @@ void ChaoticGoodConnector::OnHandshakeDone(void* arg, grpc_error_handle error) {
},
EventEngineWakeupScheduler(self->event_engine_),
[self](absl::Status status) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "ChaoticGoodConnector::OnHandshakeDone: %s",
status.ToString().c_str());
}

@ -259,7 +259,7 @@ auto ChaoticGoodClientTransport::CallOutboundLoop(uint32_t stream_id,
// Wait for initial metadata then send it out.
call_handler.PullClientInitialMetadata(),
[send_fragment](ClientMetadataHandle md) mutable {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: Sending initial metadata: %s",
md->DebugString().c_str());
}
@ -298,7 +298,7 @@ void ChaoticGoodClientTransport::StartCall(CallHandler call_handler) {
const uint32_t stream_id = MakeStream(call_handler);
return Map(CallOutboundLoop(stream_id, call_handler),
[stream_id, this](absl::Status result) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: Call %d finished with %s",
stream_id, result.ToString().c_str());
}

@ -98,7 +98,7 @@ ChaoticGoodServerListener::~ChaoticGoodServerListener() {
absl::StatusOr<int> ChaoticGoodServerListener::Bind(
grpc_event_engine::experimental::EventEngine::ResolvedAddress addr) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
auto str = grpc_event_engine::experimental::ResolvedAddressToString(addr);
LOG(INFO) << "CHAOTIC_GOOD: Listen on "
<< (str.ok() ? str->c_str() : str.status().ToString());
@ -139,9 +139,9 @@ absl::Status ChaoticGoodServerListener::StartListening() {
CHECK(ee_listener_ != nullptr);
auto status = ee_listener_->Start();
if (!status.ok()) {
LOG(ERROR) << "Start listening failed: " << status.ToString();
} else if (grpc_chaotic_good_trace.enabled()) {
LOG(INFO) << "CHAOTIC_GOOD: Started listening";
LOG(ERROR) << "Start listening failed: " << status;
} else {
GRPC_TRACE_LOG(chaotic_good, INFO) << "CHAOTIC_GOOD: Started listening";
}
return status;
}
@ -159,9 +159,7 @@ ChaoticGoodServerListener::ActiveConnection::~ActiveConnection() {
}
void ChaoticGoodServerListener::ActiveConnection::Orphan() {
if (grpc_chaotic_good_trace.enabled()) {
LOG(INFO) << "ActiveConnection::Orphan() " << this;
}
GRPC_TRACE_LOG(chaotic_good, INFO) << "ActiveConnection::Orphan() " << this;
if (handshaking_state_ != nullptr) {
handshaking_state_->Shutdown();
handshaking_state_.reset();
@ -299,7 +297,7 @@ auto ChaoticGoodServerListener::ActiveConnection::HandshakingState::
},
[self](PromiseEndpoint ret) -> absl::Status {
MutexLock lock(&self->connection_->listener_->mu_);
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(
GPR_INFO, "%p Data endpoint setup done: shutdown=%s",
self->connection_.get(),
@ -455,9 +453,7 @@ Timestamp ChaoticGoodServerListener::ActiveConnection::HandshakingState::
}
void ChaoticGoodServerListener::Orphan() {
if (grpc_chaotic_good_trace.enabled()) {
LOG(INFO) << "ChaoticGoodServerListener::Orphan()";
}
GRPC_TRACE_LOG(chaotic_good, INFO) << "ChaoticGoodServerListener::Orphan()";
{
absl::flat_hash_set<OrphanablePtr<ActiveConnection>> connection_list;
MutexLock lock(&mu_);

@ -74,7 +74,7 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
CallInitiator call_initiator, ClientFragmentFrame frame,
uint32_t stream_id) {
DCHECK(frame.headers == nullptr);
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: PushFragmentIntoCall: frame=%s",
frame.ToString().c_str());
}
@ -87,7 +87,7 @@ auto ChaoticGoodServerTransport::PushFragmentIntoCall(
[]() -> StatusFlag { return Success{}; }),
[this, call_initiator, end_of_stream = frame.end_of_stream,
stream_id](StatusFlag status) mutable -> StatusFlag {
if (!status.ok() && grpc_chaotic_good_trace.enabled()) {
if (!status.ok() && GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: Failed PushFragmentIntoCall");
}
if (end_of_stream || !status.ok()) {
@ -135,7 +135,7 @@ auto ChaoticGoodServerTransport::MaybePushFragmentIntoCall(
auto ChaoticGoodServerTransport::SendFragment(
ServerFragmentFrame frame, MpscSender<ServerFrame> outgoing_frames,
CallInitiator call_initiator) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO, "CHAOTIC_GOOD: SendFragment: frame=%s",
frame.ToString().c_str());
}
@ -187,7 +187,7 @@ auto ChaoticGoodServerTransport::SendCallInitialMetadataAndBody(
call_initiator.PullServerInitialMetadata(),
[stream_id, outgoing_frames, call_initiator,
this](absl::optional<ServerMetadataHandle> md) mutable {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO,
"CHAOTIC_GOOD: SendCallInitialMetadataAndBody: md=%s",
md.has_value() ? (*md)->DebugString().c_str() : "null");
@ -213,7 +213,7 @@ auto ChaoticGoodServerTransport::CallOutboundLoop(
return Seq(Map(SendCallInitialMetadataAndBody(stream_id, outgoing_frames,
call_initiator),
[stream_id](absl::Status main_body_result) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_DEBUG,
"CHAOTIC_GOOD: CallOutboundLoop: stream_id=%d "
"main_body_result=%s",
@ -345,7 +345,7 @@ auto ChaoticGoodServerTransport::OnTransportActivityDone(
absl::string_view activity) {
return [self = RefAsSubclass<ChaoticGoodServerTransport>(),
activity](absl::Status status) {
if (grpc_chaotic_good_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(chaotic_good)) {
gpr_log(GPR_INFO,
"CHAOTIC_GOOD: OnTransportActivityDone: activity=%s status=%s",
std::string(activity).c_str(), status.ToString().c_str());

@ -64,7 +64,6 @@
#include "src/core/ext/transport/chttp2/transport/frame_rst_stream.h"
#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.h"
@ -95,7 +94,6 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/resource_quota/trace.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
@ -143,7 +141,6 @@ static bool g_default_server_keepalive_permit_without_calls = false;
#define GRPC_ARG_HTTP_TARPIT_MAX_DURATION_MS "grpc.http.tarpit_max_duration_ms"
#define MAX_CLIENT_STREAM_ID 0x7fffffffu
grpc_core::TraceFlag grpc_keepalive_trace(false, "http_keepalive");
// forward declarations of various callbacks that we'll build closures around
static void write_action_begin_locked(
@ -827,10 +824,9 @@ grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
t->streams_allocated.fetch_add(1, std::memory_order_relaxed);
if (server_data) {
id = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(server_data));
if (grpc_http_trace.enabled()) {
VLOG(2) << "HTTP:" << t << "/" << this << " creating accept stream " << id
GRPC_TRACE_VLOG(http, 2)
<< "HTTP:" << t << "/" << this << " creating accept stream " << id
<< " [from " << server_data << "]";
}
*t->accepting_stream = this;
t->stream_map.emplace(id, this);
post_destructive_reclaimer(t);
@ -1061,10 +1057,9 @@ static void write_action(grpc_chttp2_transport* t) {
if (max_frame_size == 0) {
max_frame_size = INT_MAX;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t << "]: Write "
GRPC_TRACE_LOG(http2_ping, INFO)
<< (t->is_client ? "CLIENT" : "SERVER") << "[" << t << "]: Write "
<< t->outbuf.Length() << " bytes";
}
t->write_size_policy.BeginWrite(t->outbuf.Length());
grpc_endpoint_write(t->ep, t->outbuf.c_slice_buffer(),
grpc_core::InitTransportClosure<write_action_end>(
@ -1075,10 +1070,8 @@ static void write_action(grpc_chttp2_transport* t) {
static void write_action_end(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
auto* tp = t.get();
if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
LOG(INFO) << (t->is_client ? "CLIENT" : "SERVER") << "[" << t.get()
<< "]: Finish write";
}
GRPC_TRACE_LOG(http2_ping, INFO) << (t->is_client ? "CLIENT" : "SERVER")
<< "[" << t.get() << "]: Finish write";
tp->combiner->Run(grpc_core::InitTransportClosure<write_action_end_locked>(
std::move(t), &tp->write_action_end_locked),
error);
@ -1295,7 +1288,7 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
@ -1365,7 +1358,7 @@ static void perform_stream_op_locked(void* stream_op,
s->traced = op->is_traced;
s->call_tracer = CallTracerIfSampled(s);
s->tcp_tracer = TcpTracerIfSampled(s);
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO,
"perform_stream_op_locked[s=%p; op=%p]: %s; on_complete = %p", s,
op, grpc_transport_stream_op_batch_string(op, false).c_str(),
@ -1632,7 +1625,7 @@ void grpc_chttp2_transport::PerformStreamOp(
}
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_stream_op[s=%p; op=%p]: %s", s, op,
grpc_transport_stream_op_batch_string(op, false).c_str());
}
@ -1976,7 +1969,7 @@ static void perform_transport_op_locked(void* stream_op,
}
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", this,
grpc_transport_op_string(op).c_str());
}
@ -2029,7 +2022,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
// Lambda is immediately invoked as a big scoped section that can be
// exited out of at any point by returning.
[&]() {
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_message %p final_metadata_requested=%d "
"seen_error=%d",
@ -2045,7 +2038,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
int64_t min_progress_size;
auto r = grpc_deframe_unprocessed_incoming_frames(
s, &min_progress_size, &**s->recv_message, s->recv_message_flags);
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG, "Deframe data frame: %s",
grpc_core::PollToString(r, [](absl::Status r) {
return r.ToString();
@ -2101,7 +2094,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) {
grpc_chttp2_maybe_complete_recv_message(t, s);
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_DEBUG,
"maybe_complete_recv_trailing_metadata cli=%d s=%p closure=%p "
"read_closed=%d "
@ -2312,7 +2305,7 @@ grpc_chttp2_transport::RemovedStreamHandle grpc_chttp2_mark_stream_closed(
grpc_chttp2_transport* t, grpc_chttp2_stream* s, int close_reads,
int close_writes, grpc_error_handle error) {
grpc_chttp2_transport::RemovedStreamHandle rsh;
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(
GPR_DEBUG, "MARK_STREAM_CLOSED: t=%p s=%p(id=%d) %s [%s]", t, s, s->id,
(close_reads && close_writes)
@ -2743,8 +2736,8 @@ static void read_action_locked(
// got an incoming read, cancel any pending keepalive timers
t->keepalive_incoming_data_wanted = false;
if (t->keepalive_ping_timeout_handle != TaskHandle::kInvalid) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO,
"%s[%p]: Clear keepalive timer because data was received",
t->is_client ? "CLIENT" : "SERVER", t.get());
@ -2797,7 +2790,7 @@ static void start_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void start_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
@ -2824,7 +2817,7 @@ static void finish_bdp_ping(grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
static void finish_bdp_ping_locked(
grpc_core::RefCountedPtr<grpc_chttp2_transport> t,
grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s",
std::string(t->peer_string.as_string_view()).c_str(),
grpc_core::StatusToString(error).c_str());
@ -2968,8 +2961,8 @@ static void finish_keepalive_ping_locked(
grpc_error_handle error) {
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Finish keepalive ping",
std::string(t->peer_string.as_string_view()).c_str());
}
@ -2990,8 +2983,8 @@ static void maybe_reset_keepalive_ping_timer_locked(grpc_chttp2_transport* t) {
t->event_engine->Cancel(t->keepalive_ping_timer_handle)) {
// Cancel succeeds, resets the keepalive ping timer. Note that we don't
// need to Ref or Unref here since we still hold the Ref.
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s: Keepalive ping cancelled. Resetting timer.",
std::string(t->peer_string.as_string_view()).c_str());
}
@ -3090,7 +3083,7 @@ static void benign_reclaimer_locked(
if (error.ok() && t->stream_map.empty()) {
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
std::string(t->peer_string.as_string_view()).c_str());
}
@ -3099,7 +3092,7 @@ static void benign_reclaimer_locked(
grpc_core::StatusIntProperty::kHttp2Error,
GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
@ -3119,7 +3112,7 @@ static void destructive_reclaimer_locked(
if (error.ok() && !t->stream_map.empty()) {
// As stream_map is a hash map, this selects effectively a random stream.
grpc_chttp2_stream* s = t->stream_map.begin()->second;
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d",
std::string(t->peer_string.as_string_view()).c_str(), s->id);
}

@ -40,11 +40,6 @@
#include "src/core/lib/transport/transport.h"
#include "src/core/telemetry/call_tracer.h"
extern grpc_core::TraceFlag grpc_keepalive_trace;
extern grpc_core::TraceFlag grpc_trace_http2_stream_state;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_hpack_parser;
/// Creates a CHTTP2 Transport. This takes ownership of a \a resource_user ref
/// from the caller; if the caller still needs the resource_user after creating
/// a transport, the caller must take another ref.

@ -40,8 +40,6 @@
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/util/useful.h"
grpc_core::TraceFlag grpc_flowctl_trace(false, "flowctl");
namespace grpc_core {
namespace chttp2 {
@ -235,7 +233,7 @@ void TransportFlowControl::UpdateSetting(
FlowControlAction& (FlowControlAction::*set)(FlowControlAction::Urgency,
uint32_t)) {
if (new_desired_value != *desired_value) {
if (grpc_flowctl_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(flowctl)) {
gpr_log(GPR_INFO, "[flowctl] UPDATE SETTING %s from %" PRId64 " to %d",
std::string(name).c_str(), *desired_value, new_desired_value);
}

@ -41,8 +41,6 @@
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/transport/bdp_estimator.h"
extern grpc_core::TraceFlag grpc_flowctl_trace;
namespace grpc {
namespace testing {
class TrickledCHTTP2; // to make this a friend

@ -37,9 +37,6 @@
#include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
#include "src/core/lib/debug/trace.h"
extern grpc_core::TraceFlag grpc_keepalive_trace;
extern grpc_core::TraceFlag grpc_http_trace;
grpc_slice grpc_chttp2_ping_create(uint8_t ack, uint64_t opaque_8bytes) {
grpc_slice slice = GRPC_SLICE_MALLOC(9 + 8);
uint8_t* p = GRPC_SLICE_START_PTR(slice);
@ -96,7 +93,7 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (p->byte == 8) {
CHECK(is_last);
if (p->is_ack) {
if (grpc_ping_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
gpr_log(GPR_INFO, "%s[%p]: received ping ack %" PRIx64,
t->is_client ? "CLIENT" : "SERVER", t, p->opaque_8bytes);
}
@ -105,7 +102,8 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (!t->is_client) {
const bool transport_idle =
t->keepalive_permit_without_calls == 0 && t->stream_map.empty();
if (grpc_keepalive_trace.enabled() || grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "SERVER[%p]: received ping %" PRIx64 ": %s", t,
p->opaque_8bytes,
t->ping_abuse_policy.GetDebugString(transport_idle).c_str());
@ -113,7 +111,7 @@ grpc_error_handle grpc_chttp2_ping_parser_parse(void* parser,
if (t->ping_abuse_policy.ReceivedOnePing(transport_idle)) {
grpc_chttp2_exceeded_ping_strikes(t);
}
} else if (grpc_ping_trace.enabled()) {
} else if (GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
gpr_log(GPR_INFO, "CLIENT[%p]: received ping %" PRIx64, t,
p->opaque_8bytes);
}

@ -30,7 +30,6 @@
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/ping_callbacks.h"
@ -111,7 +110,7 @@ grpc_error_handle grpc_chttp2_rst_stream_parser_parse(void* parser,
((static_cast<uint32_t>(p->reason_bytes[1])) << 16) |
((static_cast<uint32_t>(p->reason_bytes[2])) << 8) |
((static_cast<uint32_t>(p->reason_bytes[3])));
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO,
"[chttp2 transport=%p stream=%p] received RST_STREAM(reason=%d)",
t, s, reason);

@ -33,7 +33,6 @@
#include "src/core/ext/transport/chttp2/transport/flow_control.h"
#include "src/core/ext/transport/chttp2/transport/frame_goaway.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/lib/debug/trace.h"
@ -171,8 +170,8 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
t->initial_window_update +=
static_cast<int64_t>(parser->value) -
parser->incoming_settings->initial_window_size();
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_flowctl_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(flowctl)) {
gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
t->is_client ? "cli" : "svr",
static_cast<int>(t->initial_window_update));
@ -188,7 +187,7 @@ grpc_error_handle grpc_chttp2_settings_parser_parse(void* p,
"invalid value %u passed for %s", parser->value,
grpc_core::Http2Settings::WireIdToName(parser->id).c_str()));
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR",
std::string(t->peer_string.as_string_view()).c_str(),

@ -31,7 +31,6 @@
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
#include "src/core/ext/transport/chttp2/transport/hpack_encoder_table.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/debug/trace.h"
@ -118,7 +117,7 @@ void HPackCompressor::SetMaxUsableSize(uint32_t max_table_size) {
void HPackCompressor::SetMaxTableSize(uint32_t max_table_size) {
if (table_.SetMaxSize(std::min(max_usable_size_, max_table_size))) {
advertise_table_size_change_ = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "set max table size from encoder to %d",
max_table_size);
}

@ -59,8 +59,6 @@
namespace grpc_core {
TraceFlag grpc_trace_chttp2_hpack_parser(false, "chttp2_hpack_parser");
namespace {
// The alphabet used for base64 encoding binary metadata.
constexpr char kBase64Alphabet[] =
@ -733,7 +731,7 @@ class HPackParser::Parser {
bool FinishHeaderAndAddToTable(HPackTable::Memento md) {
// Log if desired
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_hpack_parser)) {
if (GRPC_TRACE_FLAG_ENABLED(chttp2_hpack_parser)) {
LogHeader(md);
}
// Emit whilst we own the metadata.
@ -758,7 +756,7 @@ class HPackParser::Parser {
void FinishHeaderOmitFromTable(const HPackTable::Memento& md) {
// Log if desired
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_hpack_parser)) {
if (GRPC_TRACE_FLAG_ENABLED(chttp2_hpack_parser)) {
LogHeader(md);
}
EmitHeader(md);

@ -35,7 +35,6 @@
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
#include "src/core/ext/transport/chttp2/transport/hpack_parse_result.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/slice/slice.h"
@ -99,9 +98,7 @@ void HPackTable::SetMaxBytes(uint32_t max_bytes) {
if (max_bytes_ == max_bytes) {
return;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
LOG(INFO) << "Update hpack parser max size to " << max_bytes;
}
GRPC_TRACE_LOG(http, INFO) << "Update hpack parser max size to " << max_bytes;
while (mem_used_ > max_bytes) {
EvictOne();
}
@ -111,9 +108,7 @@ void HPackTable::SetMaxBytes(uint32_t max_bytes) {
bool HPackTable::SetCurrentTableSize(uint32_t bytes) {
if (current_table_bytes_ == bytes) return true;
if (bytes > max_bytes_) return false;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
LOG(INFO) << "Update hpack parser table size to " << bytes;
}
GRPC_TRACE_LOG(http, INFO) << "Update hpack parser table size to " << bytes;
while (mem_used_ > bytes) {
EvictOne();
}

@ -1,19 +0,0 @@
// Copyright 2022 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_http_trace(false, "http");

@ -1,24 +0,0 @@
// Copyright 2022 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP_TRACE_H
#define GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP_TRACE_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
extern grpc_core::TraceFlag grpc_http_trace;
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP_TRACE_H

@ -807,11 +807,10 @@ void grpc_chttp2_settings_timeout(
#define GRPC_CHTTP2_CLIENT_CONNECT_STRLEN \
(sizeof(GRPC_CHTTP2_CLIENT_CONNECT_STRING) - 1)
// extern grpc_core::TraceFlag grpc_flowctl_trace;
//
#define GRPC_CHTTP2_IF_TRACING(stmt) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) { \
if (GRPC_TRACE_FLAG_ENABLED(http)) { \
(stmt); \
} \
} while (0)

@ -54,7 +54,6 @@
#include "src/core/ext/transport/chttp2/transport/hpack_parser.h"
#include "src/core/ext/transport/chttp2/transport/hpack_parser_table.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.h"
@ -78,8 +77,6 @@
using grpc_core::HPackParser;
grpc_core::TraceFlag grpc_trace_chttp2_new_stream(false, "chttp2_new_stream");
static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t,
size_t& requests_started);
static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
@ -334,7 +331,7 @@ absl::variant<size_t, absl::Status> grpc_chttp2_perform_read(
case GRPC_DTS_FH_8:
DCHECK_LT(cur, end);
t->incoming_stream_id |= (static_cast<uint32_t>(*cur));
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_INFO, "INCOMING[%p]: %s len:%d id:0x%08x", t,
FrameTypeString(t->incoming_frame_type, t->incoming_frame_flags)
.c_str(),
@ -455,7 +452,7 @@ static grpc_error_handle init_frame_parser(grpc_chttp2_transport* t,
case GRPC_CHTTP2_FRAME_GOAWAY:
return init_goaway_parser(t);
default:
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
}
return init_non_header_skip_frame_parser(t);
@ -715,8 +712,8 @@ static grpc_error_handle init_header_frame_parser(grpc_chttp2_transport* t,
gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
return init_header_skip_frame_parser(t, priority_type, is_eoh);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_new_stream)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(chttp2_new_stream)) {
gpr_log(GPR_INFO,
"[t:%p fd:%d peer:%s] Accepting new stream; "
"num_incoming_streams_before_settings_ack=%u",
@ -796,7 +793,7 @@ static grpc_error_handle init_window_update_frame_parser(
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (s == nullptr) {
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_ERROR, "Stream %d not found, ignoring WINDOW_UPDATE",
t->incoming_stream_id);
}
@ -888,18 +885,17 @@ static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
const grpc_slice& slice,
int is_last) {
grpc_chttp2_stream* s = t->incoming_stream;
if (grpc_http_trace.enabled()) {
VLOG(2) << "INCOMING[" << t << ";" << s << "]: Parse "
<< GRPC_SLICE_LENGTH(slice) << "b " << (is_last ? "last " : "")
<< "frame fragment with " << t->parser.name;
}
GRPC_TRACE_VLOG(http, 2) << "INCOMING[" << t << ";" << s << "]: Parse "
<< GRPC_SLICE_LENGTH(slice) << "b "
<< (is_last ? "last " : "") << "frame fragment with "
<< t->parser.name;
grpc_error_handle err =
t->parser.parser(t->parser.user_data, t, s, slice, is_last);
intptr_t unused;
if (GPR_LIKELY(err.ok())) {
return err;
}
if (grpc_http_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(http)) {
gpr_log(GPR_ERROR, "INCOMING[%p;%p]: Parse failed with %s", t, s,
err.ToString().c_str());
}

@ -23,8 +23,6 @@
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_ping_trace(false, "http2_ping");
namespace grpc_core {
void Chttp2PingCallbacks::OnPing(Callback on_start, Callback on_ack) {

@ -33,8 +33,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/time.h"
extern grpc_core::TraceFlag grpc_ping_trace;
namespace grpc_core {
class Chttp2PingCallbacks {

@ -44,8 +44,6 @@ static const char* stream_list_id_string(grpc_chttp2_stream_list_id id) {
GPR_UNREACHABLE_CODE(return "unknown");
}
grpc_core::TraceFlag grpc_trace_http2_stream_state(false, "http2_stream_state");
// core list management
static bool stream_list_empty(grpc_chttp2_transport* t,
@ -70,7 +68,7 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
s->included.clear(id);
}
*stream = s;
if (s && GRPC_TRACE_FLAG_ENABLED(grpc_trace_http2_stream_state)) {
if (s && GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
@ -92,7 +90,7 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
} else {
t->lists[id].tail = s->links[id].prev;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_http2_stream_state)) {
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}
@ -124,7 +122,7 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
}
t->lists[id].tail = s;
s->included.set(id);
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_http2_stream_state)) {
if (GRPC_TRACE_FLAG_ENABLED(http2_stream_state)) {
gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id));
}

@ -47,7 +47,6 @@
#include "src/core/ext/transport/chttp2/transport/frame_window_update.h"
#include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
#include "src/core/ext/transport/chttp2/transport/http2_settings.h"
#include "src/core/ext/transport/chttp2/transport/http_trace.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/ext/transport/chttp2/transport/legacy_frame.h"
#include "src/core/ext/transport/chttp2/transport/max_concurrent_streams_policy.h"
@ -133,10 +132,10 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->channelz_socket->RecordKeepaliveSent();
}
grpc_core::global_stats().IncrementHttp2PingsSent();
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
gpr_log(GPR_INFO, "%s[%p]: Ping %" PRIx64 " sent [%s]: %s",
t->is_client ? "CLIENT" : "SERVER", t, id,
std::string(t->peer_string.as_string_view()).c_str(),
@ -145,10 +144,10 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
},
[t](grpc_core::Chttp2PingRatePolicy::TooManyRecentPings) {
// need to receive something of substance before sending a ping again
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
gpr_log(GPR_INFO,
"%s[%p]: Ping delayed [%s]: too many recent pings: %s",
t->is_client ? "CLIENT" : "SERVER", t,
@ -158,10 +157,10 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
},
[t](grpc_core::Chttp2PingRatePolicy::TooSoon too_soon) {
// not enough elapsed time between successive pings
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http) ||
GRPC_TRACE_FLAG_ENABLED(bdp_estimator) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive) ||
GRPC_TRACE_FLAG_ENABLED(http2_ping)) {
gpr_log(
GPR_INFO,
"%s[%p]: Ping delayed [%s]: not enough time elapsed since last "
@ -207,7 +206,7 @@ static bool update_list(grpc_chttp2_transport* t, int64_t send_bytes,
static void report_stall(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
const char* staller) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_flowctl_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(flowctl)) {
gpr_log(
GPR_DEBUG,
"%s:%p stream %d moved to stalled list by %s. This is FULLY expected "
@ -728,7 +727,7 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
grpc_core::ExecCtx exec_ctx;
grpc_chttp2_ping_timeout(t);
});
if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace) && id.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) && id.has_value()) {
gpr_log(GPR_INFO,
"%s[%p]: Set ping timeout timer of %s for ping id %" PRIx64,
t->is_client ? "CLIENT" : "SERVER", t, timeout.ToString().c_str(),
@ -740,8 +739,8 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
t->keepalive_ping_timeout_handle !=
grpc_event_engine::experimental::EventEngine::TaskHandle::
kInvalid) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_ping_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(http2_ping) ||
GRPC_TRACE_FLAG_ENABLED(http_keepalive)) {
gpr_log(GPR_INFO, "%s[%p]: Set keepalive ping timeout timer of %s",
t->is_client ? "CLIENT" : "SERVER", t,
t->keepalive_timeout.ToString().c_str());

@ -67,10 +67,9 @@
#define GRPC_HEADER_SIZE_IN_BYTES 5
#define GRPC_FLUSH_READ_SIZE 4096
grpc_core::TraceFlag grpc_cronet_trace(false, "cronet");
#define CRONET_LOG(...) \
do { \
if (grpc_cronet_trace.enabled()) gpr_log(__VA_ARGS__); \
if (GRPC_TRACE_FLAG_ENABLED(cronet)) gpr_log(__VA_ARGS__); \
} while (0)
enum e_op_result {

@ -1,23 +0,0 @@
//
//
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
grpc_core::TraceFlag grpc_inproc_trace(false, "inproc");

@ -25,8 +25,6 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
const grpc_channel_args* args,
void* reserved);
extern grpc_core::TraceFlag grpc_inproc_trace;
namespace grpc_core {
std::pair<OrphanablePtr<Transport>, OrphanablePtr<Transport>>

@ -71,7 +71,7 @@
#define INPROC_LOG(...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_inproc_trace)) { \
if (GRPC_TRACE_FLAG_ENABLED(inproc)) { \
gpr_log(__VA_ARGS__); \
} \
} while (0)
@ -352,7 +352,7 @@ class CopySink {
void fill_in_metadata(inproc_stream* s, const grpc_metadata_batch* metadata,
grpc_metadata_batch* out_md, bool* markfilled) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_inproc_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(inproc)) {
log_metadata(metadata, s->t->is_client,
metadata->get_pointer(grpc_core::WaitForReady()) != nullptr);
}
@ -949,7 +949,7 @@ void inproc_transport::PerformStreamOp(grpc_stream* gs,
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_inproc_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(inproc)) {
if (op->send_initial_metadata) {
log_metadata(op->payload->send_initial_metadata.send_initial_metadata,
s->t->is_client, true);

@ -24,6 +24,4 @@ grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
const grpc_channel_args* args,
void* reserved);
extern grpc_core::TraceFlag grpc_inproc_trace;
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_INPROC_LEGACY_INPROC_TRANSPORT_H

@ -43,8 +43,6 @@
namespace grpc_core {
TraceFlag grpc_handshaker_trace(false, "handshaker");
namespace {
using ::grpc_event_engine::experimental::EventEngine;
@ -62,13 +60,12 @@ std::string HandshakerArgsString(HandshakerArgs* args) {
} // namespace
HandshakeManager::HandshakeManager()
: RefCounted(GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)
? "HandshakeManager"
: RefCounted(GRPC_TRACE_FLAG_ENABLED(handshaker) ? "HandshakeManager"
: nullptr) {}
void HandshakeManager::Add(RefCountedPtr<Handshaker> handshaker) {
MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
@ -94,7 +91,7 @@ void HandshakeManager::Shutdown(grpc_error_handle why) {
// on_handshake_done callback.
// Returns true if we've scheduled the on_handshake_done callback.
bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
", args=%s",
@ -123,7 +120,7 @@ bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
}
args_.args = ChannelArgs();
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(GPR_INFO,
"handshake_manager %p: handshaking complete -- scheduling "
"on_handshake_done with error=%s",
@ -136,7 +133,7 @@ bool HandshakeManager::CallNextHandshakerLocked(grpc_error_handle error) {
is_shutdown_ = true;
} else {
auto handshaker = handshakers_[index_];
if (GRPC_TRACE_FLAG_ENABLED(grpc_handshaker_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(handshaker)) {
gpr_log(
GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,

@ -51,7 +51,6 @@
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/resource_quota/trace.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/tsi/transport_security_grpc.h"
@ -146,8 +145,6 @@ struct secure_endpoint {
};
} // namespace
grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint");
static void destroy(secure_endpoint* ep) { delete ep; }
#ifndef NDEBUG
@ -157,7 +154,7 @@ static void destroy(secure_endpoint* ep) { delete ep; }
secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
static void secure_endpoint_unref(secure_endpoint* ep, const char* reason,
const char* file, int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_secure_endpoint)) {
if (GRPC_TRACE_FLAG_ENABLED(secure_endpoint)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SECENDP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
@ -170,7 +167,7 @@ static void secure_endpoint_unref(secure_endpoint* ep, const char* reason,
static void secure_endpoint_ref(secure_endpoint* ep, const char* reason,
const char* file, int line) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_secure_endpoint)) {
if (GRPC_TRACE_FLAG_ENABLED(secure_endpoint)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"SECENDP ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
@ -198,7 +195,7 @@ static void maybe_post_reclaimer(secure_endpoint* ep) {
grpc_core::ReclamationPass::kBenign,
[ep](absl::optional<grpc_core::ReclamationSweep> sweep) {
if (sweep.has_value()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(resource_quota)) {
gpr_log(GPR_INFO,
"secure endpoint: benign reclamation to free memory");
}
@ -235,7 +232,7 @@ static void flush_read_staging_buffer(secure_endpoint* ep, uint8_t** cur,
}
static void call_read_cb(secure_endpoint* ep, grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_secure_endpoint) &&
if (GRPC_TRACE_FLAG_ENABLED(secure_endpoint) &&
gpr_should_log(GPR_LOG_SEVERITY_INFO)) {
size_t i;
for (i = 0; i < ep->read_buffer->count; i++) {
@ -403,7 +400,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_secure_endpoint) &&
if (GRPC_TRACE_FLAG_ENABLED(secure_endpoint) &&
gpr_should_log(GPR_LOG_SEVERITY_INFO)) {
for (i = 0; i < slices->count; i++) {
char* data =

@ -28,8 +28,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/endpoint.h"
extern grpc_core::TraceFlag grpc_trace_secure_endpoint;
// Takes ownership of protector, zero_copy_protector, and to_wrap, and refs
// leftover_slices. If zero_copy_protector is not NULL, protector will never be
// used.

@ -31,14 +31,11 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_fwd.h"
#include "src/core/lib/channel/channel_stack_trace.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/util/alloc.h"
using grpc_event_engine::experimental::EventEngine;
grpc_core::TraceFlag grpc_trace_channel(false, "channel");
static int register_get_name_fn = []() {
grpc_core::NameFromChannelFilter = [](const grpc_channel_filter* filter) {
return filter->name;
@ -121,7 +118,7 @@ grpc_error_handle grpc_channel_stack_init(
const grpc_channel_filter** filters, size_t filter_count,
const grpc_core::ChannelArgs& channel_args, const char* name,
grpc_channel_stack* stack) {
if (grpc_trace_channel_stack.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel_stack)) {
LOG(INFO) << "CHANNEL_STACK: init " << name;
for (size_t i = 0; i < filter_count; i++) {
LOG(INFO) << "CHANNEL_STACK: filter " << filters[i]->name;

@ -364,11 +364,9 @@ void grpc_call_log_op(const char* file, int line, gpr_log_severity severity,
void grpc_channel_stack_no_post_init(grpc_channel_stack* stk,
grpc_channel_element* elem);
extern grpc_core::TraceFlag grpc_trace_channel;
#define GRPC_CALL_LOG_OP(sev, elem, op) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_channel)) { \
if (GRPC_TRACE_FLAG_ENABLED(channel)) { \
grpc_call_log_op(sev, elem, op); \
} \
} while (0)

@ -46,7 +46,6 @@
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"

@ -1,19 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/channel/channel_stack_trace.h"
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_trace_channel_stack(false, "channel_stack");

@ -1,24 +0,0 @@
// Copyright 2023 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_CHANNEL_CHANNEL_STACK_TRACE_H
#define GRPC_SRC_CORE_LIB_CHANNEL_CHANNEL_STACK_TRACE_H
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/trace.h"
extern grpc_core::TraceFlag grpc_trace_channel_stack;
#endif // GRPC_SRC_CORE_LIB_CHANNEL_CHANNEL_STACK_TRACE_H

@ -72,7 +72,6 @@
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"

@ -40,8 +40,6 @@
#include "src/core/lib/promise/seq.h"
#include "src/core/lib/slice/slice.h"
extern grpc_core::TraceFlag grpc_trace_channel;
namespace grpc_core {
namespace promise_filter_detail {
@ -212,7 +210,7 @@ void BaseCallData::CapturedBatch::ResumeWith(Flusher* releaser) {
uintptr_t& refcnt = *RefCountField(batch);
if (refcnt == 0) {
// refcnt==0 ==> cancelled
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%sRESUME BATCH REQUEST CANCELLED",
releaser->call()->DebugTag().c_str());
}
@ -268,7 +266,7 @@ BaseCallData::Flusher::~Flusher() {
auto* batch = static_cast<grpc_transport_stream_op_batch*>(p);
BaseCallData* call =
static_cast<BaseCallData*>(batch->handler_private.extra_arg);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "FLUSHER:forward batch via closure: %s",
grpc_transport_stream_op_batch_string(batch, false).c_str());
}
@ -280,7 +278,7 @@ BaseCallData::Flusher::~Flusher() {
if (call_->call() != nullptr && call_->call()->traced()) {
batch->is_traced = true;
}
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(
GPR_INFO, "FLUSHER:queue batch to forward in closure: %s",
grpc_transport_stream_op_batch_string(release_[i], false).c_str());
@ -293,7 +291,7 @@ BaseCallData::Flusher::~Flusher() {
"flusher_batch");
}
call_closures_.RunClosuresWithoutYielding(call_->call_combiner());
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "FLUSHER:forward batch: %s",
grpc_transport_stream_op_batch_string(release_[0], false).c_str());
}
@ -334,7 +332,7 @@ const char* BaseCallData::SendMessage::StateString(State state) {
}
void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s SendMessage.StartOp st=%s", base_->LogTag().c_str(),
StateString(state_));
}
@ -362,7 +360,7 @@ void BaseCallData::SendMessage::StartOp(CapturedBatch batch) {
template <typename T>
void BaseCallData::SendMessage::GotPipe(T* pipe_end) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s SendMessage.GotPipe st=%s", base_->LogTag().c_str(),
StateString(state_));
}
@ -410,7 +408,7 @@ bool BaseCallData::SendMessage::IsIdle() const {
void BaseCallData::SendMessage::OnComplete(absl::Status status) {
Flusher flusher(base_);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s SendMessage.OnComplete st=%s status=%s",
base_->LogTag().c_str(), StateString(state_),
status.ToString().c_str());
@ -441,7 +439,7 @@ void BaseCallData::SendMessage::OnComplete(absl::Status status) {
void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s SendMessage.Done st=%s md=%s",
base_->LogTag().c_str(), StateString(state_),
metadata.DebugString().c_str());
@ -484,7 +482,7 @@ void BaseCallData::SendMessage::Done(const ServerMetadata& metadata,
void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher,
bool allow_push_to_pipe) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s SendMessage.WakeInsideCombiner st=%s%s",
base_->LogTag().c_str(), StateString(state_),
state_ == State::kBatchCompleted
@ -518,7 +516,7 @@ void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher,
CHECK(push_.has_value());
auto r_push = (*push_)();
if (auto* p = r_push.value_if_ready()) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s SendMessage.WakeInsideCombiner push complete, result=%s",
base_->LogTag().c_str(), *p ? "true" : "false");
@ -532,7 +530,7 @@ void BaseCallData::SendMessage::WakeInsideCombiner(Flusher* flusher,
CHECK(next_.has_value());
auto r_next = (*next_)();
if (auto* p = r_next.value_if_ready()) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s SendMessage.WakeInsideCombiner next complete, "
"result.has_value=%s",
@ -617,7 +615,7 @@ const char* BaseCallData::ReceiveMessage::StateString(State state) {
}
void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ReceiveMessage.StartOp st=%s",
base_->LogTag().c_str(), StateString(state_));
}
@ -658,7 +656,7 @@ void BaseCallData::ReceiveMessage::StartOp(CapturedBatch& batch) {
template <typename T>
void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ReceiveMessage.GotPipe st=%s",
base_->LogTag().c_str(), StateString(state_));
}
@ -694,7 +692,7 @@ void BaseCallData::ReceiveMessage::GotPipe(T* pipe_end) {
}
void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ReceiveMessage.OnComplete st=%s status=%s",
base_->LogTag().c_str(), StateString(state_),
status.ToString().c_str());
@ -735,7 +733,7 @@ void BaseCallData::ReceiveMessage::OnComplete(absl::Status status) {
void BaseCallData::ReceiveMessage::Done(const ServerMetadata& metadata,
Flusher* flusher) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ReceiveMessage.Done st=%s md=%s",
base_->LogTag().c_str(), StateString(state_),
metadata.DebugString().c_str());
@ -794,7 +792,7 @@ void BaseCallData::ReceiveMessage::Done(const ServerMetadata& metadata,
void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
bool allow_push_to_pipe) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ReceiveMessage.WakeInsideCombiner st=%s push?=%s next?=%s "
"allow_push_to_pipe=%s",
@ -856,7 +854,7 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
CHECK(push_.has_value());
auto r_push = (*push_)();
if (auto* p = r_push.value_if_ready()) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ReceiveMessage.WakeInsideCombiner push complete: %s",
base_->LogTag().c_str(), *p ? "true" : "false");
@ -887,7 +885,7 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
p->cancelled() ? absl::CancelledError() : absl::OkStatus(),
"recv_message");
}
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ReceiveMessage.WakeInsideCombiner next complete: %s "
"new_state=%s",
@ -906,7 +904,7 @@ void BaseCallData::ReceiveMessage::WakeInsideCombiner(Flusher* flusher,
case State::kPulledFromPipe: {
CHECK(push_.has_value());
if ((*push_)().ready()) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ReceiveMessage.WakeInsideCombiner push complete",
base_->LogTag().c_str());
@ -1022,7 +1020,7 @@ class ClientCallData::PollContext {
void Run() {
DCHECK(HasContext<Arena>());
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run %s",
self_->LogTag().c_str(), self_->DebugString().c_str());
}
@ -1106,7 +1104,7 @@ class ClientCallData::PollContext {
case SendInitialState::kForwarded: {
// Poll the promise once since we're waiting for it.
Poll<ServerMetadataHandle> poll = self_->promise_();
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ClientCallData.PollContext.Run: poll=%s; %s",
self_->LogTag().c_str(),
PollToString(poll,
@ -1386,7 +1384,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
CapturedBatch batch(b);
Flusher flusher(this);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s StartBatch %s", LogTag().c_str(),
DebugString().c_str());
}
@ -1510,7 +1508,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ClientCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s Cancel error=%s", LogTag().c_str(),
error.ToString().c_str());
}
@ -1590,7 +1588,7 @@ void ClientCallData::StartPromise(Flusher* flusher) {
}
void ClientCallData::RecvInitialMetadataReady(grpc_error_handle error) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ClientCallData.RecvInitialMetadataReady %s error:%s md:%s",
LogTag().c_str(), DebugString().c_str(), error.ToString().c_str(),
@ -1674,7 +1672,7 @@ void ClientCallData::HookRecvTrailingMetadata(CapturedBatch batch) {
// - return a wrapper around PollTrailingMetadata as the promise.
ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
CallArgs call_args) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ClientCallData.MakeNextPromise %s", LogTag().c_str(),
DebugString().c_str());
}
@ -1737,7 +1735,7 @@ ArenaPromise<ServerMetadataHandle> ClientCallData::MakeNextPromise(
// All polls: await receiving the trailing metadata, then return it to the
// application.
Poll<ServerMetadataHandle> ClientCallData::PollTrailingMetadata() {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ClientCallData.PollTrailingMetadata %s",
LogTag().c_str(), DebugString().c_str());
}
@ -1788,7 +1786,7 @@ void ClientCallData::RecvTrailingMetadataReadyCallback(
void ClientCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
Flusher flusher(this);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s ClientCallData.RecvTrailingMetadataReady "
"recv_trailing_state=%s error=%s md=%s",
@ -2003,7 +2001,7 @@ ServerCallData::ServerCallData(grpc_call_element* elem,
}
ServerCallData::~ServerCallData() {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s ~ServerCallData %s", LogTag().c_str(),
DebugString().c_str());
}
@ -2031,7 +2029,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
Flusher flusher(this);
bool wake = false;
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s StartBatch: %s", LogTag().c_str(),
DebugString().c_str());
}
@ -2165,7 +2163,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ServerCallData::Completed(grpc_error_handle error,
bool tarpit_cancellation, Flusher* flusher) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(
GPR_DEBUG,
"%sServerCallData::Completed: send_trailing_state=%s "
@ -2291,7 +2289,7 @@ ArenaPromise<ServerMetadataHandle> ServerCallData::MakeNextPromise(
// All polls: await sending the trailing metadata, then forward it down the
// stack.
Poll<ServerMetadataHandle> ServerCallData::PollTrailingMetadata() {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s PollTrailingMetadata: %s", LogTag().c_str(),
StateString(send_trailing_state_));
}
@ -2322,7 +2320,7 @@ void ServerCallData::RecvTrailingMetadataReadyCallback(
}
void ServerCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s: RecvTrailingMetadataReady error=%s md=%s",
LogTag().c_str(), error.ToString().c_str(),
recv_trailing_metadata_->DebugString().c_str());
@ -2342,7 +2340,7 @@ void ServerCallData::RecvInitialMetadataReadyCallback(void* arg,
void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
Flusher flusher(this);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s: RecvInitialMetadataReady %s", LogTag().c_str(),
error.ToString().c_str());
}
@ -2407,7 +2405,7 @@ std::string ServerCallData::DebugString() const {
// Wakeup and poll the promise if appropriate.
void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
PollContext poll_ctx(this, flusher);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s: WakeInsideCombiner %s", LogTag().c_str(),
DebugString().c_str());
}
@ -2430,12 +2428,12 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
}
if (send_initial_metadata_->metadata_push_.has_value()) {
if ((*send_initial_metadata_->metadata_push_)().ready()) {
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s: WakeInsideCombiner: metadata_push done",
LogTag().c_str());
}
send_initial_metadata_->metadata_push_.reset();
} else if (grpc_trace_channel.enabled()) {
} else if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO, "%s: WakeInsideCombiner: metadata_push pending",
LogTag().c_str());
}
@ -2453,7 +2451,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
flusher,
send_initial_metadata_ == nullptr ||
send_initial_metadata_->state == SendInitialMetadata::kForwarded);
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_DEBUG,
"%s: After send_message WakeInsideCombiner %s is_idle=%s "
"is_forwarded=%s",
@ -2482,7 +2480,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
if (promise_.has_value()) {
Poll<ServerMetadataHandle> poll;
poll = promise_();
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(
GPR_INFO,
"%s: WakeInsideCombiner poll=%s; send_initial_metadata=%s "
@ -2502,7 +2500,7 @@ void ServerCallData::WakeInsideCombiner(Flusher* flusher) {
SendInitialMetadata::kQueuedAndPushedToPipe) {
CHECK(send_initial_metadata_->metadata_next_.has_value());
auto p = (*send_initial_metadata_->metadata_next_)();
if (grpc_trace_channel.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(channel)) {
gpr_log(GPR_INFO,
"%s: WakeInsideCombiner send_initial_metadata poll=%s",
LogTag().c_str(),

@ -24,97 +24,79 @@
#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/lib/config/config_vars.h"
#include "src/core/lib/gprpp/glob.h"
int grpc_tracer_set_enabled(const char* name, int enabled);
namespace grpc_core {
TraceFlag* TraceFlagList::root_tracer_ = nullptr;
bool TraceFlagList::Set(absl::string_view name, bool enabled) {
TraceFlag* t;
if (name == "all") {
for (t = root_tracer_; t; t = t->next_tracer_) {
t->set_enabled(enabled);
}
} else if (name == "list_tracers") {
LogAllTracers();
} else if (name == "refcount") {
for (t = root_tracer_; t; t = t->next_tracer_) {
if (absl::StrContains(t->name_, "refcount")) {
t->set_enabled(enabled);
}
}
} else {
bool found = false;
for (t = root_tracer_; t; t = t->next_tracer_) {
if (name == t->name_) {
t->set_enabled(enabled);
found = true;
}
}
// check for unknowns, but ignore "", to allow GRPC_TRACE=
if (!found && !name.empty()) {
gpr_log(GPR_ERROR, "Unknown trace var: '%s'", std::string(name).c_str());
return false; // early return
}
}
return true;
}
void TraceFlagList::Add(TraceFlag* flag) {
flag->next_tracer_ = root_tracer_;
root_tracer_ = flag;
}
void TraceFlagList::LogAllTracers() {
VLOG(2) << "available tracers:";
for (TraceFlag* t = root_tracer_; t != nullptr; t = t->next_tracer_) {
VLOG(2) << "\t" << t->name_;
namespace {
void LogAllTracers() {
gpr_log(GPR_DEBUG, "available tracers:");
for (const auto& name : GetAllTraceFlags()) {
LOG(INFO) << " " << name.first;
}
}
void TraceFlagList::SaveTo(std::map<std::string, bool>& values) {
for (TraceFlag* t = root_tracer_; t != nullptr; t = t->next_tracer_) {
values[t->name_] = t->enabled();
}
}
} // namespace
// Flags register themselves on the list during construction
TraceFlag::TraceFlag(bool default_enabled, const char* name) : name_(name) {
static_assert(std::is_trivially_destructible<TraceFlag>::value,
"TraceFlag needs to be trivially destructible.");
set_enabled(default_enabled);
TraceFlagList::Add(this);
}
SavedTraceFlags::SavedTraceFlags() { TraceFlagList::SaveTo(values_); }
SavedTraceFlags::SavedTraceFlags() {
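// Each saved entry pairs the flag's current value with a pointer to the flag
// itself, so Restore() can write the values back without a name lookup.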
for (const auto& flag : GetAllTraceFlags()) {
values_[flag.first] = {flag.second->enabled(), flag.second};
}
}
void SavedTraceFlags::Restore() {
for (const auto& flag : values_) {
TraceFlagList::Set(flag.first, flag.second);
flag.second.second->set_enabled(flag.second.first);
}
}
namespace {
void ParseTracers(absl::string_view tracers) {
for (auto s : absl::StrSplit(tracers, ',', absl::SkipWhitespace())) {
if (s[0] == '-') {
TraceFlagList::Set(s.substr(1), false);
} else {
TraceFlagList::Set(s, true);
bool ParseTracers(absl::string_view tracers) {
std::string enabled_tracers;
bool some_trace_was_found = false;
for (auto trace_glob : absl::StrSplit(tracers, ',', absl::SkipWhitespace())) {
if (trace_glob == "list_tracers") {
LogAllTracers();
continue;
}
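// A leading '-' disables the matched tracers instead of enabling them;
// "all" and "refcount" remain as shorthands for the equivalent globs.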
bool enabled = !absl::ConsumePrefix(&trace_glob, "-");
if (trace_glob == "all") trace_glob = "*";
if (trace_glob == "refcount") trace_glob = "*refcount*";
bool found = false;
for (const auto& flag : GetAllTraceFlags()) {
if (GlobMatch(flag.first, trace_glob)) {
flag.second->set_enabled(enabled);
if (enabled) absl::StrAppend(&enabled_tracers, flag.first, ", ");
found = true;
some_trace_was_found = true;
}
}
} // namespace
if (!found) LOG(ERROR) << "Unknown tracer: " << trace_glob;
}
if (!enabled_tracers.empty()) {
absl::string_view enabled_tracers_view(enabled_tracers);
absl::ConsumeSuffix(&enabled_tracers_view, ", ");
LOG(INFO) << "gRPC Tracers: " << enabled_tracers_view;
}
return some_trace_was_found;
}
} // namespace grpc_core
@ -123,5 +105,6 @@ void grpc_tracer_init() {
}
int grpc_tracer_set_enabled(const char* name, int enabled) {
return grpc_core::TraceFlagList::Set(name, enabled != 0);
if (enabled != 0) return grpc_core::ParseTracers(name);
return grpc_core::ParseTracers(absl::StrCat("-", name));
}

@ -1,5 +1,3 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
@ -13,104 +11,11 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#ifndef GRPC_SRC_CORE_LIB_DEBUG_TRACE_H
#define GRPC_SRC_CORE_LIB_DEBUG_TRACE_H
#include <atomic>
#include <map>
#include <string>
#include "absl/strings/string_view.h"
#include <grpc/support/port_platform.h>
void grpc_tracer_init();
void grpc_tracer_shutdown(void);
namespace grpc_core {
class TraceFlag;
class TraceFlagList {
public:
static bool Set(absl::string_view name, bool enabled);
static void Add(TraceFlag* flag);
static void SaveTo(std::map<std::string, bool>& values);
private:
static void LogAllTracers();
static TraceFlag* root_tracer_;
};
namespace testing {
void grpc_tracer_enable_flag(TraceFlag* flag);
}
class TraceFlag {
public:
TraceFlag(bool default_enabled, const char* name);
// TraceFlag needs to be trivially destructible since it is used as a global
// variable.
~TraceFlag() = default;
const char* name() const { return name_; }
// Use the symbol GRPC_USE_TRACERS to determine if tracers will be enabled in
// opt builds (tracers are always on in dbg builds). The default in OSS is for
// tracers to be on since we support binary distributions of gRPC for the
// wrapped language (we don't want to force recompilation to get tracing).
// Internally, however, for performance reasons, we compile them out by
// default, since internal build systems make recompiling trivial.
//
// Prefer GRPC_TRACE_FLAG_ENABLED() macro instead of using enabled() directly.
#define GRPC_USE_TRACERS // tracers on by default in OSS
#if defined(GRPC_USE_TRACERS) || !defined(NDEBUG)
bool enabled() { return value_.load(std::memory_order_relaxed); }
#else
bool enabled() { return false; }
#endif // defined(GRPC_USE_TRACERS) || !defined(NDEBUG)
private:
friend void testing::grpc_tracer_enable_flag(TraceFlag* flag);
friend class TraceFlagList;
void set_enabled(bool enabled) {
value_.store(enabled, std::memory_order_relaxed);
}
TraceFlag* next_tracer_;
const char* const name_;
std::atomic<bool> value_;
};
#define GRPC_TRACE_FLAG_ENABLED(f) GPR_UNLIKELY((f).enabled())
#ifndef NDEBUG
typedef TraceFlag DebugOnlyTraceFlag;
#else
class DebugOnlyTraceFlag {
public:
constexpr DebugOnlyTraceFlag(bool /*default_enabled*/, const char* /*name*/) {
}
constexpr bool enabled() const { return false; }
constexpr const char* name() const { return "DebugOnlyTraceFlag"; }
private:
void set_enabled(bool /*enabled*/) {}
};
#endif
class SavedTraceFlags {
public:
SavedTraceFlags();
void Restore();
private:
std::map<std::string, bool> values_;
};
} // namespace grpc_core
#include "src/core/lib/debug/trace_flags.h" // IWYU pragma: export
#include "src/core/lib/debug/trace_impl.h" // IWYU pragma: export
#endif // GRPC_SRC_CORE_LIB_DEBUG_TRACE_H

@ -11,3 +11,245 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Automatically generated by tools/codegen/core/gen_trace_flags.py
//
#include "absl/container/flat_hash_map.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/no_destruct.h"
namespace grpc_core {
DebugOnlyTraceFlag auth_context_refcount_trace(false, "auth_context_refcount");
DebugOnlyTraceFlag call_combiner_trace(false, "call_combiner");
DebugOnlyTraceFlag call_refcount_trace(false, "call_refcount");
DebugOnlyTraceFlag closure_trace(false, "closure");
DebugOnlyTraceFlag combiner_trace(false, "combiner");
DebugOnlyTraceFlag cq_refcount_trace(false, "cq_refcount");
DebugOnlyTraceFlag error_refcount_trace(false, "error_refcount");
DebugOnlyTraceFlag fd_refcount_trace(false, "fd_refcount");
DebugOnlyTraceFlag fd_trace_trace(false, "fd_trace");
DebugOnlyTraceFlag lb_policy_refcount_trace(false, "lb_policy_refcount");
DebugOnlyTraceFlag party_state_trace(false, "party_state");
DebugOnlyTraceFlag pending_tags_trace(false, "pending_tags");
DebugOnlyTraceFlag polling_trace(false, "polling");
DebugOnlyTraceFlag polling_api_trace(false, "polling_api");
DebugOnlyTraceFlag promise_primitives_trace(false, "promise_primitives");
DebugOnlyTraceFlag resolver_refcount_trace(false, "resolver_refcount");
DebugOnlyTraceFlag security_connector_refcount_trace(
false, "security_connector_refcount");
DebugOnlyTraceFlag slice_refcount_trace(false, "slice_refcount");
DebugOnlyTraceFlag stream_refcount_trace(false, "stream_refcount");
DebugOnlyTraceFlag subchannel_refcount_trace(false, "subchannel_refcount");
DebugOnlyTraceFlag work_serializer_trace(false, "work_serializer");
TraceFlag api_trace(false, "api");
TraceFlag apple_polling_trace(false, "apple_polling");
TraceFlag backend_metric_trace(false, "backend_metric");
TraceFlag backend_metric_filter_trace(false, "backend_metric_filter");
TraceFlag bdp_estimator_trace(false, "bdp_estimator");
TraceFlag call_trace(false, "call");
TraceFlag call_error_trace(false, "call_error");
TraceFlag cares_address_sorting_trace(false, "cares_address_sorting");
TraceFlag cares_resolver_trace(false, "cares_resolver");
TraceFlag cds_lb_trace(false, "cds_lb");
TraceFlag channel_trace(false, "channel");
TraceFlag channel_stack_trace(false, "channel_stack");
TraceFlag chaotic_good_trace(false, "chaotic_good");
TraceFlag chttp2_hpack_parser_trace(false, "chttp2_hpack_parser");
TraceFlag chttp2_new_stream_trace(false, "chttp2_new_stream");
TraceFlag client_channel_trace(false, "client_channel");
TraceFlag client_channel_call_trace(false, "client_channel_call");
TraceFlag client_channel_lb_call_trace(false, "client_channel_lb_call");
TraceFlag client_idle_filter_trace(false, "client_idle_filter");
TraceFlag compression_trace(false, "compression");
TraceFlag connectivity_state_trace(false, "connectivity_state");
TraceFlag cronet_trace(false, "cronet");
TraceFlag dns_resolver_trace(false, "dns_resolver");
TraceFlag environment_autodetect_trace(false, "environment_autodetect");
TraceFlag event_engine_trace(false, "event_engine");
TraceFlag event_engine_client_channel_resolver_trace(
false, "event_engine_client_channel_resolver");
TraceFlag event_engine_dns_trace(false, "event_engine_dns");
TraceFlag event_engine_endpoint_trace(false, "event_engine_endpoint");
TraceFlag event_engine_endpoint_data_trace(false, "event_engine_endpoint_data");
TraceFlag event_engine_poller_trace(false, "event_engine_poller");
TraceFlag executor_trace(false, "executor");
TraceFlag fault_injection_filter_trace(false, "fault_injection_filter");
TraceFlag flowctl_trace(false, "flowctl");
TraceFlag fork_trace(false, "fork");
TraceFlag fuzzing_ee_timers_trace(false, "fuzzing_ee_timers");
TraceFlag fuzzing_ee_writes_trace(false, "fuzzing_ee_writes");
TraceFlag glb_trace(false, "glb");
TraceFlag grpc_authz_api_trace(false, "grpc_authz_api");
TraceFlag handshaker_trace(false, "handshaker");
TraceFlag health_check_client_trace(false, "health_check_client");
TraceFlag http_trace(false, "http");
TraceFlag http1_trace(false, "http1");
TraceFlag http2_ping_trace(false, "http2_ping");
TraceFlag http2_stream_state_trace(false, "http2_stream_state");
TraceFlag http_keepalive_trace(false, "http_keepalive");
TraceFlag inproc_trace(false, "inproc");
TraceFlag metadata_query_trace(false, "metadata_query");
TraceFlag op_failure_trace(false, "op_failure");
TraceFlag orca_client_trace(false, "orca_client");
TraceFlag outlier_detection_lb_trace(false, "outlier_detection_lb");
TraceFlag pick_first_trace(false, "pick_first");
TraceFlag plugin_credentials_trace(false, "plugin_credentials");
TraceFlag priority_lb_trace(false, "priority_lb");
TraceFlag queue_pluck_trace(false, "queue_pluck");
TraceFlag resource_quota_trace(false, "resource_quota");
TraceFlag retry_trace(false, "retry");
TraceFlag ring_hash_lb_trace(false, "ring_hash_lb");
TraceFlag rls_lb_trace(false, "rls_lb");
TraceFlag round_robin_trace(false, "round_robin");
TraceFlag secure_endpoint_trace(false, "secure_endpoint");
TraceFlag server_channel_trace(false, "server_channel");
TraceFlag stateful_session_filter_trace(false, "stateful_session_filter");
TraceFlag subchannel_trace(false, "subchannel");
TraceFlag subchannel_pool_trace(false, "subchannel_pool");
TraceFlag tcp_trace(false, "tcp");
TraceFlag timer_trace(false, "timer");
TraceFlag timer_check_trace(false, "timer_check");
TraceFlag tsi_trace(false, "tsi");
TraceFlag weighted_round_robin_lb_trace(false, "weighted_round_robin_lb");
TraceFlag weighted_target_lb_trace(false, "weighted_target_lb");
TraceFlag xds_client_trace(false, "xds_client");
TraceFlag xds_client_refcount_trace(false, "xds_client_refcount");
TraceFlag xds_cluster_impl_lb_trace(false, "xds_cluster_impl_lb");
TraceFlag xds_cluster_manager_lb_trace(false, "xds_cluster_manager_lb");
TraceFlag xds_cluster_resource_type_test_trace(
true, "xds_cluster_resource_type_test");
TraceFlag xds_common_types_test_trace(true, "xds_common_types_test");
TraceFlag xds_endpoint_resource_type_test_trace(
true, "xds_endpoint_resource_type_test");
TraceFlag xds_listener_resource_type_test_trace(
true, "xds_listener_resource_type_test");
TraceFlag xds_override_host_lb_trace(false, "xds_override_host_lb");
TraceFlag xds_resolver_trace(false, "xds_resolver");
TraceFlag xds_route_config_resource_type_test_trace(
true, "xds_route_config_resource_type_test");
TraceFlag xds_server_config_fetcher_trace(false, "xds_server_config_fetcher");
TraceFlag xds_wrr_locality_lb_trace(false, "xds_wrr_locality_lb");
const absl::flat_hash_map<std::string, TraceFlag*>& GetAllTraceFlags() {
static const NoDestruct<absl::flat_hash_map<std::string, TraceFlag*>> all(
absl::flat_hash_map<std::string, TraceFlag*>({
{"api", &api_trace},
{"apple_polling", &apple_polling_trace},
{"backend_metric", &backend_metric_trace},
{"backend_metric_filter", &backend_metric_filter_trace},
{"bdp_estimator", &bdp_estimator_trace},
{"call", &call_trace},
{"call_error", &call_error_trace},
{"cares_address_sorting", &cares_address_sorting_trace},
{"cares_resolver", &cares_resolver_trace},
{"cds_lb", &cds_lb_trace},
{"channel", &channel_trace},
{"channel_stack", &channel_stack_trace},
{"chaotic_good", &chaotic_good_trace},
{"chttp2_hpack_parser", &chttp2_hpack_parser_trace},
{"chttp2_new_stream", &chttp2_new_stream_trace},
{"client_channel", &client_channel_trace},
{"client_channel_call", &client_channel_call_trace},
{"client_channel_lb_call", &client_channel_lb_call_trace},
{"client_idle_filter", &client_idle_filter_trace},
{"compression", &compression_trace},
{"connectivity_state", &connectivity_state_trace},
{"cronet", &cronet_trace},
{"dns_resolver", &dns_resolver_trace},
{"environment_autodetect", &environment_autodetect_trace},
{"event_engine", &event_engine_trace},
{"event_engine_client_channel_resolver",
&event_engine_client_channel_resolver_trace},
{"event_engine_dns", &event_engine_dns_trace},
{"event_engine_endpoint", &event_engine_endpoint_trace},
{"event_engine_endpoint_data", &event_engine_endpoint_data_trace},
{"event_engine_poller", &event_engine_poller_trace},
{"executor", &executor_trace},
{"fault_injection_filter", &fault_injection_filter_trace},
{"flowctl", &flowctl_trace},
{"fork", &fork_trace},
{"fuzzing_ee_timers", &fuzzing_ee_timers_trace},
{"fuzzing_ee_writes", &fuzzing_ee_writes_trace},
{"glb", &glb_trace},
{"grpc_authz_api", &grpc_authz_api_trace},
{"handshaker", &handshaker_trace},
{"health_check_client", &health_check_client_trace},
{"http", &http_trace},
{"http1", &http1_trace},
{"http2_ping", &http2_ping_trace},
{"http2_stream_state", &http2_stream_state_trace},
{"http_keepalive", &http_keepalive_trace},
{"inproc", &inproc_trace},
{"metadata_query", &metadata_query_trace},
{"op_failure", &op_failure_trace},
{"orca_client", &orca_client_trace},
{"outlier_detection_lb", &outlier_detection_lb_trace},
{"pick_first", &pick_first_trace},
{"plugin_credentials", &plugin_credentials_trace},
{"priority_lb", &priority_lb_trace},
{"queue_pluck", &queue_pluck_trace},
{"resource_quota", &resource_quota_trace},
{"retry", &retry_trace},
{"ring_hash_lb", &ring_hash_lb_trace},
{"rls_lb", &rls_lb_trace},
{"round_robin", &round_robin_trace},
{"secure_endpoint", &secure_endpoint_trace},
{"server_channel", &server_channel_trace},
{"stateful_session_filter", &stateful_session_filter_trace},
{"subchannel", &subchannel_trace},
{"subchannel_pool", &subchannel_pool_trace},
{"tcp", &tcp_trace},
{"timer", &timer_trace},
{"timer_check", &timer_check_trace},
{"tsi", &tsi_trace},
{"weighted_round_robin_lb", &weighted_round_robin_lb_trace},
{"weighted_target_lb", &weighted_target_lb_trace},
{"xds_client", &xds_client_trace},
{"xds_client_refcount", &xds_client_refcount_trace},
{"xds_cluster_impl_lb", &xds_cluster_impl_lb_trace},
{"xds_cluster_manager_lb", &xds_cluster_manager_lb_trace},
{"xds_cluster_resource_type_test",
&xds_cluster_resource_type_test_trace},
{"xds_common_types_test", &xds_common_types_test_trace},
{"xds_endpoint_resource_type_test",
&xds_endpoint_resource_type_test_trace},
{"xds_listener_resource_type_test",
&xds_listener_resource_type_test_trace},
{"xds_override_host_lb", &xds_override_host_lb_trace},
{"xds_resolver", &xds_resolver_trace},
{"xds_route_config_resource_type_test",
&xds_route_config_resource_type_test_trace},
{"xds_server_config_fetcher", &xds_server_config_fetcher_trace},
{"xds_wrr_locality_lb", &xds_wrr_locality_lb_trace},
#ifndef NDEBUG
{"auth_context_refcount", &auth_context_refcount_trace},
{"call_combiner", &call_combiner_trace},
{"call_refcount", &call_refcount_trace},
{"closure", &closure_trace},
{"combiner", &combiner_trace},
{"cq_refcount", &cq_refcount_trace},
{"error_refcount", &error_refcount_trace},
{"fd_refcount", &fd_refcount_trace},
{"fd_trace", &fd_trace_trace},
{"lb_policy_refcount", &lb_policy_refcount_trace},
{"party_state", &party_state_trace},
{"pending_tags", &pending_tags_trace},
{"polling", &polling_trace},
{"polling_api", &polling_api_trace},
{"promise_primitives", &promise_primitives_trace},
{"resolver_refcount", &resolver_refcount_trace},
{"security_connector_refcount", &security_connector_refcount_trace},
{"slice_refcount", &slice_refcount_trace},
{"stream_refcount", &stream_refcount_trace},
{"subchannel_refcount", &subchannel_refcount_trace},
{"work_serializer", &work_serializer_trace},
#endif
}));
return *all;
}
} // namespace grpc_core

@ -12,7 +12,122 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Automatically generated by tools/codegen/core/gen_trace_flags.py
//
#ifndef GRPC_SRC_CORE_LIB_DEBUG_TRACE_FLAGS_H
#define GRPC_SRC_CORE_LIB_DEBUG_TRACE_FLAGS_H
#include "src/core/lib/debug/trace_impl.h"
namespace grpc_core {
extern DebugOnlyTraceFlag auth_context_refcount_trace;
extern DebugOnlyTraceFlag call_combiner_trace;
extern DebugOnlyTraceFlag call_refcount_trace;
extern DebugOnlyTraceFlag closure_trace;
extern DebugOnlyTraceFlag combiner_trace;
extern DebugOnlyTraceFlag cq_refcount_trace;
extern DebugOnlyTraceFlag error_refcount_trace;
extern DebugOnlyTraceFlag fd_refcount_trace;
extern DebugOnlyTraceFlag fd_trace_trace;
extern DebugOnlyTraceFlag lb_policy_refcount_trace;
extern DebugOnlyTraceFlag party_state_trace;
extern DebugOnlyTraceFlag pending_tags_trace;
extern DebugOnlyTraceFlag polling_trace;
extern DebugOnlyTraceFlag polling_api_trace;
extern DebugOnlyTraceFlag promise_primitives_trace;
extern DebugOnlyTraceFlag resolver_refcount_trace;
extern DebugOnlyTraceFlag security_connector_refcount_trace;
extern DebugOnlyTraceFlag slice_refcount_trace;
extern DebugOnlyTraceFlag stream_refcount_trace;
extern DebugOnlyTraceFlag subchannel_refcount_trace;
extern DebugOnlyTraceFlag work_serializer_trace;
extern TraceFlag api_trace;
extern TraceFlag apple_polling_trace;
extern TraceFlag backend_metric_trace;
extern TraceFlag backend_metric_filter_trace;
extern TraceFlag bdp_estimator_trace;
extern TraceFlag call_trace;
extern TraceFlag call_error_trace;
extern TraceFlag cares_address_sorting_trace;
extern TraceFlag cares_resolver_trace;
extern TraceFlag cds_lb_trace;
extern TraceFlag channel_trace;
extern TraceFlag channel_stack_trace;
extern TraceFlag chaotic_good_trace;
extern TraceFlag chttp2_hpack_parser_trace;
extern TraceFlag chttp2_new_stream_trace;
extern TraceFlag client_channel_trace;
extern TraceFlag client_channel_call_trace;
extern TraceFlag client_channel_lb_call_trace;
extern TraceFlag client_idle_filter_trace;
extern TraceFlag compression_trace;
extern TraceFlag connectivity_state_trace;
extern TraceFlag cronet_trace;
extern TraceFlag dns_resolver_trace;
extern TraceFlag environment_autodetect_trace;
extern TraceFlag event_engine_trace;
extern TraceFlag event_engine_client_channel_resolver_trace;
extern TraceFlag event_engine_dns_trace;
extern TraceFlag event_engine_endpoint_trace;
extern TraceFlag event_engine_endpoint_data_trace;
extern TraceFlag event_engine_poller_trace;
extern TraceFlag executor_trace;
extern TraceFlag fault_injection_filter_trace;
extern TraceFlag flowctl_trace;
extern TraceFlag fork_trace;
extern TraceFlag fuzzing_ee_timers_trace;
extern TraceFlag fuzzing_ee_writes_trace;
extern TraceFlag glb_trace;
extern TraceFlag grpc_authz_api_trace;
extern TraceFlag handshaker_trace;
extern TraceFlag health_check_client_trace;
extern TraceFlag http_trace;
extern TraceFlag http1_trace;
extern TraceFlag http2_ping_trace;
extern TraceFlag http2_stream_state_trace;
extern TraceFlag http_keepalive_trace;
extern TraceFlag inproc_trace;
extern TraceFlag metadata_query_trace;
extern TraceFlag op_failure_trace;
extern TraceFlag orca_client_trace;
extern TraceFlag outlier_detection_lb_trace;
extern TraceFlag pick_first_trace;
extern TraceFlag plugin_credentials_trace;
extern TraceFlag priority_lb_trace;
extern TraceFlag queue_pluck_trace;
extern TraceFlag resource_quota_trace;
extern TraceFlag retry_trace;
extern TraceFlag ring_hash_lb_trace;
extern TraceFlag rls_lb_trace;
extern TraceFlag round_robin_trace;
extern TraceFlag secure_endpoint_trace;
extern TraceFlag server_channel_trace;
extern TraceFlag stateful_session_filter_trace;
extern TraceFlag subchannel_trace;
extern TraceFlag subchannel_pool_trace;
extern TraceFlag tcp_trace;
extern TraceFlag timer_trace;
extern TraceFlag timer_check_trace;
extern TraceFlag tsi_trace;
extern TraceFlag weighted_round_robin_lb_trace;
extern TraceFlag weighted_target_lb_trace;
extern TraceFlag xds_client_trace;
extern TraceFlag xds_client_refcount_trace;
extern TraceFlag xds_cluster_impl_lb_trace;
extern TraceFlag xds_cluster_manager_lb_trace;
extern TraceFlag xds_cluster_resource_type_test_trace;
extern TraceFlag xds_common_types_test_trace;
extern TraceFlag xds_endpoint_resource_type_test_trace;
extern TraceFlag xds_listener_resource_type_test_trace;
extern TraceFlag xds_override_host_lb_trace;
extern TraceFlag xds_resolver_trace;
extern TraceFlag xds_route_config_resource_type_test_trace;
extern TraceFlag xds_server_config_fetcher_trace;
extern TraceFlag xds_wrr_locality_lb_trace;
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_DEBUG_TRACE_FLAGS_H

@ -0,0 +1,363 @@
# Copyright 2024 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File format:
# Each trace flag must have a name, a default, and a description.
# Optionally:
# * a `debug_only: true` flag can be added to ensure that this flag is only
# enabled in debug builds.
# * an `internal: true` flag can be added to ensure that this flag does not
# show up in the environment_variables.md documentation.
api:
default: false
description: API calls to the C core.
apple_polling:
default: false
description: (legacy) Apple polling trace.
internal: true
auth_context_refcount:
debug_only: true
default: false
description: Auth context refcounting.
backend_metric:
default: false
description: C++ backend metric recorder APIs.
backend_metric_filter:
default: false
description: Filter that populates backend metric data in server trailing metadata.
bdp_estimator:
default: false
description: Behavior of bdp estimation logic.
call:
default: false
description: Traces operations on a call through the gRPC stack.
call_combiner:
debug_only: true
default: false
description: Call combiner state.
call_error:
default: false
description: Possible errors contributing to final call statuses.
call_refcount:
debug_only: true
default: false
description: Refcount on call.
cares_address_sorting:
default: false
description: Operations of the c-ares based DNS resolver's address sorter.
cares_resolver:
default: false
description: Operations of the c-ares based DNS resolver.
cds_lb:
default: false
description: CDS LB policy.
channel:
default: false
description: Operations on the C core channel stack.
channel_stack:
default: false
description: Construction of the set of filters in a channel stack.
chaotic_good:
default: false
description: Chaotic good transport.
chttp2_hpack_parser:
default: false
description: HTTP/2 HPACK parser.
chttp2_new_stream:
default: false
description: HTTP/2 incoming stream creation.
client_channel:
default: false
description: Client channel control plane activity, including resolver and load balancing policy interaction.
client_channel_call:
default: false
description: Client channel call activity related to name resolution.
client_channel_lb_call:
default: false
description: Client channel call activity related to load balancing picking.
client_idle_filter:
default: false
description: Client idleness filter.
closure:
debug_only: true
default: false
description: Legacy closure creation, scheduling, and completion.
combiner:
debug_only: true
default: false
description: Combiner lock state.
compression:
default: false
description: Compression operations.
connectivity_state:
default: false
description: Connectivity state changes to channels.
cq_refcount:
debug_only: true
default: false
description: Completion queue refcounting.
cronet:
default: false
description: Cronet transport engine.
dns_resolver:
default: false
description: The active DNS resolver.
environment_autodetect:
default: false
description: GCP environment auto-detection.
error_refcount:
debug_only: true
default: false
description: Error refcounting.
event_engine:
default: false
description: High-level EventEngine operations.
event_engine_client_channel_resolver:
default: false
description: EventEngine-based client channel resolver state and events.
event_engine_dns:
default: false
description: EventEngine DNS resolver.
event_engine_endpoint:
default: false
description: EventEngine Endpoint operations.
event_engine_endpoint_data:
default: false
description: Detailed dump of EventEngine endpoint TCP data.
event_engine_poller:
default: false
description: EventEngine Poller events.
executor:
default: false
description: gRPC's legacy thread pool ('the executor').
fault_injection_filter:
default: false
description: Fault injection.
fd_refcount:
debug_only: true
default: false
description: File descriptor refcounting.
fd_trace:
debug_only: true
default: false
description: Legacy file descriptor create(), shutdown() and close() calls for channel fds.
flowctl:
default: false
description: Http2 flow control.
fork:
default: false
description: Fork support.
fuzzing_ee_timers:
default: false
description: Fuzzing EventEngine timers.
internal: true
fuzzing_ee_writes:
default: false
description: Fuzzing EventEngine writes.
internal: true
glb:
default: false
description: gRPClb load balancer.
grpc_authz_api:
default: false
description: gRPC authorization.
handshaker:
default: false
description: Handshaking state.
health_check_client:
default: false
description: Health checking client code.
http:
default: false
description: Http2 transport engine.
http1:
default: false
description: HTTP/1.x operations performed by gRPC.
http2_ping:
default: false
description: Pings/ping acks/antagonist writes in http2 stack.
http2_stream_state:
default: false
description: Http2 stream state mutations.
http_keepalive:
default: false
description: gRPC keepalive pings.
inproc:
default: false
description: In-process transport.
lb_policy_refcount:
debug_only: true
default: false
description: LB policy refcounting.
metadata_query:
default: false
description: GCP metadata queries.
op_failure:
default: false
description: Error information when failure is pushed onto a completion queue. The `api` tracer must be enabled for this flag to have any effect.
orca_client:
default: false
description: Out-of-band backend metric reporting client.
outlier_detection_lb:
default: false
description: Outlier detection.
party_state:
debug_only: true
default: false
description: Coordination of activities related to a call.
pending_tags:
debug_only: true
default: false
description: Still-in-progress tags on completion queues. The `api` tracer must be enabled for this flag to have any effect.
pick_first:
default: false
description: Pick first load balancing policy.
plugin_credentials:
default: false
description: Plugin credentials.
polling:
debug_only: true
default: false
description: The active polling engine.
polling_api:
debug_only: true
default: false
description: API calls to polling engine.
priority_lb:
default: false
description: Priority LB policy.
promise_primitives:
debug_only: true
default: false
description: Low-level primitives in the promise library.
queue_pluck:
default: false
description: Completion queue plucking. The `api` tracer must be enabled for this flag to have any effect.
resolver_refcount:
debug_only: true
default: false
description: Resolver refcounting.
resource_quota:
default: false
description: Resource quota objects internals.
retry:
default: false
description: Call retries.
ring_hash_lb:
default: false
description: Ring hash load balancing policy.
rls_lb:
default: false
description: RLS load balancing policy.
round_robin:
default: false
description: Round robin load balancing policy.
secure_endpoint:
default: false
description: Bytes flowing through encrypted channels.
security_connector_refcount:
debug_only: true
default: false
description: Refcounting for security connectors (part of channel credentials).
server_channel:
default: false
description: Lightweight trace of significant server channel events.
slice_refcount:
debug_only: true
default: false
description: Slice refcounting.
stateful_session_filter:
default: false
description: Stateful session affinity.
stream_refcount:
debug_only: true
default: false
description: Stream refcounting.
subchannel:
default: false
description: Connectivity state of subchannels.
subchannel_pool:
default: false
description: Subchannel pool.
subchannel_refcount:
debug_only: true
default: false
description: Subchannel refcounting.
tcp:
default: false
description: Bytes in and out of a channel.
timer:
default: false
description: Timers (alarms) in the grpc internals.
timer_check:
default: false
description: More detailed trace of timer logic in grpc internals.
tsi:
default: false
description: TSI transport security.
weighted_round_robin_lb:
default: false
description: Weighted round robin load balancing policy.
weighted_target_lb:
default: false
description: Weighted target LB policy.
work_serializer:
debug_only: true
default: false
description: A synchronization mechanism used to ensure that only one thread is executing at a given time.
xds_client:
default: false
description: XDS client.
xds_client_refcount:
default: false
description: Refcount of XDS client.
xds_cluster_impl_lb:
default: false
description: XDS Cluster impl LB policy.
xds_cluster_manager_lb:
default: false
description: XDS Cluster manager LB policy.
xds_cluster_resource_type_test:
default: true
description: XDS Cluster resource type.
internal: true
xds_common_types_test:
default: true
description: XDS Common types.
internal: true
xds_endpoint_resource_type_test:
default: true
description: XDS Endpoint resource type.
internal: true
xds_listener_resource_type_test:
default: true
description: XDS Listener resource type.
internal: true
xds_override_host_lb:
default: false
description: XDS Override host LB.
xds_resolver:
default: false
description: XDS Resolver.
xds_route_config_resource_type_test:
default: true
description: XDS Route config resource type.
internal: true
xds_server_config_fetcher:
default: false
description: XDS Server config fetcher.
xds_wrr_locality_lb:
default: false
description: XDS WRR locality LB policy.

@ -0,0 +1,115 @@
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRPC_SRC_CORE_LIB_DEBUG_TRACE_IMPL_H
#define GRPC_SRC_CORE_LIB_DEBUG_TRACE_IMPL_H
#include <atomic>
#include <map>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include <grpc/support/port_platform.h>
void grpc_tracer_init();
void grpc_tracer_shutdown(void);
namespace grpc_core {
bool ParseTracers(absl::string_view tracers);
class SavedTraceFlags;
class TraceFlag;
namespace testing {
void grpc_tracer_enable_flag(TraceFlag* flag);
}
class TraceFlag {
public:
TraceFlag(bool default_enabled, const char* name);
// TraceFlag needs to be trivially destructible since it is used as a global
// variable.
~TraceFlag() = default;
const char* name() const { return name_; }
// Use the symbol GRPC_USE_TRACERS to determine if tracers will be enabled in
// opt builds (tracers are always on in dbg builds). The default in OSS is for
// tracers to be on since we support binary distributions of gRPC for the
// wrapped language (we don't want to force recompilation to get tracing).
// Internally, however, for performance reasons, we compile them out by
// default, since internal build systems make recompiling trivial.
//
// Prefer GRPC_TRACE_FLAG_ENABLED() macro instead of using enabled() directly.
#define GRPC_USE_TRACERS // tracers on by default in OSS
#if defined(GRPC_USE_TRACERS) || !defined(NDEBUG)
bool enabled() { return value_.load(std::memory_order_relaxed); }
#else
bool enabled() { return false; }
#endif // defined(GRPC_USE_TRACERS) || !defined(NDEBUG)
private:
friend void testing::grpc_tracer_enable_flag(TraceFlag* flag);
friend bool ParseTracers(absl::string_view tracers);
friend SavedTraceFlags;
void set_enabled(bool enabled) {
value_.store(enabled, std::memory_order_relaxed);
}
TraceFlag* next_tracer_;
const char* const name_;
std::atomic<bool> value_;
};
#define GRPC_TRACE_FLAG_ENABLED_OBJ(obj) GPR_UNLIKELY((obj).enabled())
#define GRPC_TRACE_FLAG_ENABLED(tracer) \
GPR_UNLIKELY((grpc_core::tracer##_trace).enabled())
#define GRPC_TRACE_LOG(tracer, level) \
LOG_IF(level, GRPC_TRACE_FLAG_ENABLED(tracer))
#define GRPC_TRACE_VLOG(tracer, level) \
if (GRPC_TRACE_FLAG_ENABLED(tracer)) VLOG(level)
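// Illustrative usage, assuming the `api` flag defined in trace_flags.yaml:
//   GRPC_TRACE_VLOG(api, 2) << "some detail";
// emits at VLOG(2) only while the api tracer is enabled.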
#ifndef NDEBUG
typedef TraceFlag DebugOnlyTraceFlag;
#else
class DebugOnlyTraceFlag {
public:
constexpr DebugOnlyTraceFlag(bool /*default_enabled*/, const char* /*name*/) {
}
constexpr bool enabled() const { return false; }
constexpr const char* name() const { return "DebugOnlyTraceFlag"; }
private:
void set_enabled(bool /*enabled*/) {}
};
#endif
class SavedTraceFlags {
public:
SavedTraceFlags();
void Restore();
private:
std::map<std::string, std::pair<bool, TraceFlag*>> values_;
};
const absl::flat_hash_map<std::string, TraceFlag*>& GetAllTraceFlags();
} // namespace grpc_core
#endif // GRPC_SRC_CORE_LIB_DEBUG_TRACE_IMPL_H

@ -81,8 +81,6 @@
namespace grpc_event_engine {
namespace experimental {
grpc_core::TraceFlag grpc_trace_ares_resolver(false, "cares_resolver");
namespace {
absl::Status AresStatusToAbslStatus(int status, absl::string_view error_msg) {
@ -222,8 +220,7 @@ AresResolver::AresResolver(
std::unique_ptr<GrpcPolledFdFactory> polled_fd_factory,
std::shared_ptr<EventEngine> event_engine, ares_channel channel)
: RefCountedDNSResolverInterface(
GRPC_TRACE_FLAG_ENABLED(grpc_trace_ares_resolver) ? "AresResolver"
: nullptr),
GRPC_TRACE_FLAG_ENABLED(cares_resolver) ? "AresResolver" : nullptr),
channel_(channel),
polled_fd_factory_(std::move(polled_fd_factory)),
event_engine_(std::move(event_engine)) {
@ -768,7 +765,7 @@ void AresResolver::OnTXTDoneLocked(void* arg, int status, int /*timeouts*/,
}
GRPC_ARES_RESOLVER_TRACE_LOG("resolver:%p Got %zu TXT records", ares_resolver,
result.size());
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_ares_resolver)) {
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) {
for (const auto& record : result) {
LOG(INFO) << record;
}

@ -46,11 +46,9 @@
namespace grpc_event_engine {
namespace experimental {
extern grpc_core::TraceFlag grpc_trace_ares_resolver;
#define GRPC_ARES_RESOLVER_TRACE_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_ares_resolver)) { \
if (GRPC_TRACE_FLAG_ENABLED(cares_resolver)) { \
gpr_log(GPR_INFO, "(EventEngine c-ares resolver) " format, __VA_ARGS__); \
} \
} while (0)

@ -63,7 +63,7 @@ CFEventEngine::CFEventEngine()
CFEventEngine::~CFEventEngine() {
{
grpc_core::MutexLock lock(&task_mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine)) {
for (auto handle : known_handles_) {
gpr_log(GPR_ERROR,
"CFEventEngine:%p uncleared TaskHandle at shutdown:%s", this,

@ -35,11 +35,9 @@ namespace experimental {
void DNSServiceResolverImpl::LookupHostname(
EventEngine::DNSResolver::LookupHostnameCallback on_resolve,
absl::string_view name, absl::string_view default_port) {
GRPC_EVENT_ENGINE_DNS_TRACE(
"DNSServiceResolverImpl::LookupHostname: name: %.*s, default_port: %.*s, "
"this: %p",
static_cast<int>(name.length()), name.data(),
static_cast<int>(default_port.length()), default_port.data(), this);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "DNSServiceResolverImpl::LookupHostname: name: " << name
<< ", default_port: " << default_port << ", this: " << this;
absl::string_view host;
absl::string_view port_string;
@ -139,13 +137,12 @@ void DNSServiceResolverImpl::ResolveCallback(
DNSServiceRef sdRef, DNSServiceFlags flags, uint32_t interfaceIndex,
DNSServiceErrorType errorCode, const char* hostname,
const struct sockaddr* address, uint32_t ttl, void* context) {
GRPC_EVENT_ENGINE_DNS_TRACE(
"DNSServiceResolverImpl::ResolveCallback: sdRef: %p, flags: %x, "
"interface: %d, errorCode: %d, hostname: %s, addressFamily: %d, ttl: "
"%d, "
"this: %p",
sdRef, flags, interfaceIndex, errorCode, hostname, address->sa_family,
ttl, context);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "DNSServiceResolverImpl::ResolveCallback: sdRef: " << sdRef
<< ", flags: " << flags << ", interface: " << interfaceIndex
<< ", errorCode: " << errorCode << ", hostname: " << hostname
<< ", addressFamily: " << address->sa_family << ", ttl: " << ttl
<< ", this: " << context;
// no need to increase refcount here, since ResolveCallback and Shutdown are
// called from the serial queue and it is guaranteed that it won't be called
@ -195,12 +192,11 @@ void DNSServiceResolverImpl::ResolveCallback(
->sin6_port = htons(request.port);
}
GRPC_EVENT_ENGINE_DNS_TRACE(
"DNSServiceResolverImpl::ResolveCallback: "
"sdRef: %p, hostname: %s, addressPort: %s, this: %p",
sdRef, hostname,
ResolvedAddressToString(resolved_address).value_or("ERROR").c_str(),
context);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "DNSServiceResolverImpl::ResolveCallback: sdRef: " << sdRef
<< ", hostname: " << hostname << ", addressPort: "
<< ResolvedAddressToString(resolved_address).value_or("ERROR")
<< ", this: " << context;
}
// received both ipv4 and ipv6 responses, and no more responses (e.g. multiple
@ -231,10 +227,9 @@ void DNSServiceResolverImpl::Shutdown() {
for (auto& kv : that->requests_) {
auto& sdRef = kv.first;
auto& request = kv.second;
GRPC_EVENT_ENGINE_DNS_TRACE(
"DNSServiceResolverImpl::Shutdown sdRef: %p, this: %p", sdRef,
thatPtr);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "DNSServiceResolverImpl::Shutdown sdRef: " << sdRef
<< ", this: " << thatPtr;
request.on_resolve(
absl::CancelledError("DNSServiceResolverImpl::Shutdown"));
DNSServiceRefDeallocate(static_cast<DNSServiceRef>(sdRef));

@ -28,12 +28,11 @@
#include <vector>
#include "src/core/lib/config/config_vars.h"
#include "src/core/lib/debug/trace.h"
namespace grpc_event_engine {
namespace experimental {
grpc_core::TraceFlag grpc_trace_fork(false, "fork");
namespace {
bool IsForkEnabled() {
static bool enabled = grpc_core::ConfigVars::Get().EnableForkSupport();
@ -58,7 +57,7 @@ void ObjectGroupForkHandler::RegisterForkable(
void ObjectGroupForkHandler::Prefork() {
if (IsForkEnabled()) {
CHECK(!std::exchange(is_forking_, true));
GRPC_FORK_TRACE_LOG_STRING("PrepareFork");
GRPC_TRACE_LOG(fork, INFO) << "PrepareFork";
for (auto it = forkables_.begin(); it != forkables_.end();) {
auto shared = it->lock();
if (shared) {
@ -74,7 +73,7 @@ void ObjectGroupForkHandler::Prefork() {
void ObjectGroupForkHandler::PostforkParent() {
if (IsForkEnabled()) {
CHECK(is_forking_);
GRPC_FORK_TRACE_LOG_STRING("PostforkParent");
GRPC_TRACE_LOG(fork, INFO) << "PostforkParent";
for (auto it = forkables_.begin(); it != forkables_.end();) {
auto shared = it->lock();
if (shared) {
@ -91,7 +90,7 @@ void ObjectGroupForkHandler::PostforkParent() {
void ObjectGroupForkHandler::PostforkChild() {
if (IsForkEnabled()) {
CHECK(is_forking_);
GRPC_FORK_TRACE_LOG_STRING("PostforkChild");
GRPC_TRACE_LOG(fork, INFO) << "PostforkChild";
for (auto it = forkables_.begin(); it != forkables_.end();) {
auto shared = it->lock();
if (shared) {

@ -25,17 +25,6 @@
namespace grpc_event_engine {
namespace experimental {
extern grpc_core::TraceFlag grpc_trace_fork;
#define GRPC_FORK_TRACE_LOG(format, ...) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_fork)) { \
gpr_log(GPR_DEBUG, "[fork] " format, __VA_ARGS__); \
} \
} while (0)
#define GRPC_FORK_TRACE_LOG_STRING(format) GRPC_FORK_TRACE_LOG("%s", format)
// An interface to be implemented by EventEngines that wish to have managed fork
// support. The child class must guarantee that those methods are thread-safe.
class Forkable {

@ -460,7 +460,7 @@ struct PosixEventEngine::ClosureData final : public EventEngine::Closure {
PosixEventEngine::~PosixEventEngine() {
{
grpc_core::MutexLock lock(&mu_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine)) {
for (auto handle : known_handles_) {
gpr_log(GPR_ERROR,
"(event_engine) PosixEventEngine:%p uncleared "
@ -559,8 +559,8 @@ PosixEventEngine::GetDNSResolver(
// configuration.
if (ShouldUseAresDnsResolver()) {
#if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER)
GRPC_EVENT_ENGINE_DNS_TRACE("PosixEventEngine:%p creating AresResolver",
this);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "PosixEventEngine::" << this << " creating AresResolver";
auto ares_resolver = AresResolver::CreateAresResolver(
options.dns_server,
std::make_unique<GrpcPolledFdFactoryPosix>(poller_manager_->Poller()),
@ -572,8 +572,8 @@ PosixEventEngine::GetDNSResolver(
std::move(*ares_resolver));
#endif // GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET_ARES_EV_DRIVER)
}
GRPC_EVENT_ENGINE_DNS_TRACE(
"PosixEventEngine:%p creating NativePosixDNSResolver", this);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "PosixEventEngine::" << this << " creating NativePosixDNSResolver";
return std::make_unique<NativePosixDNSResolver>(shared_from_this());
#endif // GRPC_POSIX_SOCKET_RESOLVE_ADDRESS
}

@ -36,8 +36,6 @@ static thread_local bool g_timer_thread;
namespace grpc_event_engine {
namespace experimental {
grpc_core::DebugOnlyTraceFlag grpc_event_engine_timer_trace(false, "timer");
void TimerManager::RunSomeTimers(
std::vector<experimental::EventEngine::Closure*> timers) {
for (auto* timer : timers) {
@ -101,7 +99,7 @@ grpc_core::Timestamp TimerManager::Host::Now() {
void TimerManager::TimerInit(Timer* timer, grpc_core::Timestamp deadline,
experimental::EventEngine::Closure* closure) {
if (grpc_event_engine_timer_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(timer)) {
grpc_core::MutexLock lock(&mu_);
if (shutdown_) {
gpr_log(GPR_ERROR,
@ -121,7 +119,7 @@ void TimerManager::Shutdown() {
{
grpc_core::MutexLock lock(&mu_);
if (shutdown_) return;
if (grpc_event_engine_timer_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(timer)) {
gpr_log(GPR_DEBUG, "TimerManager::%p shutting down", this);
}
shutdown_ = true;
@ -129,7 +127,7 @@ void TimerManager::Shutdown() {
cv_wait_.Signal();
}
main_loop_exit_signal_->WaitForNotification();
if (grpc_event_engine_timer_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(timer)) {
gpr_log(GPR_DEBUG, "TimerManager::%p shutdown complete", this);
}
}
@ -147,7 +145,7 @@ void TimerManager::Kick() {
void TimerManager::RestartPostFork() {
grpc_core::MutexLock lock(&mu_);
CHECK(GPR_LIKELY(shutdown_));
if (grpc_event_engine_timer_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(timer)) {
gpr_log(GPR_DEBUG, "TimerManager::%p restarting after shutdown", this);
}
shutdown_ = false;

@ -1,25 +0,0 @@
// Copyright 2022 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "src/core/lib/debug/trace.h"
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_event_engine_trace(false, "event_engine");
grpc_core::TraceFlag grpc_event_engine_dns_trace(false, "event_engine_dns");
grpc_core::TraceFlag grpc_event_engine_endpoint_trace(false,
"event_engine_endpoint");
grpc_core::TraceFlag grpc_event_engine_endpoint_data_trace(
false, "event_engine_endpoint_data");
grpc_core::TraceFlag grpc_event_engine_poller_trace(false,
"event_engine_poller");

@ -19,30 +19,19 @@
#include "src/core/lib/debug/trace.h"
extern grpc_core::TraceFlag grpc_event_engine_trace;
extern grpc_core::TraceFlag grpc_event_engine_dns_trace;
extern grpc_core::TraceFlag grpc_event_engine_endpoint_data_trace;
extern grpc_core::TraceFlag grpc_event_engine_poller_trace;
extern grpc_core::TraceFlag grpc_event_engine_endpoint_trace;
#define GRPC_EVENT_ENGINE_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_trace)) { \
if (GRPC_TRACE_FLAG_ENABLED(event_engine)) { \
gpr_log(GPR_ERROR, "(event_engine) " format, __VA_ARGS__); \
}
#define GRPC_EVENT_ENGINE_ENDPOINT_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_endpoint_trace)) { \
if (GRPC_TRACE_FLAG_ENABLED(event_engine_endpoint)) { \
gpr_log(GPR_ERROR, "(event_engine endpoint) " format, __VA_ARGS__); \
}
#define GRPC_EVENT_ENGINE_POLLER_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_poller_trace)) { \
if (GRPC_TRACE_FLAG_ENABLED(event_engine_poller)) { \
gpr_log(GPR_ERROR, "(event_engine poller) " format, __VA_ARGS__); \
}
#define GRPC_EVENT_ENGINE_DNS_TRACE(format, ...) \
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_dns_trace)) { \
gpr_log(GPR_ERROR, "(event_engine dns) " format, __VA_ARGS__); \
}
#endif // GRPC_SRC_CORE_LIB_EVENT_ENGINE_TRACE_H

@ -156,7 +156,7 @@ bool WindowsEndpoint::Write(absl::AnyInvocable<void(absl::Status)> on_writable,
});
return false;
}
if (grpc_event_engine_endpoint_data_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine_endpoint_data)) {
for (size_t i = 0; i < data->Count(); i++) {
auto str = data->RefSlice(i).as_string_view();
gpr_log(GPR_INFO, "WindowsEndpoint::%p WRITE (peer=%s): %.*s", this,
@ -293,7 +293,7 @@ void WindowsEndpoint::HandleReadClosure::Run() {
}
if (result.bytes_transferred == 0) {
// Either the endpoint is shut down or we've seen the end of the stream
if (grpc_event_engine_endpoint_data_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine_endpoint_data)) {
DumpSliceBuffer(buffer_, absl::StrFormat("WindowsEndpoint::%p READ",
io_state->endpoint));
}

@ -218,7 +218,7 @@ WindowsEventEngine::~WindowsEventEngine() {
{
task_mu_.Lock();
if (!known_handles_.empty()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine)) {
for (auto handle : known_handles_) {
gpr_log(GPR_ERROR,
"WindowsEventEngine:%p uncleared TaskHandle at shutdown:%s",
@ -230,7 +230,7 @@ WindowsEventEngine::~WindowsEventEngine() {
auto deadline =
timer_manager_.Now() + grpc_core::Duration::FromSecondsAsDouble(10);
while (!known_handles_.empty() && timer_manager_.Now() < deadline) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_event_engine_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(event_engine)) {
GRPC_LOG_EVERY_N_SEC(1, GPR_DEBUG, "Waiting for timers. %d remaining",
known_handles_.size());
}
@ -337,8 +337,8 @@ WindowsEventEngine::GetDNSResolver(
return std::make_unique<WindowsEventEngine::WindowsDNSResolver>(
std::move(*ares_resolver));
#else // GRPC_ARES == 1 && defined(GRPC_WINDOWS_SOCKET_ARES_EV_DRIVER)
GRPC_EVENT_ENGINE_DNS_TRACE(
"WindowsEventEngine:%p creating NativeWindowsDNSResolver", this);
GRPC_TRACE_LOG(event_engine_dns, INFO)
<< "WindowsEventEngine::" << this << " creating NativeWindowsDNSResolver";
return std::make_unique<NativeWindowsDNSResolver>(shared_from_this());
#endif // GRPC_ARES == 1 && defined(GRPC_WINDOWS_SOCKET_ARES_EV_DRIVER)
}

@ -0,0 +1,70 @@
// Copyright 2024 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/strings/string_view.h"
namespace grpc_core {
namespace {
bool IsGlob(absl::string_view trace_glob) {
return std::any_of(trace_glob.begin(), trace_glob.end(),
[](const char c) { return c == '?' || c == '*'; });
}
} // namespace
bool GlobMatch(absl::string_view name, absl::string_view pattern) {
if (!IsGlob(pattern)) return name == pattern;
size_t name_idx = 0;
size_t trace_idx = 0;
// Pointers for iterative wildcard `*` matching.
size_t name_next_idx = name_idx;
size_t trace_next_idx = trace_idx;
while (trace_idx < pattern.length() || name_idx < name.length()) {
if (trace_idx < pattern.length()) {
switch (pattern.at(trace_idx)) {
case '?':
if (name_idx < name.length()) {
++trace_idx;
++name_idx;
continue;
}
break;
case '*':
trace_next_idx = trace_idx;
name_next_idx = name_idx + 1;
++trace_idx;
continue;
default:
if (name_idx < name.length() &&
name.at(name_idx) == pattern.at(trace_idx)) {
++trace_idx;
++name_idx;
continue;
}
break;
}
}
// Failed to match a character. Restart if possible.
if (name_next_idx > 0 && name_next_idx <= name.length()) {
trace_idx = trace_next_idx;
name_idx = name_next_idx;
continue;
}
return false;
}
return true;
}
} // namespace grpc_core
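
A minimal usage sketch of the new matcher (illustrative only; the wiring into trace-flag configuration lives in other hunks of this change, and the include path here is inferred from the header guard below):

```
#include <cassert>

#include "src/core/lib/gprpp/glob.h"

int main() {
  // `*` lets a single pattern cover a whole family of flag names.
  assert(grpc_core::GlobMatch("event_engine_dns", "event_engine*"));
  // `?` consumes exactly one character.
  assert(grpc_core::GlobMatch("tcp", "tc?"));
  assert(!grpc_core::GlobMatch("tcp", "t?"));
  // Without any wildcard, matching falls back to exact string equality.
  assert(!grpc_core::GlobMatch("event_engine_dns", "event_engine"));
  return 0;
}
```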

@ -1,5 +1,4 @@
//
// Copyright 2019 gRPC authors.
// Copyright 2024 The gRPC Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,19 +11,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef GRPC_SRC_CORE_RESOLVER_XDS_XDS_RESOLVER_TRACE_H
#define GRPC_SRC_CORE_RESOLVER_XDS_XDS_RESOLVER_TRACE_H
#include <grpc/support/port_platform.h>
#ifndef GRPC_SRC_CORE_LIB_GPRPP_GLOB_H
#define GRPC_SRC_CORE_LIB_GPRPP_GLOB_H
#include "src/core/lib/debug/trace.h"
#include "absl/strings/string_view.h"
namespace grpc_core {
extern TraceFlag grpc_xds_resolver_trace;
// A basic glob matcher based on https://research.swtch.com/glob.
// This supports `?` (a single wildcard character) and `*` (multiple wildcard
// characters).
bool GlobMatch(absl::string_view name, absl::string_view pattern);
} // namespace grpc_core
#endif // GRPC_SRC_CORE_RESOLVER_XDS_XDS_RESOLVER_TRACE_H
#endif // GRPC_SRC_CORE_LIB_GPRPP_GLOB_H
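
To make the intended use concrete, here is a rough, self-contained sketch (not the actual gRPC configuration code) of how a comma-separated trace setting could be applied to a set of flag names with the matcher declared above; `ApplyTraceConfig` and the flag map are hypothetical:

```
#include <map>
#include <string>

#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "src/core/lib/gprpp/glob.h"

// Hypothetical helper: enables every flag whose name matches any glob in the
// comma-separated config string, e.g. "event_engine*,http".
void ApplyTraceConfig(absl::string_view config,
                      std::map<std::string, bool>& flags) {
  for (absl::string_view glob : absl::StrSplit(config, ',')) {
    for (auto& [name, enabled] : flags) {
      if (grpc_core::GlobMatch(name, glob)) enabled = true;
    }
  }
}
```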

@ -46,8 +46,6 @@
namespace grpc_core {
DebugOnlyTraceFlag grpc_work_serializer_trace(false, "work_serializer");
//
// WorkSerializer::WorkSerializerImpl
//
@ -138,7 +136,7 @@ class WorkSerializer::LegacyWorkSerializer final : public WorkSerializerImpl {
void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::Run() %p Scheduling callback [%s:%d]",
this, location.file(), location.line());
}
@ -151,9 +149,7 @@ void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
if (GetOwners(prev_ref_pair) == 0) {
// We took ownership of the WorkSerializer. Invoke callback and drain queue.
SetCurrentThread();
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
LOG(INFO) << " Executing immediately";
}
GRPC_TRACE_LOG(work_serializer, INFO) << " Executing immediately";
callback();
// Delete the callback while still holding the WorkSerializer, so
// that any refs being held by the callback via lambda captures will
@ -161,12 +157,12 @@ void WorkSerializer::LegacyWorkSerializer::Run(std::function<void()> callback,
callback = nullptr;
DrainQueueOwned();
} else {
// Another thread is holding the WorkSerializer, so decrement the ownership
// count we just added and queue the callback.
// Another thread is holding the WorkSerializer, so decrement the
// ownership count we just added and queue the callback.
refs_.fetch_sub(MakeRefPair(1, 0), std::memory_order_acq_rel);
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, " Scheduling on queue : item %p", cb_wrapper);
}
queue_.Push(&cb_wrapper->mpscq_node);
@ -177,7 +173,7 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
std::function<void()> callback, const DebugLocation& location) {
CallbackWrapper* cb_wrapper =
new CallbackWrapper(std::move(callback), location);
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO,
"WorkSerializer::Schedule() %p Scheduling callback %p [%s:%d]",
this, cb_wrapper, location.file(), location.line());
@ -187,15 +183,13 @@ void WorkSerializer::LegacyWorkSerializer::Schedule(
}
void WorkSerializer::LegacyWorkSerializer::Orphan() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::Orphan() %p", this);
}
const uint64_t prev_ref_pair =
refs_.fetch_sub(MakeRefPair(0, 1), std::memory_order_acq_rel);
if (GetOwners(prev_ref_pair) == 0 && GetSize(prev_ref_pair) == 1) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
LOG(INFO) << " Destroying";
}
GRPC_TRACE_LOG(work_serializer, INFO) << " Destroying";
delete this;
}
}
@ -203,7 +197,7 @@ void WorkSerializer::LegacyWorkSerializer::Orphan() {
// The thread that calls this loans itself to the work serializer so as to
// execute all the scheduled callbacks.
void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::DrainQueue() %p", this);
}
// Attempt to take ownership of the WorkSerializer. Also increment the queue
@ -215,8 +209,8 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
// We took ownership of the WorkSerializer. Drain the queue.
DrainQueueOwned();
} else {
// Another thread is holding the WorkSerializer, so decrement the ownership
// count we just added and queue a no-op callback.
// Another thread is holding the WorkSerializer, so decrement the
// ownership count we just added and queue a no-op callback.
refs_.fetch_sub(MakeRefPair(1, 0), std::memory_order_acq_rel);
CallbackWrapper* cb_wrapper = new CallbackWrapper([]() {}, DEBUG_LOCATION);
queue_.Push(&cb_wrapper->mpscq_node);
@ -224,7 +218,7 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueue() {
}
void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer::DrainQueueOwned() %p", this);
}
while (true) {
@ -232,9 +226,7 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
// It is possible that while draining the queue, the last callback ended
// up orphaning the work serializer. In that case, delete the object.
if (GetSize(prev_ref_pair) == 1) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
LOG(INFO) << " Queue Drained. Destroying";
}
GRPC_TRACE_LOG(work_serializer, INFO) << " Queue Drained. Destroying";
delete this;
return;
}
@ -252,9 +244,7 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
}
if (GetSize(expected) == 0) {
// WorkSerializer got orphaned while this was running
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
LOG(INFO) << " Queue Drained. Destroying";
}
GRPC_TRACE_LOG(work_serializer, INFO) << " Queue Drained. Destroying";
delete this;
return;
}
@ -272,11 +262,10 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
queue_.PopAndCheckEnd(&empty_unused))) == nullptr) {
// This can happen due to a race condition within the mpscq
// implementation or because of a race with Run()/Schedule().
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
LOG(INFO) << " Queue returned nullptr, trying again";
}
GRPC_TRACE_LOG(work_serializer, INFO)
<< " Queue returned nullptr, trying again";
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, " Running item %p : callback scheduled at [%s:%d]",
cb_wrapper, cb_wrapper->location.file(),
cb_wrapper->location.line());
@ -292,9 +281,9 @@ void WorkSerializer::LegacyWorkSerializer::DrainQueueOwned() {
// DispatchingWorkSerializer: executes callbacks one at a time on EventEngine.
// One at a time guarantees that fixed size thread pools in EventEngine
// implementations are not starved of threads by long running work serializers.
// We implement EventEngine::Closure directly to avoid allocating once per
// callback in the queue when scheduling.
// implementations are not starved of threads by long running work
// serializers. We implement EventEngine::Closure directly to avoid allocating
// once per callback in the queue when scheduling.
class WorkSerializer::DispatchingWorkSerializer final
: public WorkSerializerImpl,
public grpc_event_engine::experimental::EventEngine::Closure {
@ -307,7 +296,8 @@ class WorkSerializer::DispatchingWorkSerializer final
const DebugLocation& location) override;
void Schedule(std::function<void()> callback,
const DebugLocation& location) override {
// We always dispatch to event engine, so Schedule and Run share semantics.
// We always dispatch to event engine, so Schedule and Run share
// semantics.
Run(callback, location);
}
void DrainQueue() override {}
@ -357,11 +347,11 @@ class WorkSerializer::DispatchingWorkSerializer final
// separated from incoming cache lines.
// Callbacks that are currently being processed.
// Only accessed by: a Run() call going from not-running to running, or a work
// item being executed in EventEngine -- ie this does not need a mutex because
// all access is serialized.
// Stored in reverse execution order so that callbacks can be `pop_back()`'d
// on completion to free up any resources they hold.
// Only accessed by: a Run() call going from not-running to running, or a
// work item being executed in EventEngine -- ie this does not need a mutex
// because all access is serialized. Stored in reverse execution order so
// that callbacks can be `pop_back()`'d on completion to free up any
// resources they hold.
CallbackVector processing_;
// EventEngine instance upon which we'll do our work.
const std::shared_ptr<grpc_event_engine::experimental::EventEngine>
@ -375,16 +365,16 @@ class WorkSerializer::DispatchingWorkSerializer final
// on an idle WorkSerializer, and transitions back to false after the last
// callback scheduled is completed and the WorkSerializer is again idle.
// - orphaned_ transitions to true once upon Orphan being called.
// When orphaned_ is true and running_ is false, the DispatchingWorkSerializer
// instance is deleted.
// When orphaned_ is true and running_ is false, the
// DispatchingWorkSerializer instance is deleted.
bool running_ ABSL_GUARDED_BY(mu_) = false;
bool orphaned_ ABSL_GUARDED_BY(mu_) = false;
Mutex mu_;
// Queued callbacks. New work items land here, and when processing_ is drained
// we move this entire queue into processing_ and work on draining it again.
// In low traffic scenarios this gives two mutex acquisitions per work item,
// but as load increases we get some natural batching and the rate of mutex
// acquisitions per work item tends towards 1.
// Queued callbacks. New work items land here, and when processing_ is
// drained we move this entire queue into processing_ and work on draining
// it again. In low traffic scenarios this gives two mutex acquisitions per
// work item, but as load increases we get some natural batching and the
// rate of mutex acquisitions per work item tends towards 1.
CallbackVector incoming_ ABSL_GUARDED_BY(mu_);
#ifndef NDEBUG
@ -413,15 +403,15 @@ void WorkSerializer::DispatchingWorkSerializer::Orphan() {
// Implementation of WorkSerializerImpl::Run
void WorkSerializer::DispatchingWorkSerializer::Run(
std::function<void()> callback, const DebugLocation& location) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer[%p] Scheduling callback [%s:%d]", this,
location.file(), location.line());
}
global_stats().IncrementWorkSerializerItemsEnqueued();
MutexLock lock(&mu_);
if (!running_) {
// If we were previously idle, insert this callback directly into the empty
// processing_ list and start running.
// If we were previously idle, insert this callback directly into the
// empty processing_ list and start running.
running_ = true;
running_start_time_ = std::chrono::steady_clock::now();
items_processed_during_run_ = 0;
@ -441,10 +431,10 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
// TODO(ctiller): remove these when we can deprecate ExecCtx
ApplicationCallbackExecCtx app_exec_ctx;
ExecCtx exec_ctx;
// Grab the last element of processing_ - which is the next item in our queue
// since processing_ is stored in reverse order.
// Grab the last element of processing_ - which is the next item in our
// queue since processing_ is stored in reverse order.
auto& cb = processing_.back();
if (GRPC_TRACE_FLAG_ENABLED(grpc_work_serializer_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(work_serializer)) {
gpr_log(GPR_INFO, "WorkSerializer[%p] Executing callback [%s:%d]", this,
cb.location.file(), cb.location.line());
}
@ -452,9 +442,9 @@ void WorkSerializer::DispatchingWorkSerializer::Run() {
const auto start = std::chrono::steady_clock::now();
SetCurrentThread();
cb.callback();
// pop_back here destroys the callback - freeing any resources it might hold.
// We do so before clearing the current thread in case the callback destructor
// wants to check that it's in the WorkSerializer too.
// pop_back here destroys the callback - freeing any resources it might
// hold. We do so before clearing the current thread in case the callback
// destructor wants to check that it's in the WorkSerializer too.
processing_.pop_back();
ClearCurrentThread();
global_stats().IncrementWorkSerializerItemsDequeued();
@ -508,8 +498,8 @@ bool WorkSerializer::DispatchingWorkSerializer::Refill() {
case RefillResult::kRefilled:
// Reverse processing_ so that we can pop_back() items in the correct
// order. (note that this is mostly pointer swaps inside the
// std::function's, so should be relatively cheap even for longer lists).
// Do so here so we're outside of the RefillInner lock.
// std::function's, so should be relatively cheap even for longer
// lists). Do so here so we're outside of the RefillInner lock.
std::reverse(processing_.begin(), processing_.end());
return true;
case RefillResult::kFinished:

@ -32,8 +32,6 @@
namespace grpc_core {
DebugOnlyTraceFlag grpc_call_combiner_trace(false, "call_combiner");
namespace {
// grpc_error LSB can be used
@ -116,7 +114,7 @@ void CallCombiner::ScheduleClosure(grpc_closure* closure,
void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
DEBUG_ARGS const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO,
"==> CallCombiner::Start() [%p] closure=%s [" DEBUG_FMT_STR
"%s] error=%s",
@ -125,20 +123,16 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
}
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)1));
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
if (prev_size == 0) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
LOG(INFO) << " EXECUTING IMMEDIATELY";
}
GRPC_TRACE_LOG(call_combiner, INFO) << " EXECUTING IMMEDIATELY";
// Queue was empty, so execute this closure immediately.
ScheduleClosure(closure, error);
} else {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
LOG(INFO) << " QUEUING";
}
GRPC_TRACE_LOG(call_combiner, INFO) << " QUEUING";
// Queue was not empty, so add closure to queue.
closure->error_data.error = internal::StatusAllocHeapPtr(error);
queue_.Push(
@ -147,45 +141,41 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
}
void CallCombiner::Stop(DEBUG_ARGS const char* reason) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO, "==> CallCombiner::Stop() [%p] [" DEBUG_FMT_STR "%s]",
this DEBUG_FMT_ARGS, reason);
}
size_t prev_size =
static_cast<size_t>(gpr_atm_full_fetch_add(&size_, (gpr_atm)-1));
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1);
}
CHECK_GE(prev_size, 1u);
if (prev_size > 1) {
while (true) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
LOG(INFO) << " checking queue";
}
GRPC_TRACE_LOG(call_combiner, INFO) << " checking queue";
bool empty;
grpc_closure* closure =
reinterpret_cast<grpc_closure*>(queue_.PopAndCheckEnd(&empty));
if (closure == nullptr) {
// This can happen either due to a race condition within the mpscq
// code or because of a race with Start().
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
LOG(INFO) << " queue returned no result; checking again";
}
GRPC_TRACE_LOG(call_combiner, INFO)
<< " queue returned no result; checking again";
continue;
}
grpc_error_handle error =
internal::StatusMoveFromHeapPtr(closure->error_data.error);
closure->error_data.error = 0;
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%s error=%s",
closure->DebugString().c_str(), StatusToString(error).c_str());
}
ScheduleClosure(closure, error);
break;
}
} else if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
LOG(INFO) << " queue empty";
GRPC_TRACE_LOG(call_combiner, INFO) << " queue empty";
}
}
@ -197,7 +187,7 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
// If error is set, invoke the cancellation closure immediately.
// Otherwise, store the new closure.
if (!original_error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
@ -208,7 +198,7 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
} else {
if (gpr_atm_full_cas(&cancel_state_, original_state,
reinterpret_cast<gpr_atm>(closure))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
this, closure);
}
@ -217,7 +207,7 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
// up any resources they may be holding for the callback.
if (original_state != 0) {
closure = reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p", this,
closure);
@ -245,7 +235,7 @@ void CallCombiner::Cancel(grpc_error_handle error) {
if (original_state != 0) {
grpc_closure* notify_on_cancel =
reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
this, notify_on_cancel);

@ -44,8 +44,6 @@
namespace grpc_core {
extern DebugOnlyTraceFlag grpc_call_combiner_trace;
class CallCombiner {
public:
CallCombiner();
@ -167,7 +165,7 @@ class CallCombinerClosureList {
GRPC_CALL_COMBINER_START(call_combiner, closure.closure, closure.error,
closure.reason);
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
if (GRPC_TRACE_FLAG_ENABLED(call_combiner)) {
gpr_log(GPR_INFO,
"CallCombinerClosureList executing closure while already "
"holding call_combiner %p: closure=%s error=%s reason=%s",

@ -35,8 +35,6 @@
#include "src/core/lib/iomgr/ev_apple.h"
#include "src/core/lib/iomgr/exec_ctx.h"
extern grpc_core::TraceFlag grpc_tcp_trace;
GrpcLibraryInitHolder::GrpcLibraryInitHolder() { grpc_init(); }
GrpcLibraryInitHolder::~GrpcLibraryInitHolder() { grpc_shutdown(); }
@ -65,7 +63,7 @@ void CFStreamHandle::ReadCallback(CFReadStreamRef stream,
grpc_error_handle error;
CFErrorRef stream_error;
CFStreamHandle* handle = static_cast<CFStreamHandle*>(client_callback_info);
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG, "CFStream ReadCallback (%p, %p, %lu, %p)", handle,
stream, type, client_callback_info);
}
@ -99,7 +97,7 @@ void CFStreamHandle::WriteCallback(CFWriteStreamRef stream,
grpc_error_handle error;
CFErrorRef stream_error;
CFStreamHandle* handle = static_cast<CFStreamHandle*>(clientCallBackInfo);
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG, "CFStream WriteCallback (%p, %p, %lu, %p)", handle,
stream, type, clientCallBackInfo);
}
@ -176,7 +174,7 @@ void CFStreamHandle::Shutdown(grpc_error_handle error) {
}
void CFStreamHandle::Ref(const char* file, int line, const char* reason) {
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount_.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"CFStream Handle ref %p : %s %" PRIdPTR " -> %" PRIdPTR, this,
@ -186,7 +184,7 @@ void CFStreamHandle::Ref(const char* file, int line, const char* reason) {
}
void CFStreamHandle::Unref(const char* file, int line, const char* reason) {
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_atm val = gpr_atm_no_barrier_load(&refcount_.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"CFStream Handle unref %p : %s %" PRIdPTR " -> %" PRIdPTR, this,

@ -37,8 +37,6 @@
struct grpc_closure;
typedef struct grpc_closure grpc_closure;
extern grpc_core::DebugOnlyTraceFlag grpc_trace_closure;
typedef struct grpc_closure_list {
grpc_closure* head;
grpc_closure* tail;
@ -294,7 +292,7 @@ class Closure {
return;
}
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(closure)) {
gpr_log(GPR_DEBUG, "running closure %p: created [%s:%d]: run [%s:%d]",
closure, closure->file_created, closure->line_created,
location.file(), location.line());
@ -303,7 +301,7 @@ class Closure {
#endif
closure->cb(closure->cb_arg, error);
#ifndef NDEBUG
if (grpc_trace_closure.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(closure)) {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
}
#endif

@ -34,11 +34,9 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
grpc_core::DebugOnlyTraceFlag grpc_combiner_trace(false, "combiner");
#define GRPC_COMBINER_TRACE(fn) \
do { \
if (grpc_combiner_trace.enabled()) { \
if (GRPC_TRACE_FLAG_ENABLED(combiner)) { \
fn; \
} \
} while (0)
@ -81,7 +79,7 @@ static void start_destroy(grpc_core::Combiner* lock) {
#ifndef NDEBUG
#define GRPC_COMBINER_DEBUG_SPAM(op, delta) \
if (grpc_combiner_trace.enabled()) { \
if (GRPC_TRACE_FLAG_ENABLED(combiner)) { \
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, \
"C:%p %s %" PRIdPTR " --> %" PRIdPTR " %s", lock, (op), \
gpr_atm_no_barrier_load(&lock->refs.count), \

@ -88,6 +88,4 @@ void grpc_combiner_unref(grpc_core::Combiner* lock GRPC_COMBINER_DEBUG_ARGS);
bool grpc_combiner_continue_exec_ctx();
extern grpc_core::DebugOnlyTraceFlag grpc_combiner_trace;
#endif // GRPC_SRC_CORE_LIB_IOMGR_COMBINER_H

@ -20,8 +20,6 @@
#include <grpc/support/port_platform.h>
grpc_core::TraceFlag grpc_tcp_trace(false, "tcp");
void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, bool urgent, int min_progress_size) {
ep->vtable->read(ep, slices, cb, urgent, min_progress_size);

@ -42,8 +42,6 @@
#include "src/core/lib/slice/slice_string_helpers.h"
#include "src/core/util/string.h"
extern grpc_core::TraceFlag grpc_tcp_trace;
struct CFStreamEndpoint {
grpc_endpoint base;
gpr_refcount refcount;
@ -75,7 +73,7 @@ static void CFStreamFree(CFStreamEndpoint* ep) {
#define EP_UNREF(ep, reason) CFStreamUnref((ep), (reason), __FILE__, __LINE__)
static void CFStreamUnref(CFStreamEndpoint* ep, const char* reason,
const char* file, int line) {
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"CFStream endpoint unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep,
@ -87,7 +85,7 @@ static void CFStreamUnref(CFStreamEndpoint* ep, const char* reason,
}
static void CFStreamRef(CFStreamEndpoint* ep, const char* reason,
const char* file, int line) {
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_atm val = gpr_atm_no_barrier_load(&ep->refcount.count);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"CFStream endpoint ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep,
@ -112,8 +110,7 @@ static grpc_error_handle CFStreamAnnotateError(grpc_error_handle src_error) {
}
static void CallReadCb(CFStreamEndpoint* ep, grpc_error_handle error) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) &&
gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
if (GRPC_TRACE_FLAG_ENABLED(tcp) && gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p call_read_cb %p %p:%p", ep,
ep->read_cb, ep->read_cb->cb, ep->read_cb->cb_arg);
size_t i;
@ -135,7 +132,7 @@ static void CallReadCb(CFStreamEndpoint* ep, grpc_error_handle error) {
}
static void CallWriteCb(CFStreamEndpoint* ep, grpc_error_handle error) {
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p call_write_cb %p %p:%p", ep,
ep->write_cb, ep->write_cb->cb, ep->write_cb->cb_arg);
gpr_log(GPR_DEBUG, "write: error=%s",
@ -224,7 +221,7 @@ static void WriteAction(void* arg, grpc_error_handle error) {
EP_UNREF(ep, "write");
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace) &&
if (GRPC_TRACE_FLAG_ENABLED(tcp) &&
gpr_should_log(GPR_LOG_SEVERITY_DEBUG)) {
grpc_slice trace_slice = grpc_slice_sub(slice, 0, write_size);
char* dump = grpc_dump_slice(trace_slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
@ -241,7 +238,7 @@ static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, bool /*urgent*/,
int /*min_progress_size*/) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p read (%p, %p) length:%zu", ep_impl,
slices, cb, slices->length);
}
@ -259,7 +256,7 @@ static void CFStreamWrite(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, void* /*arg*/,
int /*max_frame_size*/) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p write (%p, %p) length:%zu",
ep_impl, slices, cb, slices->length);
}
@ -272,15 +269,11 @@ static void CFStreamWrite(grpc_endpoint* ep, grpc_slice_buffer* slices,
void CFStreamDestroy(grpc_endpoint* ep) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p destroy", ep_impl);
}
GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " destroy";
CFReadStreamClose(ep_impl->read_stream);
CFWriteStreamClose(ep_impl->write_stream);
ep_impl->stream_sync->Shutdown(absl::UnavailableError("endpoint shutdown"));
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p destroy DONE", ep_impl);
}
GRPC_TRACE_VLOG(tcp, 2) << "CFStream endpoint:" << ep_impl << " destroy DONE";
EP_UNREF(ep_impl, "destroy");
}
@ -320,7 +313,7 @@ grpc_endpoint* grpc_cfstream_endpoint_create(CFReadStreamRef read_stream,
const char* peer_string,
CFStreamHandle* stream_sync) {
CFStreamEndpoint* ep_impl = new CFStreamEndpoint;
if (grpc_tcp_trace.enabled()) {
if (GRPC_TRACE_FLAG_ENABLED(tcp)) {
gpr_log(GPR_DEBUG,
"CFStream endpoint:%p create readStream:%p writeStream: %p",
ep_impl, read_stream, write_stream);

@ -41,10 +41,6 @@
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/util/useful.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_error_refcount(false,
"error_refcount");
grpc_core::DebugOnlyTraceFlag grpc_trace_closure(false, "closure");
absl::Status grpc_status_create(absl::StatusCode code, absl::string_view msg,
const grpc_core::DebugLocation& location,
size_t children_count, absl::Status* children) {
