[cleanup] Remove profiling timers (#30779)

* [cleanup] Remove profiling timers

- nobody has used this system in years
- if we needed it, we'd probably rewrite it at this point to be something more modern
- let's remove it until that need arises

* fix

* fixes
pull/30611/head
Craig Tiller 3 years ago committed by GitHub
parent 0c46726229
commit 004788af3d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 3
      BUILD
  2. 2
      CMakeLists.txt
  3. 30
      Makefile
  4. 3
      build_autogenerated.yaml
  5. 6
      build_handwritten.yaml
  6. 3
      config.m4
  7. 3
      config.w32
  8. 2
      gRPC-C++.podspec
  9. 4
      gRPC-Core.podspec
  10. 3
      grpc.gemspec
  11. 2
      grpc.gyp
  12. 3
      package.xml
  13. 2
      src/core/ext/filters/client_channel/client_channel.cc
  14. 2
      src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
  15. 3
      src/core/ext/filters/client_channel/subchannel.cc
  16. 2
      src/core/ext/filters/http/message_compress/message_compress_filter.cc
  17. 2
      src/core/ext/filters/http/message_compress/message_decompress_filter.cc
  18. 2
      src/core/ext/transport/binder/transport/binder_transport.cc
  19. 19
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  20. 2
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  21. 3
      src/core/ext/transport/chttp2/transport/writing.cc
  22. 10
      src/core/lib/gpr/alloc.cc
  23. 8
      src/core/lib/gpr/sync_abseil.cc
  24. 5
      src/core/lib/gpr/sync_posix.cc
  25. 4
      src/core/lib/iomgr/call_combiner.cc
  26. 1
      src/core/lib/iomgr/closure.h
  27. 14
      src/core/lib/iomgr/ev_epoll1_linux.cc
  28. 13
      src/core/lib/iomgr/ev_poll_posix.cc
  29. 2
      src/core/lib/iomgr/exec_ctx.cc
  30. 5
      src/core/lib/iomgr/tcp_posix.cc
  31. 2
      src/core/lib/iomgr/wakeup_fd_eventfd.cc
  32. 295
      src/core/lib/profiling/basic_timers.cc
  33. 7
      src/core/lib/profiling/stap_probes.d
  34. 50
      src/core/lib/profiling/stap_timers.cc
  35. 94
      src/core/lib/profiling/timers.h
  36. 3
      src/core/lib/security/transport/secure_endpoint.cc
  37. 11
      src/core/lib/surface/call.cc
  38. 15
      src/core/lib/surface/completion_queue.cc
  39. 3
      src/core/lib/surface/init.cc
  40. 2
      src/cpp/server/server_cc.cc
  41. 2
      src/python/grpcio/grpc_core_dependencies.py
  42. 12
      templates/Makefile.template
  43. 5
      test/core/fling/client.cc
  44. 3
      test/core/fling/server.cc
  45. 8
      test/cpp/microbenchmarks/bm_call_create.cc
  46. 2
      test/cpp/microbenchmarks/callback_streaming_ping_pong.h
  47. 2
      test/cpp/microbenchmarks/callback_unary_ping_pong.h
  48. 2
      test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h
  49. 3
      test/cpp/microbenchmarks/fullstack_streaming_pump.h
  50. 2
      test/cpp/microbenchmarks/fullstack_unary_ping_pong.h
  51. 1
      test/cpp/qps/client_callback.cc
  52. 5
      test/cpp/qps/client_sync.cc
  53. 3
      test/cpp/qps/driver.cc
  54. 6
      tools/bazel.rc
  55. 3
      tools/doxygen/Doxyfile.c++.internal
  56. 3
      tools/doxygen/Doxyfile.core.internal
  57. 276
      tools/profiling/latency_profile/profile_analyzer.py
  58. 6
      tools/run_tests/generated/configs.json

@ -954,8 +954,6 @@ grpc_cc_library(
"src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc",
"src/core/lib/gprpp/time_util.cc",
"src/core/lib/profiling/basic_timers.cc",
"src/core/lib/profiling/stap_timers.cc",
],
hdrs = [
"src/core/lib/gpr/alloc.h",
@ -979,7 +977,6 @@ grpc_cc_library(
"src/core/lib/gprpp/sync.h",
"src/core/lib/gprpp/thd.h",
"src/core/lib/gprpp/time_util.h",
"src/core/lib/profiling/timers.h",
],
external_deps = [
"absl/base",

2
CMakeLists.txt generated

@ -1561,8 +1561,6 @@ add_library(gpr
src/core/lib/gprpp/thd_posix.cc
src/core/lib/gprpp/thd_windows.cc
src/core/lib/gprpp/time_util.cc
src/core/lib/profiling/basic_timers.cc
src/core/lib/profiling/stap_timers.cc
)
set_target_properties(gpr PROPERTIES

30
Makefile generated

@ -98,14 +98,6 @@ LDXX_asan-trace-cmp = clang++
CPPFLAGS_asan-trace-cmp = -O0 -fsanitize-coverage=edge,trace-pc-guard -fsanitize-coverage=trace-cmp -fsanitize=address -fno-omit-frame-pointer -Wno-unused-command-line-argument -DGPR_NO_DIRECT_SYSCALLS
LDFLAGS_asan-trace-cmp = -fsanitize=address
VALID_CONFIG_basicprof = 1
CC_basicprof = $(DEFAULT_CC)
CXX_basicprof = $(DEFAULT_CXX)
LD_basicprof = $(DEFAULT_CC)
LDXX_basicprof = $(DEFAULT_CXX)
CPPFLAGS_basicprof = -O2 -DGRPC_BASIC_PROFILER -DGRPC_TIMERS_RDTSC
DEFINES_basicprof = NDEBUG
VALID_CONFIG_c++-compat = 1
CC_c++-compat = $(DEFAULT_CC)
CXX_c++-compat = $(DEFAULT_CXX)
@ -211,14 +203,6 @@ LDXX_opt = $(DEFAULT_CXX)
CPPFLAGS_opt = -O2 -Wframe-larger-than=16384
DEFINES_opt = NDEBUG
VALID_CONFIG_stapprof = 1
CC_stapprof = $(DEFAULT_CC)
CXX_stapprof = $(DEFAULT_CXX)
LD_stapprof = $(DEFAULT_CC)
LDXX_stapprof = $(DEFAULT_CXX)
CPPFLAGS_stapprof = -O2 -DGRPC_STAP_PROFILER
DEFINES_stapprof = NDEBUG
VALID_CONFIG_tsan = 1
REQUIRE_CUSTOM_LIBRARIES_tsan = 1
CC_tsan = clang
@ -780,18 +764,6 @@ cache.mk::
$(E) "[MAKE] Generating $@"
$(Q) echo "$(CACHE_MK)" | tr , '\n' >$@
ifeq ($(CONFIG),stapprof)
src/core/profiling/stap_timers.c: $(GENDIR)/src/core/profiling/stap_probes.h
ifeq ($(HAS_SYSTEMTAP),true)
$(GENDIR)/src/core/profiling/stap_probes.h: src/core/profiling/stap_probes.d
$(E) "[DTRACE] Compiling $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(DTRACE) -C -h -s $< -o $@
else
$(GENDIR)/src/core/profiling/stap_probes.h: systemtap_dep_error stop
endif
endif
$(OBJDIR)/$(CONFIG)/%.o : %.c
$(E) "[C] Compiling $<"
$(Q) mkdir -p `dirname $@`
@ -936,8 +908,6 @@ LIBGPR_SRC = \
src/core/lib/gprpp/thd_posix.cc \
src/core/lib/gprpp/thd_windows.cc \
src/core/lib/gprpp/time_util.cc \
src/core/lib/profiling/basic_timers.cc \
src/core/lib/profiling/stap_timers.cc \
PUBLIC_HEADERS_C += \
include/grpc/impl/codegen/atm.h \

@ -231,7 +231,6 @@ libs:
- src/core/lib/gprpp/sync.h
- src/core/lib/gprpp/thd.h
- src/core/lib/gprpp/time_util.h
- src/core/lib/profiling/timers.h
src:
- src/core/lib/gpr/alloc.cc
- src/core/lib/gpr/atm.cc
@ -274,8 +273,6 @@ libs:
- src/core/lib/gprpp/thd_posix.cc
- src/core/lib/gprpp/thd_windows.cc
- src/core/lib/gprpp/time_util.cc
- src/core/lib/profiling/basic_timers.cc
- src/core/lib/profiling/stap_timers.cc
deps:
- absl/base:base
- absl/base:core_headers

@ -64,9 +64,6 @@ configs:
test_environ:
ASAN_OPTIONS: detect_leaks=1:color=always
LSAN_OPTIONS: suppressions=test/core/util/lsan_suppressions.txt:report_objects=1
basicprof:
CPPFLAGS: -O2 -DGRPC_BASIC_PROFILER -DGRPC_TIMERS_RDTSC
DEFINES: NDEBUG
c++-compat:
CFLAGS: -Wc++-compat
CPPFLAGS: -O0
@ -128,9 +125,6 @@ configs:
opt:
CPPFLAGS: -O2 -Wframe-larger-than=16384
DEFINES: NDEBUG
stapprof:
CPPFLAGS: -O2 -DGRPC_STAP_PROFILER
DEFINES: NDEBUG
tsan:
CC: clang
CPPFLAGS: -O0 -fsanitize=thread -fno-omit-frame-pointer -Wno-unused-command-line-argument

3
config.m4 generated

@ -616,8 +616,6 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/load_balancing/lb_policy.cc \
src/core/lib/load_balancing/lb_policy_registry.cc \
src/core/lib/matchers/matchers.cc \
src/core/lib/profiling/basic_timers.cc \
src/core/lib/profiling/stap_timers.cc \
src/core/lib/promise/activity.cc \
src/core/lib/promise/sleep.cc \
src/core/lib/resolver/resolver.cc \
@ -1340,7 +1338,6 @@ if test "$PHP_GRPC" != "no"; then
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/json)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/load_balancing)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/matchers)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/profiling)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/promise)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/resolver)
PHP_ADD_BUILD_DIR($ext_builddir/src/core/lib/resource_quota)

3
config.w32 generated

@ -582,8 +582,6 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\load_balancing\\lb_policy.cc " +
"src\\core\\lib\\load_balancing\\lb_policy_registry.cc " +
"src\\core\\lib\\matchers\\matchers.cc " +
"src\\core\\lib\\profiling\\basic_timers.cc " +
"src\\core\\lib\\profiling\\stap_timers.cc " +
"src\\core\\lib\\promise\\activity.cc " +
"src\\core\\lib\\promise\\sleep.cc " +
"src\\core\\lib\\resolver\\resolver.cc " +
@ -1462,7 +1460,6 @@ if (PHP_GRPC != "no") {
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\json");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\load_balancing");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\matchers");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\profiling");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\promise");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\resolver");
FSO.CreateFolder(base_dir+"\\ext\\grpc\\src\\core\\lib\\resource_quota");

2
gRPC-C++.podspec generated

@ -820,7 +820,6 @@ Pod::Spec.new do |s|
'src/core/lib/load_balancing/lb_policy_registry.h',
'src/core/lib/load_balancing/subchannel_interface.h',
'src/core/lib/matchers/matchers.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/promise/activity.h',
'src/core/lib/promise/arena_promise.h',
'src/core/lib/promise/call_push_pull.h',
@ -1678,7 +1677,6 @@ Pod::Spec.new do |s|
'src/core/lib/load_balancing/lb_policy_registry.h',
'src/core/lib/load_balancing/subchannel_interface.h',
'src/core/lib/matchers/matchers.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/promise/activity.h',
'src/core/lib/promise/arena_promise.h',
'src/core/lib/promise/call_push_pull.h',

4
gRPC-Core.podspec generated

@ -1334,9 +1334,6 @@ Pod::Spec.new do |s|
'src/core/lib/load_balancing/subchannel_interface.h',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/matchers/matchers.h',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/profiling/timers.h',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/activity.h',
'src/core/lib/promise/arena_promise.h',
@ -2302,7 +2299,6 @@ Pod::Spec.new do |s|
'src/core/lib/load_balancing/lb_policy_registry.h',
'src/core/lib/load_balancing/subchannel_interface.h',
'src/core/lib/matchers/matchers.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/promise/activity.h',
'src/core/lib/promise/arena_promise.h',
'src/core/lib/promise/call_push_pull.h',

3
grpc.gemspec generated

@ -1247,9 +1247,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/load_balancing/subchannel_interface.h )
s.files += %w( src/core/lib/matchers/matchers.cc )
s.files += %w( src/core/lib/matchers/matchers.h )
s.files += %w( src/core/lib/profiling/basic_timers.cc )
s.files += %w( src/core/lib/profiling/stap_timers.cc )
s.files += %w( src/core/lib/profiling/timers.h )
s.files += %w( src/core/lib/promise/activity.cc )
s.files += %w( src/core/lib/promise/activity.h )
s.files += %w( src/core/lib/promise/arena_promise.h )

2
grpc.gyp generated

@ -349,8 +349,6 @@
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
{

3
package.xml generated

@ -1229,9 +1229,6 @@
<file baseinstalldir="/" name="src/core/lib/load_balancing/subchannel_interface.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/matchers/matchers.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/matchers/matchers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/basic_timers.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/stap_timers.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/activity.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/activity.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/promise/arena_promise.h" role="src" />

@ -73,7 +73,6 @@
#include "src/core/lib/json/json.h"
#include "src/core/lib/load_balancing/lb_policy_registry.h"
#include "src/core/lib/load_balancing/subchannel_interface.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resolver/resolver_registry.h"
#include "src/core/lib/resolver/server_address.h"
#include "src/core/lib/service_config/service_config_call_data.h"
@ -1854,7 +1853,6 @@ void ClientChannel::CallData::Destroy(
void ClientChannel::CallData::StartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("cc_start_transport_stream_op_batch", 0);
CallData* calld = static_cast<CallData*>(elem->call_data);
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace) &&

@ -31,7 +31,6 @@
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@ -102,7 +101,6 @@ static void clr_destroy_call_elem(grpc_call_element* elem,
static void clr_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
call_data* calld = static_cast<call_data*>(elem->call_data);
GPR_TIMER_SCOPE("clr_start_transport_stream_op_batch", 0);
// Handle send_initial_metadata.
if (batch->send_initial_metadata) {
// Grab client stats object from metadata.

@ -57,7 +57,6 @@
#include "src/core/lib/gprpp/sync.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/connectivity_state.h"
@ -175,7 +174,6 @@ SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
void SubchannelCall::StartTransportStreamOpBatch(
grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("subchannel_call_process_op", 0);
MaybeInterceptRecvTrailingMetadata(batch);
grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(this);
grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
@ -214,7 +212,6 @@ void SubchannelCall::Unref(const DebugLocation& /*location*/,
}
void SubchannelCall::Destroy(void* arg, grpc_error_handle /*error*/) {
GPR_TIMER_SCOPE("subchannel_call_destroy", 0);
SubchannelCall* self = static_cast<SubchannelCall*>(arg);
// Keep some members before destroying the subchannel call.
grpc_closure* after_call_stack_destroy = self->after_call_stack_destroy_;

@ -40,7 +40,6 @@
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/transport/metadata_batch.h"
@ -227,7 +226,6 @@ void CallData::ForwardSendMessageBatch(void* elem_arg,
void CallData::CompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) {
GRPC_ERROR_UNREF(cancel_error_);

@ -40,7 +40,6 @@
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
@ -274,7 +273,6 @@ void CallData::DecompressStartTransportStreamOpBatch(
void DecompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
GPR_TIMER_SCOPE("decompress_start_transport_stream_op_batch", 0);
CallData* calld = static_cast<CallData*>(elem->call_data);
calld->DecompressStartTransportStreamOpBatch(elem, batch);
}

@ -103,7 +103,6 @@ static void register_stream_locked(void* arg, grpc_error_handle /*error*/) {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena) {
GPR_TIMER_SCOPE("init_stream", 0);
gpr_log(GPR_INFO, "%s = %p %p %p %p %p", __func__, gt, gs, refcount,
server_data, arena);
// Note that this function is not locked and may be invoked concurrently
@ -563,7 +562,6 @@ static void perform_stream_op_locked(void* stream_op,
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
GPR_TIMER_SCOPE("perform_stream_op", 0);
grpc_binder_transport* gbt = reinterpret_cast<grpc_binder_transport*>(gt);
grpc_binder_stream* gbs = reinterpret_cast<grpc_binder_stream*>(gs);
gpr_log(GPR_INFO, "%s = %p %p %p is_client = %d", __func__, gt, gs, op,

@ -73,7 +73,6 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/resource_quota/memory_quota.h"
@ -716,21 +715,18 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena) {
GPR_TIMER_SCOPE("init_stream", 0);
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);
return 0;
}
static void destroy_stream_locked(void* sp, grpc_error_handle /*error*/) {
GPR_TIMER_SCOPE("destroy_stream", 0);
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
s->~grpc_chttp2_stream();
}
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
GPR_TIMER_SCOPE("destroy_stream", 0);
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
@ -795,8 +791,6 @@ static void set_write_state(grpc_chttp2_transport* t,
void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
grpc_chttp2_initiate_write_reason reason) {
GPR_TIMER_SCOPE("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING,
@ -850,7 +844,6 @@ static const char* begin_writing_desc(bool partial) {
static void write_action_begin_locked(void* gt,
grpc_error_handle /*error_ignored*/) {
GPR_TIMER_SCOPE("write_action_begin_locked", 0);
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(gt);
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
grpc_chttp2_begin_write_result r;
@ -885,7 +878,6 @@ static void write_action_begin_locked(void* gt,
}
static void write_action(void* gt, grpc_error_handle /*error*/) {
GPR_TIMER_SCOPE("write_action", 0);
static bool kEnablePeerStateBasedFraming =
GPR_GLOBAL_CONFIG_GET(grpc_experimental_enable_peer_state_based_framing);
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(gt);
@ -918,7 +910,6 @@ static void write_action_end(void* tp, grpc_error_handle error) {
// Callback from the grpc_endpoint after bytes have been written by calling
// sendmsg
static void write_action_end_locked(void* tp, grpc_error_handle error) {
GPR_TIMER_SCOPE("terminate_writing_with_lock", 0);
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
bool closed = false;
@ -940,11 +931,9 @@ static void write_action_end_locked(void* tp, grpc_error_handle error) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
GPR_UNREACHABLE_CODE(break);
case GRPC_CHTTP2_WRITE_STATE_WRITING:
GPR_TIMER_MARK("state=writing", 0);
set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "finish writing");
break;
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING, "continue writing");
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
// If the transport is closed, we will retry writing on the endpoint
@ -1214,8 +1203,6 @@ static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
static void perform_stream_op_locked(void* stream_op,
grpc_error_handle /*error_ignored*/) {
GPR_TIMER_SCOPE("perform_stream_op_locked", 0);
grpc_transport_stream_op_batch* op =
static_cast<grpc_transport_stream_op_batch*>(stream_op);
grpc_chttp2_stream* s =
@ -1468,7 +1455,6 @@ static void perform_stream_op_locked(void* stream_op,
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
GPR_TIMER_SCOPE("perform_stream_op", 0);
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
@ -2389,8 +2375,6 @@ static void read_action(void* tp, grpc_error_handle error) {
}
static void read_action_locked(void* tp, grpc_error_handle error) {
GPR_TIMER_SCOPE("reading_action_locked", 0);
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
(void)GRPC_ERROR_REF(error);
@ -2405,7 +2389,6 @@ static void read_action_locked(void* tp, grpc_error_handle error) {
std::swap(err, error);
GRPC_ERROR_UNREF(err);
if (GRPC_ERROR_IS_NONE(t->closed_with_error)) {
GPR_TIMER_SCOPE("reading_action.parse", 0);
size_t i = 0;
grpc_error_handle errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
@ -2422,7 +2405,6 @@ static void read_action_locked(void* tp, grpc_error_handle error) {
GRPC_ERROR_UNREF(errors[i]);
}
GPR_TIMER_SCOPE("post_parse_locked", 0);
if (t->initial_window_update != 0) {
if (t->initial_window_update > 0) {
grpc_chttp2_stream* s;
@ -2436,7 +2418,6 @@ static void read_action_locked(void* tp, grpc_error_handle error) {
}
}
GPR_TIMER_SCOPE("post_reading_action_locked", 0);
bool keep_reading = false;
if (GRPC_ERROR_IS_NONE(error) && !GRPC_ERROR_IS_NONE(t->closed_with_error)) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(

@ -49,7 +49,6 @@
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_refcount_base.h"
#include "src/core/lib/transport/http2_errors.h"
@ -1343,7 +1342,6 @@ grpc_error_handle grpc_chttp2_header_parser_parse(void* hpack_parser,
grpc_chttp2_stream* s,
const grpc_slice& slice,
int is_last) {
GPR_TIMER_SCOPE("grpc_chttp2_header_parser_parse", 0);
auto* parser = static_cast<grpc_core::HPackParser*>(hpack_parser);
if (s != nullptr) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);

@ -57,7 +57,6 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/bdp_estimator.h"
@ -266,7 +265,6 @@ class WriteContext {
public:
explicit WriteContext(grpc_chttp2_transport* t) : t_(t) {
GRPC_STATS_INC_HTTP2_WRITES_BEGUN();
GPR_TIMER_SCOPE("grpc_chttp2_begin_write", 0);
}
void FlushSettings() {
@ -664,7 +662,6 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
GPR_TIMER_SCOPE("grpc_chttp2_end_write", 0);
grpc_chttp2_stream* s;
if (t->channelz_socket != nullptr) {

@ -24,10 +24,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/profiling/timers.h"
void* gpr_malloc(size_t size) {
GPR_TIMER_SCOPE("gpr_malloc", 0);
void* p;
if (size == 0) return nullptr;
p = malloc(size);
@ -38,7 +35,6 @@ void* gpr_malloc(size_t size) {
}
void* gpr_zalloc(size_t size) {
GPR_TIMER_SCOPE("gpr_zalloc", 0);
void* p;
if (size == 0) return nullptr;
p = calloc(size, 1);
@ -48,13 +44,9 @@ void* gpr_zalloc(size_t size) {
return p;
}
void gpr_free(void* p) {
GPR_TIMER_SCOPE("gpr_free", 0);
free(p);
}
void gpr_free(void* p) { free(p); }
void* gpr_realloc(void* p, size_t size) {
GPR_TIMER_SCOPE("gpr_realloc", 0);
if ((size == 0) && (p == nullptr)) return nullptr;
p = realloc(p, size);
if (!p) {

@ -33,8 +33,6 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/lib/profiling/timers.h"
#ifdef GPR_LOW_LEVEL_COUNTERS
gpr_atm gpr_mu_locks = 0;
gpr_atm gpr_counter_atm_cas = 0;
@ -52,17 +50,14 @@ void gpr_mu_destroy(gpr_mu* mu) {
}
void gpr_mu_lock(gpr_mu* mu) ABSL_NO_THREAD_SAFETY_ANALYSIS {
GPR_TIMER_SCOPE("gpr_mu_lock", 0);
reinterpret_cast<absl::Mutex*>(mu)->Lock();
}
void gpr_mu_unlock(gpr_mu* mu) ABSL_NO_THREAD_SAFETY_ANALYSIS {
GPR_TIMER_SCOPE("gpr_mu_unlock", 0);
reinterpret_cast<absl::Mutex*>(mu)->Unlock();
}
int gpr_mu_trylock(gpr_mu* mu) {
GPR_TIMER_SCOPE("gpr_mu_trylock", 0);
return reinterpret_cast<absl::Mutex*>(mu)->TryLock();
}
@ -79,7 +74,6 @@ void gpr_cv_destroy(gpr_cv* cv) {
}
int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
GPR_TIMER_SCOPE("gpr_cv_wait", 0);
if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
0) {
reinterpret_cast<absl::CondVar*>(cv)->Wait(
@ -94,12 +88,10 @@ int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
}
void gpr_cv_signal(gpr_cv* cv) {
GPR_TIMER_MARK("gpr_cv_signal", 0);
reinterpret_cast<absl::CondVar*>(cv)->Signal();
}
void gpr_cv_broadcast(gpr_cv* cv) {
GPR_TIMER_MARK("gpr_cv_broadcast", 0);
reinterpret_cast<absl::CondVar*>(cv)->SignalAll();
}

@ -29,8 +29,6 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/lib/profiling/timers.h"
#ifdef GPR_LOW_LEVEL_COUNTERS
gpr_atm gpr_mu_locks = 0;
gpr_atm gpr_counter_atm_cas = 0;
@ -60,7 +58,6 @@ void gpr_mu_lock(gpr_mu* mu) {
#ifdef GPR_LOW_LEVEL_COUNTERS
GPR_ATM_INC_COUNTER(gpr_mu_locks);
#endif
GPR_TIMER_SCOPE("gpr_mu_lock", 0);
#ifdef GRPC_ASAN_ENABLED
GPR_ASSERT(pthread_mutex_lock(&mu->mutex) == 0);
#else
@ -69,7 +66,6 @@ void gpr_mu_lock(gpr_mu* mu) {
}
void gpr_mu_unlock(gpr_mu* mu) {
GPR_TIMER_SCOPE("gpr_mu_unlock", 0);
#ifdef GRPC_ASAN_ENABLED
GPR_ASSERT(pthread_mutex_unlock(&mu->mutex) == 0);
#else
@ -78,7 +74,6 @@ void gpr_mu_unlock(gpr_mu* mu) {
}
int gpr_mu_trylock(gpr_mu* mu) {
GPR_TIMER_SCOPE("gpr_mu_trylock", 0);
int err = 0;
#ifdef GRPC_ASAN_ENABLED
err = pthread_mutex_trylock(&mu->mutex);

@ -25,7 +25,6 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/profiling/timers.h"
namespace grpc_core {
@ -113,7 +112,6 @@ void CallCombiner::ScheduleClosure(grpc_closure* closure,
void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Start", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"==> CallCombiner::Start() [%p] closure=%p [" DEBUG_FMT_STR
@ -128,7 +126,6 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
prev_size + 1);
}
if (prev_size == 0) {
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
}
@ -146,7 +143,6 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
}
void CallCombiner::Stop(DEBUG_ARGS const char* reason) {
GPR_TIMER_SCOPE("CallCombiner::Stop", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, "==> CallCombiner::Stop() [%p] [" DEBUG_FMT_STR "%s]",
this DEBUG_FMT_ARGS, reason);

@ -31,7 +31,6 @@
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/gprpp/mpscq.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
struct grpc_closure;
typedef struct grpc_closure grpc_closure;

@ -57,7 +57,6 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
static grpc_wakeup_fd global_wakeup_fd;
@ -586,7 +585,6 @@ static void pollset_destroy(grpc_pollset* pollset) {
}
static grpc_error_handle pollset_kick_all(grpc_pollset* pollset) {
GPR_TIMER_SCOPE("pollset_kick_all", 0);
grpc_error_handle error = GRPC_ERROR_NONE;
if (pollset->root_worker != nullptr) {
grpc_pollset_worker* worker = pollset->root_worker;
@ -618,7 +616,6 @@ static grpc_error_handle pollset_kick_all(grpc_pollset* pollset) {
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
pollset->begin_refs == 0) {
GPR_TIMER_MARK("pollset_finish_shutdown", 0);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, pollset->shutdown_closure,
GRPC_ERROR_NONE);
pollset->shutdown_closure = nullptr;
@ -626,7 +623,6 @@ static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
}
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
GPR_TIMER_SCOPE("pollset_shutdown", 0);
GPR_ASSERT(pollset->shutdown_closure == nullptr);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
@ -656,8 +652,6 @@ static int poll_deadline_to_millis_timeout(grpc_core::Timestamp millis) {
called by g_active_poller thread. So there is no need for synchronization
when accessing fields in g_epoll_set */
static grpc_error_handle process_epoll_events(grpc_pollset* /*pollset*/) {
GPR_TIMER_SCOPE("process_epoll_events", 0);
static const char* err_desc = "process_events";
grpc_error_handle error = GRPC_ERROR_NONE;
long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
@ -709,8 +703,6 @@ static grpc_error_handle process_epoll_events(grpc_pollset* /*pollset*/) {
no need for any synchronization when accesing fields in g_epoll_set */
static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
grpc_core::Timestamp deadline) {
GPR_TIMER_SCOPE("do_epoll_wait", 0);
int r;
int timeout = poll_deadline_to_millis_timeout(deadline);
if (timeout != 0) {
@ -739,7 +731,6 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl,
grpc_core::Timestamp deadline) {
GPR_TIMER_SCOPE("begin_worker", 0);
if (worker_hdl != nullptr) *worker_hdl = worker;
worker->initialized_cv = false;
SET_KICK_STATE(worker, UNKICKED);
@ -862,7 +853,6 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
static bool check_neighborhood_for_available_poller(
pollset_neighborhood* neighborhood) {
GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0);
bool found_worker = false;
do {
grpc_pollset* inspect = neighborhood->active_root;
@ -885,7 +875,6 @@ static bool check_neighborhood_for_available_poller(
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@ -926,7 +915,6 @@ static bool check_neighborhood_for_available_poller(
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) {
GPR_TIMER_SCOPE("end_worker", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
}
@ -1005,7 +993,6 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
static grpc_error_handle pollset_work(grpc_pollset* ps,
grpc_pollset_worker** worker_hdl,
grpc_core::Timestamp deadline) {
GPR_TIMER_SCOPE("pollset_work", 0);
grpc_pollset_worker worker;
grpc_error_handle error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_work";
@ -1055,7 +1042,6 @@ static grpc_error_handle pollset_work(grpc_pollset* ps,
static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GPR_TIMER_SCOPE("pollset_kick", 0);
grpc_error_handle ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
std::vector<std::string> log;

@ -48,7 +48,6 @@
#include "src/core/lib/iomgr/ev_poll_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker*)1)
@ -769,13 +768,11 @@ static void kick_append_error(grpc_error_handle* composite,
static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
grpc_pollset_worker* specific_worker,
uint32_t flags) {
GPR_TIMER_SCOPE("pollset_kick_ext", 0);
grpc_error_handle error = GRPC_ERROR_NONE;
/* pollset->mu already held */
if (specific_worker != nullptr) {
if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
GPR_TIMER_SCOPE("pollset_kick_ext.broadcast", 0);
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
for (specific_worker = p->root_worker.next;
specific_worker != &p->root_worker;
@ -785,7 +782,6 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
}
p->kicked_without_pollers = true;
} else if (g_current_thread_worker != specific_worker) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = true;
}
@ -793,7 +789,6 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
kick_append_error(&error,
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
} else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
GPR_TIMER_MARK("kick_yoself", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = true;
}
@ -803,11 +798,9 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
}
} else if (g_current_thread_poller != p) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != nullptr) {
if (g_current_thread_worker == specific_worker) {
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
@ -817,13 +810,11 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
}
}
if (specific_worker != nullptr) {
GPR_TIMER_MARK("finally_kick", 0);
push_back_worker(p, specific_worker);
kick_append_error(
&error, grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
} else {
GPR_TIMER_MARK("kicked_no_pollers", 0);
p->kicked_without_pollers = true;
}
}
@ -913,7 +904,6 @@ static void work_combine_error(grpc_error_handle* composite,
static grpc_error_handle pollset_work(grpc_pollset* pollset,
grpc_pollset_worker** worker_hdl,
grpc_core::Timestamp deadline) {
GPR_TIMER_SCOPE("pollset_work", 0);
grpc_pollset_worker worker;
if (worker_hdl) *worker_hdl = &worker;
grpc_error_handle error = GRPC_ERROR_NONE;
@ -947,7 +937,6 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
worker.kicked_specifically = 0;
/* If we're shutting down then we don't execute any extended work */
if (pollset->shutting_down) {
GPR_TIMER_MARK("pollset_work.shutting_down", 0);
goto done;
}
/* Start polling, and keep doing so while we're being asked to
@ -964,7 +953,6 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
added_worker = 1;
g_current_thread_worker = &worker;
}
GPR_TIMER_SCOPE("maybe_work_and_unlock", 0);
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)
@ -1088,7 +1076,6 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
locked = 0;
} else {
GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
pollset->kicked_without_pollers = 0;
}
/* Finished execution - start cleaning up.

@ -25,7 +25,6 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/profiling/timers.h"
static void exec_ctx_run(grpc_closure* closure) {
#ifndef NDEBUG
@ -60,7 +59,6 @@ ApplicationCallbackExecCtx::callback_exec_ctx_;
bool ExecCtx::Flush() {
bool did_something = false;
GPR_TIMER_SCOPE("grpc_exec_ctx_flush", 0);
for (;;) {
if (!grpc_closure_list_empty(closure_list_)) {
grpc_closure* c = closure_list_.head;

@ -58,7 +58,6 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/tcp_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/resource_quota/trace.h"
@ -831,7 +830,6 @@ static void tcp_trace_read(grpc_tcp* tcp, grpc_error_handle error)
#define MAX_READ_IOVEC 4
static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(tcp->read_mu) {
GPR_TIMER_SCOPE("tcp_do_read", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
}
@ -879,7 +877,6 @@ static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
do {
GPR_TIMER_SCOPE("recvmsg", 0);
GRPC_STATS_INC_SYSCALL_READ();
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
@ -1103,7 +1100,6 @@ static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
* of bytes sent. */
ssize_t tcp_send(int fd, const struct msghdr* msg, int* saved_errno,
int additional_flags = 0) {
GPR_TIMER_SCOPE("sendmsg", 1);
ssize_t sent_length;
do {
/* TODO(klempner): Cork if this is a partial write */
@ -1745,7 +1741,6 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
grpc_closure* cb, void* arg, int /*max_frame_size*/) {
GPR_TIMER_SCOPE("tcp_write", 0);
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
grpc_error_handle error = GRPC_ERROR_NONE;
TcpZerocopySendRecord* zerocopy_send_record = nullptr;

@ -29,7 +29,6 @@
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
static grpc_error_handle eventfd_create(grpc_wakeup_fd* fd_info) {
fd_info->read_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
@ -53,7 +52,6 @@ static grpc_error_handle eventfd_consume(grpc_wakeup_fd* fd_info) {
}
static grpc_error_handle eventfd_wakeup(grpc_wakeup_fd* fd_info) {
GPR_TIMER_SCOPE("eventfd_wakeup", 0);
int err;
do {
err = eventfd_write(fd_info->read_fd, 1);

@ -1,295 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/profiling/timers.h"
#ifdef GRPC_BASIC_PROFILER
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gprpp/global_config.h"
#include "src/core/lib/profiling/timers.h"
typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
typedef struct gpr_timer_entry {
gpr_timespec tm;
const char* tagstr;
const char* file;
short line;
char type;
uint8_t important;
int thd;
} gpr_timer_entry;
#define MAX_COUNT 1000000
typedef struct gpr_timer_log {
size_t num_entries;
struct gpr_timer_log* next;
struct gpr_timer_log* prev;
gpr_timer_entry log[MAX_COUNT];
} gpr_timer_log;
typedef struct gpr_timer_log_list {
gpr_timer_log* head;
/* valid iff head!=NULL */
gpr_timer_log* tail;
} gpr_timer_log_list;
static GPR_THREAD_LOCAL(gpr_timer_log*) g_thread_log;
static gpr_once g_once_init = GPR_ONCE_INIT;
static FILE* output_file;
static const char* output_filename_or_null = NULL;
static pthread_mutex_t g_mu;
static pthread_cond_t g_cv;
static gpr_timer_log_list g_in_progress_logs;
static gpr_timer_log_list g_done_logs;
static int g_shutdown;
static pthread_t g_writing_thread;
static GPR_THREAD_LOCAL(int) g_thread_id;
static int g_next_thread_id;
static int g_writing_enabled = 1;
GPR_GLOBAL_CONFIG_DEFINE_STRING(grpc_latency_trace, "latency_trace.txt",
"Output file name for latency trace")
static const char* output_filename() {
if (output_filename_or_null == NULL) {
grpc_core::UniquePtr<char> value =
GPR_GLOBAL_CONFIG_GET(grpc_latency_trace);
if (strlen(value.get()) > 0) {
output_filename_or_null = value.release();
} else {
output_filename_or_null = "latency_trace.txt";
}
}
return output_filename_or_null;
}
static int timer_log_push_back(gpr_timer_log_list* list, gpr_timer_log* log) {
if (list->head == NULL) {
list->head = list->tail = log;
log->next = log->prev = NULL;
return 1;
} else {
log->prev = list->tail;
log->next = NULL;
list->tail->next = log;
list->tail = log;
return 0;
}
}
static gpr_timer_log* timer_log_pop_front(gpr_timer_log_list* list) {
gpr_timer_log* out = list->head;
if (out != NULL) {
list->head = out->next;
if (list->head != NULL) {
list->head->prev = NULL;
} else {
list->tail = NULL;
}
}
return out;
}
static void timer_log_remove(gpr_timer_log_list* list, gpr_timer_log* log) {
if (log->prev == NULL) {
list->head = log->next;
if (list->head != NULL) {
list->head->prev = NULL;
}
} else {
log->prev->next = log->next;
}
if (log->next == NULL) {
list->tail = log->prev;
if (list->tail != NULL) {
list->tail->next = NULL;
}
} else {
log->next->prev = log->prev;
}
}
static void write_log(gpr_timer_log* log) {
size_t i;
if (output_file == NULL) {
output_file = fopen(output_filename(), "w");
}
for (i = 0; i < log->num_entries; i++) {
gpr_timer_entry* entry = &(log->log[i]);
if (gpr_time_cmp(entry->tm, gpr_time_0(entry->tm.clock_type)) < 0) {
entry->tm = gpr_time_0(entry->tm.clock_type);
}
fprintf(output_file,
"{\"t\": %" PRId64
".%09d, \"thd\": \"%d\", \"type\": \"%c\", \"tag\": "
"\"%s\", \"file\": \"%s\", \"line\": %d, \"imp\": %d}\n",
entry->tm.tv_sec, entry->tm.tv_nsec, entry->thd, entry->type,
entry->tagstr, entry->file, entry->line, entry->important);
}
}
static void* writing_thread(void* unused) {
gpr_timer_log* log;
pthread_mutex_lock(&g_mu);
for (;;) {
while ((log = timer_log_pop_front(&g_done_logs)) == NULL && !g_shutdown) {
pthread_cond_wait(&g_cv, &g_mu);
}
if (log != NULL) {
pthread_mutex_unlock(&g_mu);
write_log(log);
free(log);
pthread_mutex_lock(&g_mu);
}
if (g_shutdown) {
pthread_mutex_unlock(&g_mu);
return NULL;
}
}
}
static void flush_logs(gpr_timer_log_list* list) {
gpr_timer_log* log;
while ((log = timer_log_pop_front(list)) != NULL) {
write_log(log);
free(log);
}
}
static void finish_writing(void) {
pthread_mutex_lock(&g_mu);
g_shutdown = 1;
pthread_cond_signal(&g_cv);
pthread_mutex_unlock(&g_mu);
pthread_join(g_writing_thread, NULL);
gpr_log(GPR_INFO, "flushing logs");
pthread_mutex_lock(&g_mu);
flush_logs(&g_done_logs);
flush_logs(&g_in_progress_logs);
pthread_mutex_unlock(&g_mu);
if (output_file) {
fclose(output_file);
}
}
void gpr_timers_set_log_filename(const char* filename) {
output_filename_or_null = filename;
}
static void init_output() {
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
pthread_create(&g_writing_thread, &attr, &writing_thread, NULL);
pthread_attr_destroy(&attr);
atexit(finish_writing);
}
static void rotate_log() {
/* Using malloc here, as this code could end up being called by gpr_malloc */
gpr_timer_log* log = static_cast<gpr_timer_log*>(malloc(sizeof(*log)));
gpr_once_init(&g_once_init, init_output);
log->num_entries = 0;
pthread_mutex_lock(&g_mu);
if (g_thread_log != NULL) {
timer_log_remove(&g_in_progress_logs, g_thread_log);
if (timer_log_push_back(&g_done_logs, g_thread_log)) {
pthread_cond_signal(&g_cv);
}
} else {
g_thread_id = g_next_thread_id++;
}
timer_log_push_back(&g_in_progress_logs, log);
pthread_mutex_unlock(&g_mu);
g_thread_log = log;
}
static void gpr_timers_log_add(const char* tagstr, marker_type type,
int important, const char* file, int line) {
gpr_timer_entry* entry;
if (!g_writing_enabled) {
return;
}
if (g_thread_log == NULL || g_thread_log->num_entries == MAX_COUNT) {
rotate_log();
}
entry = &g_thread_log->log[g_thread_log->num_entries++];
entry->tm = gpr_now(GPR_CLOCK_PRECISE);
entry->tagstr = tagstr;
entry->type = type;
entry->file = file;
entry->line = (short)line;
entry->important = important != 0;
entry->thd = g_thread_id;
}
/* Latency profiler API implementation. */
void gpr_timer_add_mark(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, MARK, important, file, line);
}
void gpr_timer_begin(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, BEGIN, important, file, line);
}
void gpr_timer_end(const char* tagstr, int important, const char* file,
int line) {
gpr_timers_log_add(tagstr, END, important, file, line);
}
void gpr_timer_set_enabled(int enabled) { g_writing_enabled = enabled; }
/* Basic profiler specific API functions. */
void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
#else /* !GRPC_BASIC_PROFILER */
void gpr_timers_global_init(void) {}
void gpr_timers_global_destroy(void) {}
void gpr_timers_set_log_filename(const char* /*filename*/) {}
void gpr_timer_set_enabled(int /*enabled*/) {}
#endif /* GRPC_BASIC_PROFILER */

@ -1,7 +0,0 @@
provider _stap {
probe add_mark(int tag);
probe add_important_mark(int tag);
probe timing_ns_begin(int tag);
probe timing_ns_end(int tag);
};

@ -1,50 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GRPC_STAP_PROFILER
#include <sys/sdt.h>
#include "src/core/lib/profiling/timers.h"
/* Generated from src/core/profiling/stap_probes.d */
#include "src/core/lib/profiling/stap_probes.h"
/* Latency profiler API implementation. */
void gpr_timer_add_mark(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_ADD_MARK(tag);
}
void gpr_timer_add_important_mark(int tag, const char* tagstr, void* id,
const char* file, int line) {
_STAP_ADD_IMPORTANT_MARK(tag);
}
void gpr_timer_begin(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_TIMING_NS_BEGIN(tag);
}
void gpr_timer_end(int tag, const char* tagstr, void* id, const char* file,
int line) {
_STAP_TIMING_NS_END(tag);
}
#endif /* GRPC_STAP_PROFILER */

@ -1,94 +0,0 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_PROFILING_TIMERS_H
#define GRPC_CORE_LIB_PROFILING_TIMERS_H
void gpr_timers_global_init(void);
void gpr_timers_global_destroy(void);
void gpr_timer_add_mark(const char* tagstr, int important, const char* file,
int line);
void gpr_timer_begin(const char* tagstr, int important, const char* file,
int line);
void gpr_timer_end(const char* tagstr, int important, const char* file,
int line);
void gpr_timers_set_log_filename(const char* filename);
void gpr_timer_set_enabled(int enabled);
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
defined(GRPC_CUSTOM_PROFILER))
/* No profiling. No-op all the things. */
#define GPR_TIMER_MARK(tag, important) \
do { \
} while (0)
#define GPR_TIMER_SCOPE(tag, important) \
do { \
} while (0)
#else /* at least one profiler requested... */
/* ... hopefully only one. */
#if defined(GRPC_STAP_PROFILER) && defined(GRPC_BASIC_PROFILER)
#error "GRPC_STAP_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
#endif
#if defined(GRPC_STAP_PROFILER) && defined(GRPC_CUSTOM_PROFILER)
#error "GRPC_STAP_PROFILER and GRPC_CUSTOM_PROFILER are mutually exclusive."
#endif
#if defined(GRPC_CUSTOM_PROFILER) && defined(GRPC_BASIC_PROFILER)
#error "GRPC_CUSTOM_PROFILER and GRPC_BASIC_PROFILER are mutually exclusive."
#endif
/* Generic profiling interface. */
#define GPR_TIMER_MARK(tag, important) \
gpr_timer_add_mark(tag, important, __FILE__, __LINE__);
#ifdef GRPC_STAP_PROFILER
/* Empty placeholder for now. */
#endif /* GRPC_STAP_PROFILER */
#ifdef GRPC_BASIC_PROFILER
/* Empty placeholder for now. */
#endif /* GRPC_BASIC_PROFILER */
namespace grpc {
class ProfileScope {
public:
ProfileScope(const char* desc, bool important, const char* file, int line)
: desc_(desc) {
gpr_timer_begin(desc_, important ? 1 : 0, file, line);
}
~ProfileScope() { gpr_timer_end(desc_, 0, "n/a", 0); }
private:
const char* const desc_;
};
} // namespace grpc
#define GPR_TIMER_SCOPE_NAME_INTERNAL(prefix, line) prefix##line
#define GPR_TIMER_SCOPE_NAME(prefix, line) \
GPR_TIMER_SCOPE_NAME_INTERNAL(prefix, line)
#define GPR_TIMER_SCOPE(tag, important) \
::grpc::ProfileScope GPR_TIMER_SCOPE_NAME(_profile_scope_, __LINE__)( \
(tag), (important), __FILE__, __LINE__)
#endif /* at least one profiler requested. */
#endif /* GRPC_CORE_LIB_PROFILING_TIMERS_H */

@ -49,7 +49,6 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/resource_quota/resource_quota.h"
@ -385,8 +384,6 @@ static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
grpc_closure* cb, void* arg, int max_frame_size) {
GPR_TIMER_SCOPE("secure_endpoint.endpoint_write", 0);
unsigned i;
tsi_result result = TSI_OK;
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);

@ -63,7 +63,6 @@
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/slice_buffer.h"
#include "src/core/lib/slice/slice_internal.h"
@ -516,8 +515,6 @@ void Call::PublishToParent(Call* parent) {
grpc_error_handle FilterStackCall::Create(grpc_call_create_args* args,
grpc_call** out_call) {
GPR_TIMER_SCOPE("grpc_call_create", 0);
Channel* channel = args->channel.get();
auto add_init_error = [](grpc_error_handle* composite,
@ -643,7 +640,6 @@ void FilterStackCall::ReleaseCall(void* call, grpc_error_handle /*error*/) {
}
void FilterStackCall::DestroyCall(void* call, grpc_error_handle /*error*/) {
GPR_TIMER_SCOPE("destroy_call", 0);
auto* c = static_cast<FilterStackCall*>(call);
c->recv_initial_metadata_.Clear();
c->recv_trailing_metadata_.Clear();
@ -690,8 +686,6 @@ void Call::MaybeUnpublishFromParent() {
void FilterStackCall::ExternalUnref() {
if (GPR_LIKELY(!ext_ref_.Unref())) return;
GPR_TIMER_SCOPE("grpc_call_unref", 0);
ApplicationCallbackExecCtx callback_exec_ctx;
ExecCtx exec_ctx;
@ -729,7 +723,6 @@ void FilterStackCall::ExecuteBatch(grpc_transport_stream_op_batch* batch,
// This is called via the call combiner to start sending a batch down
// the filter stack.
auto execute_batch_in_call_combiner = [](void* arg, grpc_error_handle) {
GPR_TIMER_SCOPE("execute_batch_in_call_combiner", 0);
grpc_transport_stream_op_batch* batch =
static_cast<grpc_transport_stream_op_batch*>(arg);
auto* call =
@ -929,7 +922,6 @@ void FilterStackCall::PublishAppMetadata(grpc_metadata_batch* b,
if (b->count() == 0) return;
if (!is_client() && is_trailing) return;
if (is_trailing && buffered_metadata_[1] == nullptr) return;
GPR_TIMER_SCOPE("publish_app_metadata", 0);
grpc_metadata_array* dest;
dest = buffered_metadata_[is_trailing];
if (dest->count + b->count() > dest->capacity) {
@ -1230,7 +1222,6 @@ void FilterStackCall::BatchControl::ReceivingInitialMetadataReady(
call->RecvInitialFilter(md);
/* TODO(ctiller): this could be moved into recv_initial_filter now */
GPR_TIMER_SCOPE("validate_filtered_metadata", 0);
ValidateFilteredMetadata();
absl::optional<Timestamp> deadline = md->get(GrpcTimeoutMetadata());
@ -1301,8 +1292,6 @@ void FilterStackCall::BatchControl::FinishBatch(grpc_error_handle error) {
grpc_call_error FilterStackCall::StartBatch(const grpc_op* ops, size_t nops,
void* notify_tag,
bool is_notify_tag_closure) {
GPR_TIMER_SCOPE("call_start_batch", 0);
size_t i;
const grpc_op* op;
BatchControl* bctl;

@ -51,7 +51,6 @@
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/event_string.h"
@ -503,8 +502,6 @@ grpc_cq_completion* CqEventQueue::Pop() {
grpc_completion_queue* grpc_completion_queue_create_internal(
grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type,
grpc_completion_queue_functor* shutdown_callback) {
GPR_TIMER_SCOPE("grpc_completion_queue_create_internal", 0);
grpc_completion_queue* cq;
GRPC_API_TRACE(
@ -680,8 +677,6 @@ static void cq_end_op_for_next(
grpc_completion_queue* cq, void* tag, grpc_error_handle error,
void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
grpc_cq_completion* storage, bool /*internal*/) {
GPR_TIMER_SCOPE("cq_end_op_for_next", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
(GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
!GRPC_ERROR_IS_NONE(error))) {
@ -758,8 +753,6 @@ static void cq_end_op_for_pluck(
grpc_completion_queue* cq, void* tag, grpc_error_handle error,
void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
grpc_cq_completion* storage, bool /*internal*/) {
GPR_TIMER_SCOPE("cq_end_op_for_pluck", 0);
cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
int is_success = (GRPC_ERROR_IS_NONE(error));
@ -828,8 +821,6 @@ static void cq_end_op_for_callback(
grpc_completion_queue* cq, void* tag, grpc_error_handle error,
void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
grpc_cq_completion* storage, bool internal) {
GPR_TIMER_SCOPE("cq_end_op_for_callback", 0);
cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
@ -950,8 +941,6 @@ static void dump_pending_tags(grpc_completion_queue* /*cq*/) {}
static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
void* reserved) {
GPR_TIMER_SCOPE("grpc_completion_queue_next", 0);
grpc_event ret;
cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
@ -1195,8 +1184,6 @@ class ExecCtxPluck : public grpc_core::ExecCtx {
static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
gpr_timespec deadline, void* reserved) {
GPR_TIMER_SCOPE("grpc_completion_queue_pluck", 0);
grpc_event ret;
grpc_cq_completion* c;
grpc_cq_completion* prev;
@ -1402,7 +1389,6 @@ static void cq_shutdown_callback(grpc_completion_queue* cq) {
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
GPR_TIMER_SCOPE("grpc_completion_queue_shutdown", 0);
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
@ -1410,7 +1396,6 @@ void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
}
void grpc_completion_queue_destroy(grpc_completion_queue* cq) {
GPR_TIMER_SCOPE("grpc_completion_queue_destroy", 0);
GRPC_API_TRACE("grpc_completion_queue_destroy(cq=%p)", 1, (cq));
grpc_completion_queue_shutdown(cq);

@ -46,7 +46,6 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/timer_manager.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/security/authorization/grpc_server_authz_filter.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
@ -154,7 +153,6 @@ void grpc_init(void) {
grpc_fork_handlers_auto_register();
grpc_core::ApplicationCallbackExecCtx::GlobalInit();
grpc_iomgr_init();
gpr_timers_global_init();
for (int i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].init != nullptr) {
g_all_of_the_plugins[i].init();
@ -183,7 +181,6 @@ void grpc_shutdown_internal_locked(void)
}
grpc_event_engine::experimental::ResetDefaultEventEngine();
grpc_iomgr_shutdown();
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_core::Fork::GlobalShutdown();
}

@ -73,7 +73,6 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resource_quota/api.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
@ -813,7 +812,6 @@ class Server::SyncRequestThreadManager : public grpc::ThreadManager {
GPR_DEBUG_ASSERT(sync_req != nullptr);
GPR_DEBUG_ASSERT(ok);
GPR_TIMER_SCOPE("sync_req->Run()", 0);
sync_req->Run(global_callbacks_, resources);
}

@ -591,8 +591,6 @@ CORE_SOURCE_FILES = [
'src/core/lib/load_balancing/lb_policy.cc',
'src/core/lib/load_balancing/lb_policy_registry.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',

@ -758,18 +758,6 @@
$(E) "[MAKE] Generating $@"
$(Q) echo "$(CACHE_MK)" | tr , '\n' >$@
ifeq ($(CONFIG),stapprof)
src/core/profiling/stap_timers.c: $(GENDIR)/src/core/profiling/stap_probes.h
ifeq ($(HAS_SYSTEMTAP),true)
$(GENDIR)/src/core/profiling/stap_probes.h: src/core/profiling/stap_probes.d
$(E) "[DTRACE] Compiling $<"
$(Q) mkdir -p `dirname $@`
$(Q) $(DTRACE) -C -h -s $< -o $@
else
$(GENDIR)/src/core/profiling/stap_probes.h: systemtap_dep_error stop
endif
endif
$(OBJDIR)/$(CONFIG)/%.o : %.c
$(E) "[C] Compiling $<"
$(Q) mkdir -p `dirname $@`

@ -25,7 +25,6 @@
#include <grpc/support/time.h>
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/profiling/timers.h"
#include "test/core/util/cmdline.h"
#include "test/core/util/grpc_profiler.h"
#include "test/core/util/histogram.h"
@ -75,7 +74,6 @@ static void init_ping_pong_request(void) {
}
static void step_ping_pong_request(void) {
GPR_TIMER_SCOPE("ping_pong", 1);
grpc_slice host = grpc_slice_from_static_string("localhost");
call = grpc_channel_create_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
@ -118,7 +116,6 @@ static void init_ping_pong_stream(void) {
}
static void step_ping_pong_stream(void) {
GPR_TIMER_SCOPE("ping_pong", 1);
grpc_call_error error;
error = grpc_call_start_batch(call, stream_step_ops, 2,
reinterpret_cast<void*>(1), nullptr);
@ -159,8 +156,6 @@ int main(int argc, char** argv) {
const char* scenario_name = "ping-pong-request";
scenario sc = {nullptr, nullptr, nullptr};
gpr_timers_set_log_filename("latency_trace.fling_client.txt");
GPR_ASSERT(argc >= 1);
fake_argv[0] = argv[0];
grpc::testing::TestEnvironment env(&fake_argc, fake_argv);

@ -34,7 +34,6 @@
#include <grpc/support/time.h>
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/profiling/timers.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/cmdline.h"
#include "test/core/util/grpc_profiler.h"
@ -183,8 +182,6 @@ int main(int argc, char** argv) {
char* fake_argv[1];
gpr_timers_set_log_filename("latency_trace.fling_server.txt");
GPR_ASSERT(argc >= 1);
argc = 1;
fake_argv[0] = argv[0];

@ -44,7 +44,6 @@
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport_impl.h"
@ -156,7 +155,6 @@ static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
grpc::testing::EchoResponse recv_response;
grpc::Status recv_status;
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc::ClientContext cli_ctx;
auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq);
reader->Finish(&recv_response, &recv_status, tag(0));
@ -191,7 +189,6 @@ static void BM_LameChannelCallCreateCore(benchmark::State& state) {
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
@ -265,7 +262,6 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
@ -565,7 +561,6 @@ static void BM_IsolatedFilter(benchmark::State& state) {
grpc_core::Arena::Create(kArenaSize, g_memory_allocator),
nullptr};
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
GRPC_ERROR_UNREF(
grpc_call_stack_init(channel_stack, 1, DoNothing, nullptr, &call_args));
typename TestOp::Op op(&test_op_data, call_stack, call_args.arena);
@ -751,7 +746,6 @@ static void BM_IsolatedCall_NoOp(benchmark::State& state) {
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, nullptr));
@ -790,7 +784,6 @@ static void BM_IsolatedCall_Unary(benchmark::State& state) {
ops[5].data.recv_status_on_client.status_details = &status_details;
ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata;
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, nullptr);
@ -833,7 +826,6 @@ static void BM_IsolatedCall_StreamingSend(benchmark::State& state) {
ops[0].op = GRPC_OP_SEND_MESSAGE;
ops[0].data.send_message.send_message = send_message;
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
grpc_call_start_batch(call, ops, 1, tag(2), nullptr);
grpc_completion_queue_next(fixture.cq(),
gpr_inf_future(GPR_CLOCK_MONOTONIC), nullptr);

@ -23,7 +23,6 @@
#include <benchmark/benchmark.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/callback_test_service.h"
#include "test/cpp/microbenchmarks/fullstack_context_mutators.h"
@ -136,7 +135,6 @@ static void BM_CallbackBidiStreaming(benchmark::State& state) {
request.set_message("");
}
if (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
BidiClient test{&state, stub_.get(), &cli_ctx, &request, &response};
test.Await();
}

@ -25,7 +25,6 @@
#include <benchmark/benchmark.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/callback_test_service.h"
#include "test/cpp/microbenchmarks/fullstack_context_mutators.h"
@ -83,7 +82,6 @@ static void BM_CallbackUnaryPingPong(benchmark::State& state) {
std::condition_variable cv;
bool done = false;
if (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
SendCallbackUnaryPingPong(&state, &cli_ctx, &request, &response,
stub_.get(), &done, &mu, &cv);
}

@ -25,7 +25,6 @@
#include <benchmark/benchmark.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/fullstack_context_mutators.h"
#include "test/cpp/microbenchmarks/fullstack_fixtures.h"
@ -183,7 +182,6 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
}
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(0)); // Start client send
response_rw.Read(&recv_request, tag(1)); // Start server recv
request_rw->Read(&recv_response, tag(2)); // Start client recv

@ -25,7 +25,6 @@
#include <benchmark/benchmark.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/fullstack_context_mutators.h"
#include "test/cpp/microbenchmarks/fullstack_fixtures.h"
@ -70,7 +69,6 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
}
response_rw.Read(&recv_request, tag(0));
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@ -139,7 +137,6 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
}
request_rw->Read(&recv_response, tag(0));
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
response_rw.Write(send_response, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));

@ -25,7 +25,6 @@
#include <benchmark/benchmark.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/cpp/microbenchmarks/fullstack_context_mutators.h"
#include "test/cpp/microbenchmarks/fullstack_fixtures.h"
@ -74,7 +73,6 @@ static void BM_UnaryPingPong(benchmark::State& state) {
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
for (auto _ : state) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
recv_response.Clear();
ClientContext cli_ctx;
ClientContextMutator cli_ctx_mut(&cli_ctx);

@ -186,7 +186,6 @@ class CallbackUnaryClient final : public CallbackClient {
}
void IssueUnaryCallbackRpc(Thread* t, size_t vector_idx) {
GPR_TIMER_SCOPE("CallbackUnaryClient::ThreadFunc", 0);
double start = UsageTimer::Now();
ctx_[vector_idx]->stub_->async()->UnaryCall(
(&ctx_[vector_idx]->context_), &request_, &ctx_[vector_idx]->response_,

@ -33,7 +33,6 @@
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/benchmark_service.grpc.pb.h"
#include "test/cpp/qps/client.h"
#include "test/cpp/qps/interarrival.h"
@ -125,7 +124,6 @@ class SynchronousUnaryClient final : public SynchronousClient {
}
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
double start = UsageTimer::Now();
GPR_TIMER_SCOPE("SynchronousUnaryClient::ThreadFunc", 0);
grpc::ClientContext context;
grpc::Status s =
stub->UnaryCall(&context, request_, &responses_[thread_idx]);
@ -243,7 +241,6 @@ class SynchronousStreamingPingPongClient final
if (!WaitToIssue(thread_idx)) {
return true;
}
GPR_TIMER_SCOPE("SynchronousStreamingPingPongClient::ThreadFunc", 0);
double start = UsageTimer::Now();
if (stream_[thread_idx]->Write(request_) &&
stream_[thread_idx]->Read(&responses_[thread_idx])) {
@ -304,7 +301,6 @@ class SynchronousStreamingFromClientClient final
if (!WaitToIssue(thread_idx)) {
return true;
}
GPR_TIMER_SCOPE("SynchronousStreamingFromClientClient::ThreadFunc", 0);
if (stream_[thread_idx]->Write(request_)) {
double now = UsageTimer::Now();
entry->set_value((now - last_issue_[thread_idx]) * 1e9);
@ -350,7 +346,6 @@ class SynchronousStreamingFromServerClient final
}
bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
GPR_TIMER_SCOPE("SynchronousStreamingFromServerClient::ThreadFunc", 0);
if (stream_[thread_idx]->Read(&responses_[thread_idx])) {
double now = UsageTimer::Now();
entry->set_value((now - last_recv_[thread_idx]) * 1e9);

@ -36,7 +36,6 @@
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/profiling/timers.h"
#include "src/proto/grpc/testing/worker_service.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@ -616,8 +615,6 @@ std::unique_ptr<ScenarioResult> RunScenario(
start,
gpr_time_from_seconds(warmup_seconds + benchmark_seconds, GPR_TIMESPAN)));
gpr_timer_set_enabled(0);
// Finish a run
std::unique_ptr<ScenarioResult> result(new ScenarioResult);
Histogram merged_latencies;

@ -115,12 +115,6 @@ build:ubsan --action_env=UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1:suppre
# build:ubsan --linkopt=-lubsan
# build:ubsan --linkopt=--rtlib=compiler-rt
build:basicprof --strip=never
build:basicprof --copt=-DNDEBUG
build:basicprof --copt=-O2
build:basicprof --copt=-DGRPC_BASIC_PROFILER
build:basicprof --copt=-DGRPC_TIMERS_RDTSC
build:python_single_threaded_unary_stream --test_env="GRPC_SINGLE_THREADED_UNARY_STREAM=true"
build:python_poller_engine --test_env="GRPC_ASYNCIO_ENGINE=poller"

@ -2230,9 +2230,6 @@ src/core/lib/load_balancing/lb_policy_registry.h \
src/core/lib/load_balancing/subchannel_interface.h \
src/core/lib/matchers/matchers.cc \
src/core/lib/matchers/matchers.h \
src/core/lib/profiling/basic_timers.cc \
src/core/lib/profiling/stap_timers.cc \
src/core/lib/profiling/timers.h \
src/core/lib/promise/activity.cc \
src/core/lib/promise/activity.h \
src/core/lib/promise/arena_promise.h \

@ -2023,9 +2023,6 @@ src/core/lib/load_balancing/lb_policy_registry.h \
src/core/lib/load_balancing/subchannel_interface.h \
src/core/lib/matchers/matchers.cc \
src/core/lib/matchers/matchers.h \
src/core/lib/profiling/basic_timers.cc \
src/core/lib/profiling/stap_timers.cc \
src/core/lib/profiling/timers.h \
src/core/lib/promise/activity.cc \
src/core/lib/promise/activity.h \
src/core/lib/promise/arena_promise.h \

@ -1,276 +0,0 @@
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import hashlib
import json
import math
import sys
import time
from six.moves import zip
import tabulate
# Sentinel objects used as keys into LineItem.times; each names one derived
# latency metric computed while the call stacks are reconstructed below.
SELF_TIME = object()
TIME_FROM_SCOPE_START = object()
TIME_TO_SCOPE_END = object()
TIME_FROM_STACK_START = object()
TIME_TO_STACK_END = object()
TIME_FROM_LAST_IMPORTANT = object()

# Command line: where to read the trace from, and how to render the report.
argp = argparse.ArgumentParser(
    description='Process output of basic_prof builds')
argp.add_argument('--source', default='latency_trace.txt', type=str)
argp.add_argument('--fmt', choices=tabulate.tabulate_formats, default='simple')
argp.add_argument('--out', default='-', type=str)
args = argp.parse_args()
class LineItem(object):
    """One row of the profile report: a single timer event from the trace.

    Wraps a parsed JSON trace record, remembering where it occurred and at
    what nesting depth; timing metrics are accumulated into ``times`` later.
    """

    def __init__(self, line, indent):
        # Copy the raw event fields out of the parsed JSON record.
        self.tag = line['tag']
        self.important = line['imp']
        self.filename = line['file']
        self.fileline = line['line']
        self.start_time = line['t']
        # Filled in once the matching close event (or the mark itself) is seen.
        self.end_time = None
        self.indent = indent
        # Metric sentinel -> measured duration(s); populated during analysis.
        self.times = {}
class ScopeBuilder(object):
    """Tracks one open '{' timer scope while a thread's stack is rebuilt.

    Appends its own LineItem to the owning CallStackBuilder on creation, and
    back-fills per-child timing metrics when the matching '}' record arrives.
    """

    def __init__(self, call_stack_builder, line):
        self.call_stack_builder = call_stack_builder
        self.indent = len(call_stack_builder.stk)
        self.top_line = LineItem(line, self.indent)
        call_stack_builder.lines.append(self.top_line)
        # Children of this scope occupy lines[first_child_pos:] until close.
        self.first_child_pos = len(call_stack_builder.lines)

    def mark(self, line):
        """Record a point event ('.' / '!') nested one level inside this scope."""
        event = LineItem(line, self.indent + 1)
        event.end_time = event.start_time  # point events have zero duration
        self.call_stack_builder.lines.append(event)

    def finish(self, line):
        """Close this scope with its '}' record and compute scope-relative times."""
        assert line['tag'] == self.top_line.tag, (
            'expected %s, got %s; thread=%s; t0=%f t1=%f' %
            (self.top_line.tag, line['tag'], line['thd'],
             self.top_line.start_time, line['t']))
        closed_at = line['t']
        assert self.top_line.end_time is None
        self.top_line.end_time = closed_at
        self.top_line.important = self.top_line.important or line['imp']
        assert SELF_TIME not in self.top_line.times
        self.top_line.times[SELF_TIME] = closed_at - self.top_line.start_time
        opened_at = self.top_line.start_time
        for child in self.call_stack_builder.lines[self.first_child_pos:]:
            # Only the innermost enclosing scope fills these in; outer scopes
            # must not overwrite them.
            if TIME_FROM_SCOPE_START not in child.times:
                child.times[TIME_FROM_SCOPE_START] = child.start_time - opened_at
                child.times[TIME_TO_SCOPE_END] = closed_at - child.end_time
class CallStackBuilder(object):
    """Incrementally rebuilds one thread's call stack from trace records."""

    def __init__(self):
        self.stk = []  # open ScopeBuilders, innermost last
        self.signature = hashlib.md5()  # running digest of the event sequence
        self.lines = []  # LineItems in the order their scopes opened

    def finish(self):
        """Seal a completed stack: freeze the signature, compute stack times."""
        start_time = self.lines[0].start_time
        end_time = self.lines[0].end_time
        self.signature = self.signature.hexdigest()
        last_important = start_time
        for item in self.lines:
            item.times[TIME_FROM_STACK_START] = item.start_time - start_time
            item.times[TIME_TO_STACK_END] = end_time - item.end_time
            item.times[TIME_FROM_LAST_IMPORTANT] = (
                item.start_time - last_important)
            if item.important:
                last_important = item.end_time
        # Dead store retained from the original; nothing reads it afterwards.
        last_important = end_time

    def add(self, line):
        """Consume one trace record; return True once the stack is complete."""
        kind = line['type']
        self.signature.update(kind.encode('UTF-8'))
        self.signature.update(line['tag'].encode('UTF-8'))
        if kind == '{':
            # Open a new nested scope.
            self.stk.append(ScopeBuilder(self, line))
            return False
        if kind == '}':
            assert self.stk, (
                'expected non-empty stack for closing %s; thread=%s; t=%f' %
                (line['tag'], line['thd'], line['t']))
            self.stk.pop().finish(line)
            if not self.stk:
                # Outermost scope closed: the whole stack is done.
                self.finish()
                return True
            return False
        if kind == '.' or kind == '!':
            # Point event: attach to the current scope, if any.
            if self.stk:
                self.stk[-1].mark(line)
            return False
        raise Exception('Unknown line type: \'%s\'' % kind)
class CallStack(object):
    """Aggregate of every observed call stack sharing one signature."""

    def __init__(self, initial_call_stack_builder):
        self.count = 1
        self.signature = initial_call_stack_builder.signature
        self.lines = initial_call_stack_builder.lines
        # Promote each scalar timing into a one-element sample list so that
        # later stacks with the same signature can append their own samples.
        for item in self.lines:
            item.times = {key: [value] for key, value in item.times.items()}

    def add(self, call_stack_builder):
        """Merge another completed stack with an identical signature."""
        assert self.signature == call_stack_builder.signature
        self.count += 1
        assert len(self.lines) == len(call_stack_builder.lines)
        for summary, incoming in zip(self.lines, call_stack_builder.lines):
            assert summary.tag == incoming.tag
            assert list(summary.times.keys()) == list(incoming.times.keys())
            for metric, samples in list(summary.times.items()):
                samples.append(incoming.times[metric])

    def finish(self):
        """Sort every sample list so percentiles can be read positionally."""
        for item in self.lines:
            for samples in list(item.times.values()):
                samples.sort()
# Reconstruct call stacks from the trace, one in-progress builder per thread.
builder = collections.defaultdict(CallStackBuilder)
# NOTE(review): defaultdict(CallStack) would fail if a missing key were ever
# read (CallStack.__init__ requires a builder argument), but the code below
# always guards with an explicit membership test first.
call_stacks = collections.defaultdict(CallStack)
lines = 0
start = time.time()
with open(args.source) as f:
    for line in f:
        lines += 1
        # Each trace line is a standalone JSON record tagged with its thread.
        inf = json.loads(line)
        thd = inf['thd']
        cs = builder[thd]
        if cs.add(inf):
            # The thread's outermost scope closed: fold the completed stack
            # into the aggregate keyed by its event-sequence signature.
            if cs.signature in call_stacks:
                call_stacks[cs.signature].add(cs)
            else:
                call_stacks[cs.signature] = CallStack(cs)
            del builder[thd]
time_taken = time.time() - start
# Report the most frequently observed call stacks first.
call_stacks = sorted(list(call_stacks.values()),
                     key=lambda cs: cs.count,
                     reverse=True)
total_stacks = 0
for cs in call_stacks:
    total_stacks += cs.count
    cs.finish()
def percentile(N, percent, key=lambda x: x):
    """
    Find the percentile of an already sorted list of values.

    @parameter N - is a list of values. MUST be already sorted.
    @parameter percent - a float value from [0.0,1.0].
    @parameter key - optional key function to compute value from each element of N.
    @return - the percentile of the values
    """
    if not N:
        return None
    exact_pos = (len(N) - 1) * percent
    lo = int(exact_pos)
    value = key(N[lo])
    if lo < len(N) - 1:
        # Linearly interpolate with the next element when the position
        # falls between two samples.
        value += (exact_pos - lo) * (key(N[lo + 1]) - key(N[lo]))
    return value
def tidy_tag(tag):
    """Strip the legacy 'GRPC_PTAG_' prefix from a timer tag, if present."""
    prefix = 'GRPC_PTAG_'
    if tag.startswith(prefix):
        return tag[len(prefix):]
    return tag
def time_string(values):
    """Format the 50th/90th/99th percentiles of ``values`` as a single cell.

    ``values`` is a sorted list of durations in seconds; the result is
    microseconds, e.g. '12.3/45.6/78.9'.
    """
    # Fix: dropped the unused local `num_values` from the original.
    return '%.1f/%.1f/%.1f' % (1e6 * percentile(values, 0.5),
                               1e6 * percentile(values, 0.9),
                               1e6 * percentile(values, 0.99))
def time_format(idx):
    """Build a table-cell renderer for the timing metric keyed by ``idx``.

    The returned callable formats a line's percentile summary for that
    metric, or returns '' when the line never recorded it.
    """

    def render(line, idx=idx):
        if idx not in line.times:
            return ''
        return time_string(line.times[idx])

    return render
# Per-format banner written above each call-stack table.
BANNER = {'simple': 'Count: %(count)d', 'html': '<h1>Count: %(count)d</h1>'}

# Report layout: (column header, row renderer) pairs applied to each LineItem.
FORMAT = [
    ('TAG', lambda line: '..' * line.indent + tidy_tag(line.tag)),
    ('LOC', lambda line: '%s:%d' %
     (line.filename[line.filename.rfind('/') + 1:], line.fileline)),
    ('IMP', lambda line: '*' if line.important else ''),
    ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
    ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
    ('SELF', time_format(SELF_TIME)),
    ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
    ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
    ('SELF', time_format(SELF_TIME)),
    ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
]
# Render the report to stdout, or to --out when given.
out = sys.stdout
if args.out != '-':
    out = open(args.out, 'w')

if args.fmt == 'html':
    out.write('<html>')
    out.write('<head>')
    out.write('<title>Profile Report</title>')
    out.write('</head>')

accounted_for = 0
for cs in call_stacks:
    out.write('\n')
    if args.fmt in BANNER:
        out.write(BANNER[args.fmt] % {
            'count': cs.count,
        })
    header, _ = list(zip(*FORMAT))
    table = []
    for line in cs.lines:
        fields = []
        for _, fn in FORMAT:
            fields.append(fn(line))
        table.append(fields)
    out.write(tabulate.tabulate(table, header, tablefmt=args.fmt))
    accounted_for += cs.count
    # Stop once 99% of the observed stacks have been reported.
    if accounted_for > .99 * total_stacks:
        break
if args.fmt == 'html':
    # NOTE(review): this goes to stdout via print, not to `out` — looks like a
    # latent inconsistency when --out names a file; confirm before relying on it.
    print('</html>')

@ -19,9 +19,6 @@
"LSAN_OPTIONS": "suppressions=test/core/util/lsan_suppressions.txt:report_objects=1"
}
},
{
"config": "basicprof"
},
{
"config": "c++-compat"
},
@ -70,9 +67,6 @@
{
"config": "opt"
},
{
"config": "stapprof"
},
{
"config": "tsan",
"environ": {

Loading…
Cancel
Save