Simplify adding annotations, annotate more things

pull/3767/head
Craig Tiller 9 years ago
parent 8910ac6a36
commit 1f41b6b5ed
  1. src/core/channel/client_channel.c (7)
  2. src/core/channel/compress_filter.c (5)
  3. src/core/channel/connected_channel.c (1)
  4. src/core/channel/http_client_filter.c (3)
  5. src/core/channel/http_server_filter.c (3)
  6. src/core/client_config/lb_policies/pick_first.c (5)
  7. src/core/iomgr/exec_ctx.c (8)
  8. src/core/iomgr/pollset_multipoller_with_epoll.c (4)
  9. src/core/iomgr/pollset_posix.c (12)
  10. src/core/iomgr/tcp_posix.c (22)
  11. src/core/profiling/basic_timers.c (28)
  12. src/core/profiling/timers.h (108)
  13. src/core/support/alloc.c (16)
  14. src/core/support/sync_posix.c (16)
  15. src/core/surface/call.c (44)
  16. src/core/surface/completion_queue.c (8)
  17. src/core/transport/chttp2/parsing.c (5)
  18. src/core/transport/chttp2/writing.c (5)
  19. src/core/transport/chttp2_transport.c (20)
  20. src/core/transport/stream_op.c (6)
  21. src/core/tsi/fake_transport_security.c (2)
  22. src/cpp/proto/proto_utils.cc (4)
  23. src/cpp/server/server.cc (6)
  24. test/cpp/qps/client_sync.cc (7)
  25. tools/profiling/latency_profile/profile_analyzer.py (5)

@@ -237,7 +237,7 @@ static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
call_data *calld = arg;
grpc_pollset *pollset;
GRPC_TIMER_BEGIN(GRPC_PTAG_CHANNEL_PICKED_TARGET, 0);
GRPC_TIMER_BEGIN("picked_target", 0);
if (calld->picked_channel == NULL) {
/* treat this like a cancellation */
@@ -260,7 +260,7 @@ static void picked_target(grpc_exec_ctx *exec_ctx, void *arg,
}
}
GRPC_TIMER_END(GRPC_PTAG_CHANNEL_PICKED_TARGET, 0);
GRPC_TIMER_END("picked_target", 0);
}
static grpc_closure *merge_into_waiting_op(grpc_call_element *elem,
@@ -321,6 +321,7 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *subchannel_call;
grpc_lb_policy *lb_policy;
grpc_transport_stream_op op2;
GRPC_TIMER_BEGIN("perform_transport_stream_op", 0);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
@@ -432,6 +433,8 @@ static void perform_transport_stream_op(grpc_exec_ctx *exec_ctx,
}
break;
}
GRPC_TIMER_END("perform_transport_stream_op", 0);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,

@@ -41,6 +41,7 @@
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/profiling/timers.h"
#include "src/core/compression/message_compress.h"
#include "src/core/support/string.h"
@@ -271,10 +272,14 @@ static void process_send_ops(grpc_call_element *elem,
static void compress_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_TIMER_BEGIN("compress_start_transport_stream_op", 0);
if (op->send_ops && op->send_ops->nops > 0) {
process_send_ops(elem, op->send_ops);
}
GRPC_TIMER_END("compress_start_transport_stream_op", 0);
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}

@@ -39,6 +39,7 @@
#include "src/core/support/string.h"
#include "src/core/transport/transport.h"
#include "src/core/profiling/timers.h"
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

@@ -36,6 +36,7 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include "src/core/profiling/timers.h"
typedef struct call_data {
grpc_linked_mdelem method;
@@ -162,8 +163,10 @@ static void hc_mutate_op(grpc_call_element *elem,
static void hc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_TIMER_BEGIN("hc_start_transport_op", 0);
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
hc_mutate_op(elem, op);
GRPC_TIMER_END("hc_start_transport_op", 0);
grpc_call_next_op(exec_ctx, elem, op);
}

@@ -36,6 +36,7 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
typedef struct call_data {
gpr_uint8 got_initial_metadata;
@@ -230,8 +231,10 @@ static void hs_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
GRPC_TIMER_BEGIN("hs_start_transport_op", 0);
hs_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
GRPC_TIMER_END("hs_start_transport_op", 0);
}
/* Constructor for call_data */

@@ -175,7 +175,7 @@ void pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
}
static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
@@ -235,7 +235,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx, grpc_closure_create(destroy_subchannels, p), 1);
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;

@@ -39,20 +39,20 @@
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
int did_something = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_EXEC_CTX_FLUSH, 0);
GRPC_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
grpc_closure *c = exec_ctx->closure_list.head;
exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
while (c != NULL) {
grpc_closure *next = c->next;
did_something++;
GRPC_TIMER_BEGIN(GRPC_PTAG_EXEC_CTX_STEP, 0);
GRPC_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
c->cb(exec_ctx, c->cb_arg, c->success);
GRPC_TIMER_END(GRPC_PTAG_EXEC_CTX_STEP, 0);
GRPC_TIMER_END("grpc_exec_ctx_flush.cb", 0);
c = next;
}
}
GRPC_TIMER_END(GRPC_PTAG_EXEC_CTX_FLUSH, 0);
GRPC_TIMER_END("grpc_exec_ctx_flush", 0);
return did_something;
}
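
A note on the labels in this hunk: it introduces the dotted-suffix convention for nested regions, with an outer "grpc_exec_ctx_flush" span around the whole flush and a "grpc_exec_ctx_flush.cb" sub-span around each callback, so per-closure cost shows up separately in the trace. A minimal sketch of the same pattern against the reworked macros; drain and work_item below are hypothetical stand-ins, not gRPC code:

#include <stddef.h>
#include "src/core/profiling/timers.h"

/* Hypothetical list walker mirroring grpc_exec_ctx_flush's label scheme:
   one outer span per call, one dotted ".cb" sub-span per callback. */
typedef struct work_item {
  struct work_item *next;
  void (*cb)(void *arg);
  void *arg;
} work_item;

static void drain(work_item *head) {
  GRPC_TIMER_BEGIN("drain", 0);
  for (work_item *it = head; it != NULL; it = it->next) {
    GRPC_TIMER_BEGIN("drain.cb", 0);
    it->cb(it->arg);
    GRPC_TIMER_END("drain.cb", 0);
  }
  GRPC_TIMER_END("drain", 0);
}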

@@ -183,11 +183,11 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_TIMER_BEGIN(GRPC_PTAG_POLL, 0);
GRPC_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
GRPC_SCHEDULING_END_BLOCKING_REGION;
GRPC_TIMER_END(GRPC_PTAG_POLL, 0);
GRPC_TIMER_END("poll", 0);
if (poll_rv < 0) {
if (errno != EINTR) {

@@ -229,7 +229,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
int locked = 1;
int queued_work = 0;
int keep_polling = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_POLLSET_WORK, 0);
GRPC_TIMER_BEGIN("grpc_pollset_work", 0);
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
worker->reevaluate_polling_on_wakeup = 0;
@@ -274,10 +274,10 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
GRPC_TIMER_BEGIN(GRPC_PTAG_POLLSET_WORK, 0);
GRPC_TIMER_BEGIN("maybe_work_and_unlock", 0);
pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
deadline, now);
GRPC_TIMER_END(GRPC_PTAG_POLLSET_WORK, 0);
GRPC_TIMER_END("maybe_work_and_unlock", 0);
locked = 0;
gpr_tls_set(&g_current_thread_poller, 0);
gpr_tls_set(&g_current_thread_worker, 0);
@@ -332,7 +332,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_mu_lock(&pollset->mu);
}
}
GRPC_TIMER_END(GRPC_PTAG_POLLSET_WORK, 0);
GRPC_TIMER_END("grpc_pollset_work", 0);
}
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@@ -567,11 +567,11 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
even going into the blocking annotation if possible */
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
GRPC_TIMER_BEGIN(GRPC_PTAG_POLL, 0);
GRPC_TIMER_BEGIN("poll", 0);
GRPC_SCHEDULING_START_BLOCKING_REGION;
r = grpc_poll_function(pfd, nfds, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;
GRPC_TIMER_END(GRPC_PTAG_POLL, 0);
GRPC_TIMER_END("poll", 0);
if (r < 0) {
gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));

@@ -180,7 +180,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
GPR_ASSERT(!tcp->finished_edge);
GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
GRPC_TIMER_BEGIN("tcp_continue_read", 0);
while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
@@ -199,11 +199,11 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 1);
GRPC_TIMER_BEGIN("recvmsg", 1);
do {
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
GRPC_TIMER_END("recvmsg", 0);
if (read_bytes < 0) {
/* NB: After calling call_read_cb a parallel call of the read handler may
@@ -240,7 +240,7 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
TCP_UNREF(exec_ctx, tcp, "read");
}
GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
GRPC_TIMER_END("tcp_continue_read", 0);
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
@@ -316,12 +316,12 @@ static flush_result tcp_flush(grpc_tcp *tcp) {
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 1);
GRPC_TIMER_BEGIN("sendmsg", 1);
do {
/* TODO(klempner): Cork if this is a partial write */
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);
GRPC_TIMER_END("sendmsg", 0);
if (sent_length < 0) {
if (errno == EAGAIN) {
@@ -370,17 +370,17 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
return;
}
GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
status = tcp_flush(tcp);
if (status == FLUSH_PENDING) {
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
GRPC_TIMER_BEGIN("tcp_handle_write.cb", 0);
cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
GRPC_TIMER_END("tcp_handle_write.cb", 0);
TCP_UNREF(exec_ctx, tcp, "write");
}
GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -399,11 +399,11 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
}
GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
GRPC_TIMER_BEGIN("tcp_write", 0);
GPR_ASSERT(tcp->write_cb == NULL);
if (buf->length == 0) {
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
GRPC_TIMER_END("tcp_write", 0);
grpc_exec_ctx_enqueue(exec_ctx, cb, 1);
return;
}
@@ -420,7 +420,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_exec_ctx_enqueue(exec_ctx, cb, status == FLUSH_DONE);
}
GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
GRPC_TIMER_END("tcp_write", 0);
}
static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,

@@ -44,11 +44,7 @@
#include <grpc/support/thd.h>
#include <stdio.h>
typedef enum {
BEGIN = '{',
END = '}',
MARK = '.'
} marker_type;
typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;
typedef struct grpc_timer_entry {
gpr_timespec tm;
@@ -91,7 +87,7 @@ static void log_report() {
g_count = 0;
}
static void grpc_timers_log_add(int tag, const char *tagstr, marker_type type,
static void grpc_timers_log_add(const char *tagstr, marker_type type,
int important, const char *file, int line) {
grpc_timer_entry *entry;
@@ -111,25 +107,19 @@ static void grpc_timers_log_add(int tag, const char *tagstr, marker_type type,
}
/* Latency profiler API implementation. */
void grpc_timer_add_mark(int tag, const char *tagstr, int important,
const char *file, int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, MARK, important, file, line);
}
void grpc_timer_add_mark(const char *tagstr, int important, const char *file,
int line) {
grpc_timers_log_add(tagstr, MARK, important, file, line);
}
void grpc_timer_begin(int tag, const char *tagstr, int important, const char *file,
void grpc_timer_begin(const char *tagstr, int important, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, BEGIN, important, file, line);
}
grpc_timers_log_add(tagstr, BEGIN, important, file, line);
}
void grpc_timer_end(int tag, const char *tagstr, int important, const char *file,
void grpc_timer_end(const char *tagstr, int important, const char *file,
int line) {
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) {
grpc_timers_log_add(tag, tagstr, END, important, file, line);
}
grpc_timers_log_add(tagstr, END, important, file, line);
}
/* Basic profiler specific API functions. */
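
For orientation, the shape of what basic_timers.c does after this change: each annotation appends one fixed-size record (timestamp, string label, a one-character marker: '{' begin, '}' end, '.' mark, plus the important flag and call site) to an in-memory log that is flushed whenever it fills. A rough self-contained sketch of that mechanism, not the gRPC code itself; the buffer size and printed format are assumptions, and clock_gettime assumes POSIX:

#include <stdio.h>
#include <time.h>

typedef enum { BEGIN = '{', END = '}', MARK = '.' } marker_type;

typedef struct timer_entry {
  struct timespec tm; /* when the event happened */
  const char *tagstr; /* string label; formerly an integer GRPC_PTAG_* tag */
  marker_type type;   /* '{', '}' or '.' */
  int important;
  const char *file;   /* call site, captured by the macros */
  int line;
} timer_entry;

static timer_entry g_log[4096];
static int g_count = 0;

static void log_report(void) {
  int i;
  for (i = 0; i < g_count; i++) {
    const timer_entry *e = &g_log[i];
    printf("%ld.%09ld %c %s %s:%d\n", (long)e->tm.tv_sec, e->tm.tv_nsec,
           (char)e->type, e->tagstr, e->file, e->line);
  }
  g_count = 0; /* reuse the buffer */
}

static void timers_log_add(const char *tagstr, marker_type type, int important,
                           const char *file, int line) {
  timer_entry *e;
  if (g_count == (int)(sizeof(g_log) / sizeof(g_log[0]))) log_report();
  e = &g_log[g_count++];
  clock_gettime(CLOCK_MONOTONIC, &e->tm);
  e->tagstr = tagstr;
  e->type = type;
  e->important = important;
  e->file = file;
  e->line = line;
}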

@@ -41,83 +41,25 @@ extern "C" {
void grpc_timers_global_init(void);
void grpc_timers_global_destroy(void);
void grpc_timer_add_mark(int tag, const char *tagstr, int important,
const char *file, int line);
void grpc_timer_begin(int tag, const char *tagstr, int important, const char *file,
void grpc_timer_add_mark(const char *tagstr, int important, const char *file,
int line);
void grpc_timer_begin(const char *tagstr, int important, const char *file,
int line);
void grpc_timer_end(int tag, const char *tagstr, int important, const char *file,
void grpc_timer_end(const char *tagstr, int important, const char *file,
int line);
enum grpc_profiling_tags {
/* Any GRPC_PTAG_* >= than the threshold won't generate any profiling mark. */
GRPC_PTAG_IGNORE_THRESHOLD = 1000000,
/* Re. Protos. */
GRPC_PTAG_PROTO_SERIALIZE = 100 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_PROTO_DESERIALIZE = 101 + GRPC_PTAG_IGNORE_THRESHOLD,
/* Re. sockets. */
GRPC_PTAG_HANDLE_READ = 200 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_SENDMSG = 201,
GRPC_PTAG_RECVMSG = 202,
GRPC_PTAG_POLL = 203,
GRPC_PTAG_TCP_CB_WRITE = 204 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_TCP_WRITE = 205 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_BECOME_READABLE = 207,
GRPC_PTAG_MUTEX_LOCK = 250,
GRPC_PTAG_MUTEX_UNLOCK = 254,
GRPC_PTAG_MALLOC = 251,
GRPC_PTAG_REALLOC = 252,
GRPC_PTAG_FREE = 253,
/* C++ */
GRPC_PTAG_CPP_CALL_CREATED = 300 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_CPP_PERFORM_OPS = 301 + GRPC_PTAG_IGNORE_THRESHOLD,
GRPC_PTAG_CLIENT_UNARY_CALL = 302,
GRPC_PTAG_SERVER_CALL = 303,
GRPC_PTAG_SERVER_CALLBACK = 304,
/* Transports */
GRPC_PTAG_HTTP2_RECV_DATA = 400,
GRPC_PTAG_HTTP2_UNLOCK = 401,
GRPC_PTAG_HTTP2_WRITING_ACTION = 402,
GRPC_PTAG_HTTP2_TERMINATE_WRITING = 403,
/* Completion queue */
GRPC_PTAG_CQ_NEXT = 501,
GRPC_PTAG_CQ_PLUCK = 502,
GRPC_PTAG_POLLSET_WORK = 503,
GRPC_PTAG_EXEC_CTX_FLUSH = 504,
GRPC_PTAG_EXEC_CTX_STEP = 505,
/* Surface */
GRPC_PTAG_CALL_START_BATCH = 600,
GRPC_PTAG_CALL_ON_DONE_RECV = 601,
GRPC_PTAG_CALL_UNLOCK = 602,
GRPC_PTAG_CALL_ON_DONE_SEND = 602,
/* Channel */
GRPC_PTAG_CHANNEL_PICKED_TARGET = 700,
/* > 1024 Unassigned reserved. For any miscellaneous use.
* Use addition to generate tags from this base or take advantage of the 10
* zero'd bits for OR-ing. */
GRPC_PTAG_OTHER_BASE = 1024
};
#if !(defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER))
/* No profiling. No-op all the things. */
#define GRPC_TIMER_MARK(tag, important) \
  do {                                  \
  } while (0)
#define GRPC_TIMER_BEGIN(tag, important) \
  do {                                   \
  } while (0)
#define GRPC_TIMER_END(tag, important) \
  do {                                  \
  } while (0)
#else /* at least one profiler requested... */
@@ -127,22 +69,14 @@ enum grpc_profiling_tags {
#endif
/* Generic profiling interface. */
#define GRPC_TIMER_MARK(tag, important) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_add_mark(tag, #tag, important, __FILE__, \
__LINE__); \
}
#define GRPC_TIMER_MARK(tag, important) \
grpc_timer_add_mark(tag, important, __FILE__, __LINE__);
#define GRPC_TIMER_BEGIN(tag, important) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_begin(tag, #tag, important, __FILE__, \
__LINE__); \
}
#define GRPC_TIMER_BEGIN(tag, important) \
grpc_timer_begin(tag, important, __FILE__, __LINE__);
#define GRPC_TIMER_END(tag, important) \
if (tag < GRPC_PTAG_IGNORE_THRESHOLD) { \
grpc_timer_end(tag, #tag, important, __FILE__, __LINE__); \
}
#define GRPC_TIMER_END(tag, important) \
grpc_timer_end(tag, important, __FILE__, __LINE__);
#ifdef GRPC_STAP_PROFILER
/* Empty placeholder for now. */
@@ -156,6 +90,22 @@ enum grpc_profiling_tags {
#ifdef __cplusplus
}
namespace grpc {
class ProfileScope {
public:
ProfileScope(const char *desc, bool important) : desc_(desc) {
GRPC_TIMER_BEGIN(desc_, important ? 1 : 0);
}
~ProfileScope() { GRPC_TIMER_END(desc_, 0); }
private:
const char *const desc_;
};
}
#define GRPC_TIMER_SCOPE(tag, important) \
ProfileScope _profile_scope_##__LINE__((tag), (important))
#endif
#endif /* GRPC_CORE_PROFILING_TIMERS_H */
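
Net effect of the header change: tags are plain strings now, the integer GRPC_PTAG_* namespace and its IGNORE_THRESHOLD filtering are gone, the second macro argument still marks "important" events (compare the recvmsg/sendmsg sites above, which pass 1), and __FILE__/__LINE__ are supplied by the macros themselves. A small usage sketch; the functions and labels here are hypothetical:

#include "src/core/profiling/timers.h"

/* C-style usage: explicit, manually paired markers with a string label. */
static void instrumented_work(void) {
  GRPC_TIMER_BEGIN("instrumented_work", 0);
  GRPC_TIMER_MARK("instrumented_work.checkpoint", 1); /* important event */
  GRPC_TIMER_END("instrumented_work", 0);
}

/* C++ usage: GRPC_TIMER_SCOPE plants a grpc::ProfileScope on the stack,
   so the matching END marker is emitted automatically on scope exit.
   The macro names ProfileScope unqualified, which is why the real call
   sites (proto_utils.cc, server.cc) sit inside namespace grpc. */
namespace grpc {
void InstrumentedWork() {
  GRPC_TIMER_SCOPE("InstrumentedWork", 0);
  /* ... body: every return path records the END marker ... */
}
}  // namespace grpc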

@@ -39,28 +39,28 @@
void *gpr_malloc(size_t size) {
void *p;
GRPC_TIMER_BEGIN(GRPC_PTAG_MALLOC, 0);
GRPC_TIMER_BEGIN("gpr_malloc", 0);
p = malloc(size);
if (!p) {
abort();
}
GRPC_TIMER_END(GRPC_PTAG_MALLOC, 0);
GRPC_TIMER_END("gpr_malloc", 0);
return p;
}
void gpr_free(void *p) {
GRPC_TIMER_BEGIN(GRPC_PTAG_FREE, 0);
free(p);
GRPC_TIMER_END(GRPC_PTAG_FREE, 0);
void gpr_free(void *p) {
GRPC_TIMER_BEGIN("gpr_free", 0);
free(p);
GRPC_TIMER_END("gpr_free", 0);
}
void *gpr_realloc(void *p, size_t size) {
GRPC_TIMER_BEGIN(GRPC_PTAG_REALLOC, 0);
GRPC_TIMER_BEGIN("gpr_realloc", 0);
p = realloc(p, size);
if (!p) {
abort();
}
GRPC_TIMER_END(GRPC_PTAG_REALLOC, 0);
GRPC_TIMER_END("gpr_realloc", 0);
return p;
}

@@ -46,16 +46,16 @@ void gpr_mu_init(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_init(mu, NULL) == 0); }
void gpr_mu_destroy(gpr_mu* mu) { GPR_ASSERT(pthread_mutex_destroy(mu) == 0); }
void gpr_mu_lock(gpr_mu* mu) {
GRPC_TIMER_BEGIN(GRPC_PTAG_MUTEX_LOCK, 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);
GRPC_TIMER_END(GRPC_PTAG_MUTEX_LOCK, 0);
void gpr_mu_lock(gpr_mu* mu) {
GRPC_TIMER_BEGIN("gpr_mu_lock", 0);
GPR_ASSERT(pthread_mutex_lock(mu) == 0);
GRPC_TIMER_END("gpr_mu_lock", 0);
}
void gpr_mu_unlock(gpr_mu* mu) {
GRPC_TIMER_BEGIN(GRPC_PTAG_MUTEX_UNLOCK, 0);
GPR_ASSERT(pthread_mutex_unlock(mu) == 0);
GRPC_TIMER_END(GRPC_PTAG_MUTEX_UNLOCK, 0);
void gpr_mu_unlock(gpr_mu* mu) {
GRPC_TIMER_BEGIN("gpr_mu_unlock", 0);
GPR_ASSERT(pthread_mutex_unlock(mu) == 0);
GRPC_TIMER_END("gpr_mu_unlock", 0);
}
int gpr_mu_trylock(gpr_mu* mu) {

@@ -306,8 +306,9 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
grpc_transport_stream_op *initial_op_ptr = NULL;
grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_call *call =
gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
grpc_call *call;
GRPC_TIMER_BEGIN("grpc_call_create", 0);
call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
memset(call, 0, sizeof(grpc_call));
gpr_mu_init(&call->mu);
gpr_mu_init(&call->completion_mu);
@@ -401,6 +402,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
set_deadline_alarm(&exec_ctx, call, send_deadline);
}
grpc_exec_ctx_finish(&exec_ctx);
GRPC_TIMER_END("grpc_call_create", 0);
return call;
}
@@ -462,6 +464,7 @@ void grpc_call_internal_ref(grpc_call *c) {
static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
size_t i;
grpc_call *c = call;
GRPC_TIMER_BEGIN("destroy_call", 0);
grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
gpr_mu_destroy(&c->mu);
@@ -494,6 +497,7 @@ static void destroy_call(grpc_exec_ctx *exec_ctx, grpc_call *call) {
GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
}
gpr_free(c);
GRPC_TIMER_END("destroy_call", 0);
}
#ifdef GRPC_CALL_REF_COUNT_DEBUG
@@ -613,7 +617,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
const size_t MAX_RECV_PEEK_AHEAD = 65536;
size_t buffered_bytes;
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_UNLOCK, 0);
GRPC_TIMER_BEGIN("unlock", 0);
memset(&op, 0, sizeof(op));
@@ -686,7 +690,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_call *call) {
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completing");
}
GRPC_TIMER_END(GRPC_PTAG_CALL_UNLOCK, 0);
GRPC_TIMER_END("unlock", 0);
}
static void get_final_status(grpc_call *call, grpc_ioreq_data out) {
@@ -836,7 +840,7 @@ static void early_out_write_ops(grpc_call *call) {
static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *call = pc;
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_SEND, 0);
GRPC_TIMER_BEGIN("call_on_done_send", 0);
lock(call);
if (call->last_send_contains & (1 << GRPC_IOREQ_SEND_INITIAL_METADATA)) {
finish_ioreq_op(call, GRPC_IOREQ_SEND_INITIAL_METADATA, success);
@@ -860,10 +864,11 @@ static void call_on_done_send(grpc_exec_ctx *exec_ctx, void *pc, int success) {
call->sending = 0;
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "sending");
GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_SEND, 0);
GRPC_TIMER_END("call_on_done_send", 0);
}
static void finish_message(grpc_call *call) {
GRPC_TIMER_BEGIN("finish_message", 0);
if (call->error_status_set == 0) {
/* TODO(ctiller): this could be a lot faster if coded directly */
grpc_byte_buffer *byte_buffer;
@@ -883,6 +888,7 @@ static void finish_message(grpc_call *call) {
gpr_slice_buffer_reset_and_unref(&call->incoming_message);
GPR_ASSERT(call->incoming_message.count == 0);
call->reading_message = 0;
GRPC_TIMER_END("finish_message", 0);
}
static int begin_message(grpc_call *call, grpc_begin_message msg) {
@@ -972,7 +978,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
grpc_call *child_call;
grpc_call *next_child_call;
size_t i;
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
GRPC_TIMER_BEGIN("call_on_done_recv", 0);
lock(call);
call->receiving = 0;
if (success) {
@@ -982,13 +988,19 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
case GRPC_NO_OP:
break;
case GRPC_OP_METADATA:
GRPC_TIMER_BEGIN("recv_metadata", 0);
recv_metadata(exec_ctx, call, &op->data.metadata);
GRPC_TIMER_END("recv_metadata", 0);
break;
case GRPC_OP_BEGIN_MESSAGE:
GRPC_TIMER_BEGIN("begin_message", 0);
success = begin_message(call, op->data.begin_message);
GRPC_TIMER_END("begin_message", 0);
break;
case GRPC_OP_SLICE:
GRPC_TIMER_BEGIN("add_slice_to_message", 0);
success = add_slice_to_message(call, op->data.slice);
GRPC_TIMER_END("add_slice_to_message", 0);
break;
}
}
@@ -1034,7 +1046,7 @@ static void call_on_done_recv(grpc_exec_ctx *exec_ctx, void *pc, int success) {
unlock(exec_ctx, call);
GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "receiving");
GRPC_TIMER_END(GRPC_PTAG_CALL_ON_DONE_RECV, 0);
GRPC_TIMER_END("call_on_done_recv", 0);
}
static int prepare_application_metadata(grpc_call *call, size_t count,
@@ -1512,16 +1524,25 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_mdelem *mdel = l->md;
grpc_mdstr *key = mdel->key;
if (key == grpc_channel_get_status_string(call->channel)) {
GRPC_TIMER_BEGIN("status", 0);
set_status_code(call, STATUS_FROM_WIRE, decode_status(mdel));
GRPC_TIMER_END("status", 0);
} else if (key == grpc_channel_get_message_string(call->channel)) {
GRPC_TIMER_BEGIN("status-details", 0);
set_status_details(call, STATUS_FROM_WIRE, GRPC_MDSTR_REF(mdel->value));
GRPC_TIMER_END("status-details", 0);
} else if (key ==
grpc_channel_get_compression_algorithm_string(call->channel)) {
GRPC_TIMER_BEGIN("compression_algorithm", 0);
set_compression_algorithm(call, decode_compression(mdel));
GRPC_TIMER_END("compression_algorithm", 0);
} else if (key == grpc_channel_get_encodings_accepted_by_peer_string(
call->channel)) {
GRPC_TIMER_BEGIN("encodings_accepted_by_peer", 0);
set_encodings_accepted_by_peer(call, mdel->value->slice);
GRPC_TIMER_END("encodings_accepted_by_peer", 0);
} else {
GRPC_TIMER_BEGIN("report_up", 0);
dest = &call->buffered_metadata[is_trailing];
if (dest->count == dest->capacity) {
dest->capacity = GPR_MAX(dest->capacity + 8, dest->capacity * 2);
@@ -1542,12 +1563,15 @@ static void recv_metadata(grpc_exec_ctx *exec_ctx, grpc_call *call,
}
call->owned_metadata[call->owned_metadata_count++] = mdel;
l->md = NULL;
GRPC_TIMER_END("report_up", 0);
}
}
if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
0 &&
!call->is_client) {
GRPC_TIMER_BEGIN("set_deadline_alarm", 0);
set_deadline_alarm(exec_ctx, call, md->deadline);
GRPC_TIMER_END("set_deadline_alarm", 0);
}
if (!is_trailing) {
call->read_state = READ_STATE_GOT_INITIAL_METADATA;
@@ -1610,7 +1634,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
grpc_call_error error;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_TIMER_BEGIN(GRPC_PTAG_CALL_START_BATCH, 0);
GRPC_TIMER_BEGIN("grpc_call_start_batch", 0);
GRPC_API_TRACE(
"grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
@@ -1849,7 +1873,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
finish_func, tag);
done:
grpc_exec_ctx_finish(&exec_ctx);
GRPC_TIMER_END(GRPC_PTAG_CALL_START_BATCH, 0);
GRPC_TIMER_END("grpc_call_start_batch", 0);
return error;
}

@@ -185,7 +185,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec now;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_TIMER_BEGIN(GRPC_PTAG_CQ_NEXT, 0);
GRPC_TIMER_BEGIN("grpc_completion_queue_next", 0);
GRPC_API_TRACE(
"grpc_completion_queue_next("
@@ -234,7 +234,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
GRPC_CQ_INTERNAL_UNREF(cc, "next");
grpc_exec_ctx_finish(&exec_ctx);
GRPC_TIMER_END(GRPC_PTAG_CQ_NEXT, 0);
GRPC_TIMER_END("grpc_completion_queue_next", 0);
return ret;
}
@@ -274,7 +274,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
int first_loop = 1;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_TIMER_BEGIN(GRPC_PTAG_CQ_PLUCK, 0);
GRPC_TIMER_BEGIN("grpc_completion_queue_pluck", 0);
GRPC_API_TRACE(
"grpc_completion_queue_pluck("
@@ -342,7 +342,7 @@ done:
GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
grpc_exec_ctx_finish(&exec_ctx);
GRPC_TIMER_END(GRPC_PTAG_CQ_PLUCK, 0);
GRPC_TIMER_END("grpc_completion_queue_pluck", 0);
return ret;
}

@@ -35,6 +35,7 @@
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2/status_conversion.h"
#include "src/core/transport/chttp2/timeout_encoding.h"
@@ -68,6 +69,8 @@ void grpc_chttp2_prepare_to_read(
grpc_chttp2_stream_global *stream_global;
grpc_chttp2_stream_parsing *stream_parsing;
GRPC_TIMER_BEGIN("grpc_chttp2_prepare_to_read", 0);
transport_parsing->next_stream_id = transport_global->next_stream_id;
/* update the parsing view of incoming window */
@@ -89,6 +92,8 @@ void grpc_chttp2_prepare_to_read(
stream_parsing->incoming_window = stream_global->incoming_window;
}
}
GRPC_TIMER_END("grpc_chttp2_prepare_to_read", 0);
}
void grpc_chttp2_publish_reads(

@@ -37,6 +37,7 @@
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/chttp2/http2_errors.h"
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing);
@@ -180,6 +181,8 @@ void grpc_chttp2_perform_writes(
static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
grpc_chttp2_stream_writing *stream_writing;
GRPC_TIMER_BEGIN("finalize_outbuf", 0);
while (
grpc_chttp2_list_pop_writing_stream(transport_writing, &stream_writing)) {
if (stream_writing->sopb.nops > 0 ||
@@ -208,6 +211,8 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
}
grpc_chttp2_list_add_written_stream(transport_writing, stream_writing);
}
GRPC_TIMER_END("finalize_outbuf", 0);
}
void grpc_chttp2_cleanup_writing(

@@ -510,6 +510,7 @@ grpc_chttp2_stream_parsing *grpc_chttp2_parsing_accept_stream(
static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
GRPC_TIMER_BEGIN("unlock", 0);
unlock_check_read_write_state(exec_ctx, t);
if (!t->writing_active && !t->closed &&
grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
@@ -520,6 +521,7 @@ static void unlock(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
}
gpr_mu_unlock(&t->mu);
GRPC_TIMER_END("unlock", 0);
}
/*
@@ -546,7 +548,7 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr;
grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing);
GRPC_TIMER_BEGIN(GRPC_PTAG_HTTP2_TERMINATE_WRITING, 0);
GRPC_TIMER_BEGIN("grpc_chttp2_terminate_writing", 0);
lock(t);
@@ -569,16 +571,16 @@ void grpc_chttp2_terminate_writing(grpc_exec_ctx *exec_ctx,
unlock(exec_ctx, t);
UNREF_TRANSPORT(exec_ctx, t, "writing");
GRPC_TIMER_END(GRPC_PTAG_HTTP2_TERMINATE_WRITING, 0);
GRPC_TIMER_END("grpc_chttp2_terminate_writing", 0);
}
static void writing_action(grpc_exec_ctx *exec_ctx, void *gt,
int iomgr_success_ignored) {
grpc_chttp2_transport *t = gt;
GRPC_TIMER_BEGIN(GRPC_PTAG_HTTP2_WRITING_ACTION, 0);
GRPC_TIMER_BEGIN("writing_action", 0);
grpc_chttp2_perform_writes(exec_ctx, &t->writing, t->ep);
GRPC_TIMER_END(GRPC_PTAG_HTTP2_WRITING_ACTION, 0);
GRPC_TIMER_END("writing_action", 0);
}
void grpc_chttp2_add_incoming_goaway(
@@ -648,6 +650,7 @@ static void maybe_start_some_streams(
static void perform_stream_op_locked(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
grpc_chttp2_stream_global *stream_global, grpc_transport_stream_op *op) {
GRPC_TIMER_BEGIN("perform_stream_op_locked", 0);
if (op->cancel_with_status != GRPC_STATUS_OK) {
cancel_from_api(transport_global, stream_global, op->cancel_with_status);
}
@@ -719,6 +722,7 @@ static void perform_stream_op_locked(
}
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GRPC_TIMER_END("perform_stream_op_locked", 0);
}
static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
@@ -1109,7 +1113,7 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
int keep_reading = 0;
grpc_chttp2_transport *t = tp;
GRPC_TIMER_BEGIN(GRPC_PTAG_HTTP2_RECV_DATA, 0);
GRPC_TIMER_BEGIN("recv_data", 0);
lock(t);
i = 0;
@@ -1121,11 +1125,13 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
&t->parsing_stream_map);
grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
gpr_mu_unlock(&t->mu);
GRPC_TIMER_BEGIN("recv_data.parse", 0);
for (; i < t->read_buffer.count &&
grpc_chttp2_perform_read(exec_ctx, &t->parsing,
t->read_buffer.slices[i]);
i++)
;
GRPC_TIMER_END("recv_data.parse", 0);
gpr_mu_lock(&t->mu);
if (i != t->read_buffer.count) {
drop_connection(exec_ctx, t);
@@ -1163,7 +1169,7 @@ static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
UNREF_TRANSPORT(exec_ctx, t, "recv_data");
}
GRPC_TIMER_END(GRPC_PTAG_HTTP2_RECV_DATA, 0);
GRPC_TIMER_END("recv_data", 0);
}
/*

@@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/profiling/timers.h"
/* Exponential growth function: Given x, return a larger x.
Currently we grow by 1.5 times upon reallocation. */
#define GROW(x) (3 * (x) / 2)
@@ -300,6 +302,8 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
grpc_linked_mdelem *l;
grpc_linked_mdelem *next;
GRPC_TIMER_BEGIN("grpc_metadata_batch_filter", 0);
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
for (l = batch->list.head; l; l = next) {
@@ -328,4 +332,6 @@ void grpc_metadata_batch_filter(grpc_metadata_batch *batch,
}
assert_valid_list(&batch->list);
assert_valid_list(&batch->garbage);
GRPC_TIMER_END("grpc_metadata_batch_filter", 0);
}

@@ -121,7 +121,7 @@ static void store32_little_endian(gpr_uint32 value, unsigned char *buf) {
buf[3] = (unsigned char)((value >> 24) & 0xFF);
buf[2] = (unsigned char)((value >> 16) & 0xFF);
buf[1] = (unsigned char)((value >> 8) & 0xFF);
buf[0] = (unsigned char)((value) & 0xFF);
buf[0] = (unsigned char)((value)&0xFF);
}
static void tsi_fake_frame_reset(tsi_fake_frame *frame, int needs_draining) {

@@ -42,6 +42,8 @@
#include <grpc/support/port_platform.h>
#include <grpc++/support/config.h>
#include "src/core/profiling/timers.h"
const int kMaxBufferLength = 8192;
class GrpcBufferWriter GRPC_FINAL
@@ -158,6 +160,7 @@ namespace grpc {
Status SerializeProto(const grpc::protobuf::Message& msg,
grpc_byte_buffer** bp) {
GRPC_TIMER_SCOPE("SerializeProto", 0);
int byte_size = msg.ByteSize();
if (byte_size <= kMaxBufferLength) {
gpr_slice slice = gpr_slice_malloc(byte_size);
@@ -176,6 +179,7 @@ Status SerializeProto(const grpc::protobuf::Message& msg,
Status DeserializeProto(grpc_byte_buffer* buffer, grpc::protobuf::Message* msg,
int max_message_size) {
GRPC_TIMER_SCOPE("DeserializeProto", 0);
if (!buffer) {
return Status(StatusCode::INTERNAL, "No payload");
}
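
The switch to GRPC_TIMER_SCOPE pays off in functions like DeserializeProto that return early, since a manual GRPC_TIMER_END would have to be repeated before every return. A hedged sketch of the same shape, with a hypothetical function:

#include "src/core/profiling/timers.h"

namespace grpc {
/* Hypothetical parser: the ProfileScope declared by GRPC_TIMER_SCOPE
   emits the END marker on both the early-error return and the normal
   return, so the span can never be left open. */
int CheckedParse(const void* buffer) {
  GRPC_TIMER_SCOPE("CheckedParse", 0);
  if (!buffer) {
    return -1; /* early return; ~ProfileScope still closes the span */
  }
  /* ... decode the payload ... */
  return 0;
}
}  // namespace grpc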

@@ -541,7 +541,7 @@ void Server::ScheduleCallback() {
void Server::RunRpc() {
// Wait for one more incoming rpc.
bool ok;
GRPC_TIMER_BEGIN(GRPC_PTAG_SERVER_CALL, 0);
GRPC_TIMER_SCOPE("Server::RunRpc", 0);
auto* mrd = SyncRequest::Wait(&cq_, &ok);
if (mrd) {
ScheduleCallback();
@@ -557,12 +557,10 @@ void Server::RunRpc() {
mrd->TeardownRequest();
}
}
GRPC_TIMER_BEGIN(GRPC_PTAG_SERVER_CALLBACK, 0);
GRPC_TIMER_SCOPE("cd.Run()", 0);
cd.Run();
GRPC_TIMER_END(GRPC_PTAG_SERVER_CALLBACK, 0);
}
}
GRPC_TIMER_END(GRPC_PTAG_SERVER_CALL, 0);
{
grpc::unique_lock<grpc::mutex> lock(mu_);

@@ -101,11 +101,10 @@ class SynchronousUnaryClient GRPC_FINAL : public SynchronousClient {
WaitToIssue(thread_idx);
auto* stub = channels_[thread_idx % channels_.size()].get_stub();
double start = Timer::Now();
GRPC_TIMER_SCOPE("SynchronousUnaryClient::ThreadFunc", 0);
grpc::ClientContext context;
GRPC_TIMER_BEGIN(GRPC_PTAG_CLIENT_UNARY_CALL, 0);
grpc::Status s =
stub->UnaryCall(&context, request_, &responses_[thread_idx]);
GRPC_TIMER_END(GRPC_PTAG_CLIENT_UNARY_CALL, 0);
histogram->Add((Timer::Now() - start) * 1e9);
return s.ok();
}
@@ -139,15 +138,13 @@ class SynchronousStreamingClient GRPC_FINAL : public SynchronousClient {
bool ThreadFunc(Histogram* histogram, size_t thread_idx) GRPC_OVERRIDE {
WaitToIssue(thread_idx);
GRPC_TIMER_SCOPE("SynchronousStreamingClient::ThreadFunc", 0);
double start = Timer::Now();
GRPC_TIMER_BEGIN(GRPC_PTAG_CLIENT_UNARY_CALL, 0);
if (stream_[thread_idx]->Write(request_) &&
stream_[thread_idx]->Read(&responses_[thread_idx])) {
GRPC_TIMER_END(GRPC_PTAG_CLIENT_UNARY_CALL, 0);
histogram->Add((Timer::Now() - start) * 1e9);
return true;
}
GRPC_TIMER_END(GRPC_PTAG_CLIENT_UNARY_CALL, 0);
return false;
}

@@ -29,6 +29,8 @@ class LineItem(object):
self.start_time = line['t']
self.end_time = None
self.important = line['imp']
self.filename = line['file']
self.fileline = line['line']
self.times = {}
@@ -48,7 +50,7 @@ class ScopeBuilder(object):
def finish(self, line):
assert line['tag'] == self.top_line.tag
assert line['tag'] == self.top_line.tag, 'expected %s, got %s' % (self.top_line.tag, line['tag'])
final_time_stamp = line['t']
assert self.top_line.end_time is None
self.top_line.end_time = final_time_stamp
@@ -187,6 +189,7 @@ def time_format(idx):
FORMAT = [
('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)),
('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)),
('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
('SELF', time_format(SELF_TIME)),
('TO_STACK_END', time_format(TIME_TO_STACK_END)),
