ESAN prototype

pull/5474/head
Craig Tiller 9 years ago
parent 1df086067c
commit ef1bf87665
Changed files:
  1. Makefile (8 lines changed)
  2. build.yaml (3 lines changed)
  3. src/core/iomgr/exec_ctx.c (76 lines changed)
  4. src/core/iomgr/exec_ctx.h (20 lines changed)
  5. src/core/iomgr/iomgr.c (3 lines changed)
  6. src/core/iomgr/workqueue_posix.c (4 lines changed)
  7. tools/run_tests/configs.json (3 lines changed)

Makefile
@@ -209,6 +209,14 @@ CPPFLAGS_mutrace = -O0
 LDFLAGS_mutrace = -rdynamic
 DEFINES_mutrace = _DEBUG DEBUG
+VALID_CONFIG_esan = 1
+CC_esan = $(DEFAULT_CC)
+CXX_esan = $(DEFAULT_CXX)
+LD_esan = $(DEFAULT_CC)
+LDXX_esan = $(DEFAULT_CXX)
+CPPFLAGS_esan = -O0
+DEFINES_esan = _DEBUG DEBUG GRPC_EXECUTION_CONTEXT_SANITIZER
 # General settings.

build.yaml
@@ -2724,6 +2724,9 @@ configs:
   dbg:
     CPPFLAGS: -O0
     DEFINES: _DEBUG DEBUG
+  esan:
+    CPPFLAGS: -O0
+    DEFINES: _DEBUG DEBUG GRPC_EXECUTION_CONTEXT_SANITIZER
   gcov:
     CC: gcc
     CPPFLAGS: -O0 -fprofile-arcs -ftest-coverage -Wno-return-type
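
Taken together, these two hunks register the new config the same way the existing ones (dbg, mutrace, gcov) are registered: build.yaml is the source the Makefile settings are generated from, and DEFINES adds GRPC_EXECUTION_CONTEXT_SANITIZER to the preprocessor flags so the sanitized code paths below get compiled in. Assuming the usual per-config selection convention in this Makefile, the prototype would be built with something like make CONFIG=esan.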

src/core/iomgr/exec_ctx.c
@@ -34,9 +34,12 @@
 #include "src/core/iomgr/exec_ctx.h"
 #include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/thd.h>
 #include "src/core/profiling/timers.h"
+#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
   bool did_something = 0;
   GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
@@ -74,3 +77,76 @@ void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(offload_target_or_null == NULL);
   grpc_closure_list_move(list, &exec_ctx->closure_list);
 }
+
+void grpc_exec_ctx_global_init(void) {}
+void grpc_exec_ctx_global_shutdown(void) {}
+
+#else
+
+static gpr_mu g_mu;
+static gpr_cv g_cv;
+static int g_threads = 0;
+
+static void run_closure(void *arg) {
+  grpc_closure *closure = arg;
+  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0);
+  grpc_exec_ctx_finish(&exec_ctx);
+  gpr_mu_lock(&g_mu);
+  if (--g_threads == 0) {
+    gpr_cv_signal(&g_cv);
+  }
+  gpr_mu_unlock(&g_mu);
+}
+
+static void start_closure(grpc_closure *closure) {
+  gpr_thd_id id;
+  gpr_mu_lock(&g_mu);
+  g_threads++;
+  gpr_mu_unlock(&g_mu);
+  gpr_thd_new(&id, run_closure, closure, NULL);
+}
+
+bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; }
+
+void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {}
+
+void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                           bool success,
+                           grpc_workqueue *offload_target_or_null) {
+  GPR_ASSERT(offload_target_or_null == NULL);
+  closure->final_data = success;
+  start_closure(closure);
+}
+
+void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
+                                grpc_closure_list *list,
+                                grpc_workqueue *offload_target_or_null) {
+  GPR_ASSERT(offload_target_or_null == NULL);
+  grpc_closure *p = list->head;
+  while (p) {
+    grpc_closure *start = p;
+    p = grpc_closure_next(start);
+    start_closure(start);
+  }
+  grpc_closure_list r = GRPC_CLOSURE_LIST_INIT;
+  *list = r;
+}
+
+void grpc_exec_ctx_global_init(void) {
+  gpr_mu_init(&g_mu);
+  gpr_cv_init(&g_cv);
+}
+
+void grpc_exec_ctx_global_shutdown(void) {
+  gpr_mu_lock(&g_mu);
+  while (g_threads != 0) {
+    gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+  }
+  gpr_mu_unlock(&g_mu);
+  gpr_mu_destroy(&g_mu);
+  gpr_cv_destroy(&g_cv);
+}
+
+#endif
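
This is the heart of the prototype: with GRPC_EXECUTION_CONTEXT_SANITIZER defined, nothing is batched on the exec_ctx at all; every enqueued closure is handed its own thread, a global counter tracks how many are in flight, and grpc_exec_ctx_global_shutdown blocks on a condition variable until the count drains to zero. Running each closure on a fresh thread strips away the implicit serialization the closure list normally provides, so hidden ordering and locking assumptions show up as real races. The standalone sketch below reproduces the same pattern with plain pthreads instead of gpr threads; the task, sanitizer_run, sanitizer_drain, and say names are invented for illustration and are not part of gRPC.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* A task stands in for grpc_closure: a callback plus its argument. */
typedef struct {
  void (*cb)(void *arg);
  void *arg;
} task;

static pthread_mutex_t g_mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_cv = PTHREAD_COND_INITIALIZER;
static int g_threads = 0;

static void *thread_main(void *arg) {
  task *t = arg;
  t->cb(t->arg);            /* run the callback on its own thread */
  free(t);
  pthread_mutex_lock(&g_mu);
  if (--g_threads == 0) {   /* last thread out signals the drainer */
    pthread_cond_signal(&g_cv);
  }
  pthread_mutex_unlock(&g_mu);
  return NULL;
}

/* Counterpart of start_closure(): bump the count, then detach a thread. */
static void sanitizer_run(void (*cb)(void *), void *arg) {
  task *t = malloc(sizeof(*t));
  t->cb = cb;
  t->arg = arg;
  pthread_mutex_lock(&g_mu);
  g_threads++;
  pthread_mutex_unlock(&g_mu);
  pthread_t tid;
  pthread_create(&tid, NULL, thread_main, t);
  pthread_detach(tid);
}

/* Counterpart of grpc_exec_ctx_global_shutdown(): wait for all threads. */
static void sanitizer_drain(void) {
  pthread_mutex_lock(&g_mu);
  while (g_threads != 0) {
    pthread_cond_wait(&g_cv, &g_mu);
  }
  pthread_mutex_unlock(&g_mu);
}

static void say(void *arg) { printf("closure: %s\n", (const char *)arg); }

int main(void) {
  sanitizer_run(say, "first");
  sanitizer_run(say, "second");  /* may run before "first": no ordering */
  sanitizer_drain();
  return 0;
}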

src/core/iomgr/exec_ctx.h
@@ -36,6 +36,14 @@
 #include "src/core/iomgr/closure.h"
+/* #define GRPC_EXECUTION_CONTEXT_SANITIZER 1 */
+
+/** A workqueue represents a list of work to be executed asynchronously.
+    Forward declared here to avoid a circular dependency with workqueue.h. */
+struct grpc_workqueue;
+typedef struct grpc_workqueue grpc_workqueue;
+
+#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 /** Execution context.
  *  A bag of data that collects information along a callstack.
  *  Generally created at public API entry points, and passed down as
@@ -57,13 +65,12 @@ struct grpc_exec_ctx {
   grpc_closure_list closure_list;
 };
-/** A workqueue represents a list of work to be executed asynchronously.
-    Forward declared here to avoid a circular dependency with workqueue.h. */
-struct grpc_workqueue;
-typedef struct grpc_workqueue grpc_workqueue;
 #define GRPC_EXEC_CTX_INIT \
   { GRPC_CLOSURE_LIST_INIT }
+#else
+struct grpc_exec_ctx {};
+#define GRPC_EXEC_CTX_INIT {}
+#endif
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
  *  Caller must guarantee that no interfering locks are held.
@@ -82,4 +89,7 @@ void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
                                 grpc_closure_list *list,
                                 grpc_workqueue *offload_target_or_null);
+void grpc_exec_ctx_global_init(void);
+void grpc_exec_ctx_global_shutdown(void);
 #endif
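
In the header the sanitizer is a pure compile-time switch: the normal build keeps the closure_list-carrying struct and its initializer, while the sanitized build swaps in an empty grpc_exec_ctx and an empty GRPC_EXEC_CTX_INIT, so call sites that declare a context and initialize it with the macro compile unchanged in either mode. Below is a minimal standalone sketch of that toggle; the ctx and CTX_INIT names are invented for illustration, and the sketch keeps a dummy member because a truly empty struct is not standard C.

#include <stdio.h>

/* Flip this define (or pass -DCTX_SANITIZER) to switch implementations. */
/* #define CTX_SANITIZER 1 */

#ifndef CTX_SANITIZER
/* Normal mode: the context carries a list of deferred work items. */
typedef struct {
  int deferred;   /* stands in for grpc_closure_list */
} ctx;
#define CTX_INIT \
  { 0 }
#else
/* Sanitized mode: the context carries nothing; work is never deferred. */
typedef struct {
  char unused;    /* placeholder member instead of an empty struct */
} ctx;
#define CTX_INIT \
  { 0 }
#endif

int main(void) {
  ctx c = CTX_INIT;   /* identical call-site code in either mode */
  (void)c;
  printf("sizeof(ctx) = %zu\n", sizeof(ctx));
  return 0;
}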

src/core/iomgr/iomgr.c
@@ -42,6 +42,7 @@
 #include <grpc/support/sync.h>
 #include <grpc/support/thd.h>
+#include "src/core/iomgr/exec_ctx.h"
 #include "src/core/iomgr/iomgr_internal.h"
 #include "src/core/iomgr/timer.h"
 #include "src/core/support/string.h"
@@ -55,6 +56,7 @@ void grpc_iomgr_init(void) {
   g_shutdown = 0;
   gpr_mu_init(&g_mu);
   gpr_cv_init(&g_rcv);
+  grpc_exec_ctx_global_init();
   grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
   g_root_object.next = g_root_object.prev = &g_root_object;
   g_root_object.name = "root";
@@ -133,6 +135,7 @@ void grpc_iomgr_shutdown(void) {
   grpc_pollset_global_shutdown();
   grpc_iomgr_platform_shutdown();
+  grpc_exec_ctx_global_shutdown();
   gpr_mu_destroy(&g_mu);
   gpr_cv_destroy(&g_rcv);
 }

src/core/iomgr/workqueue_posix.c
@@ -107,7 +107,7 @@ void grpc_workqueue_flush(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {
   if (grpc_closure_list_empty(workqueue->closure_list)) {
     grpc_wakeup_fd_wakeup(&workqueue->wakeup_fd);
   }
-  grpc_closure_list_move(&exec_ctx->closure_list, &workqueue->closure_list);
+  grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
   gpr_mu_unlock(&workqueue->mu);
 }
@@ -123,7 +123,7 @@ static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
     gpr_free(workqueue);
   } else {
     gpr_mu_lock(&workqueue->mu);
-    grpc_closure_list_move(&workqueue->closure_list, &exec_ctx->closure_list);
+    grpc_exec_ctx_enqueue_list(exec_ctx, &workqueue->closure_list, NULL);
     grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
     gpr_mu_unlock(&workqueue->mu);
     grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,

tools/run_tests/configs.json
@@ -63,5 +63,8 @@
   },
   {
     "config": "mutrace"
+  },
+  {
+    "config": "esan"
   }
 ]
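
With the entry added to configs.json, the test runner can target the new config just like the existing ones (dbg, mutrace, and the rest); presumably an invocation along the lines of tools/run_tests/run_tests.py --config esan, though the exact flag depends on the run_tests.py revision this prototype was written against.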
