Progress on a finalization list

pull/7644/head
Craig Tiller 9 years ago
parent 919175b619
commit a36857da24
src/core/lib/iomgr/closure.h    |  2
src/core/lib/iomgr/combiner.c   | 76
src/core/lib/iomgr/combiner.h   |  8
test/core/iomgr/combiner_test.c | 14

src/core/lib/iomgr/closure.h
@@ -89,6 +89,8 @@ grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
#define GRPC_CLOSURE_LIST_INIT \
{ NULL, NULL }
void grpc_closure_list_init(grpc_closure_list *list);
/** add \a closure to the end of \a list
and set \a closure's result to \a error */
void grpc_closure_list_append(grpc_closure_list *list, grpc_closure *closure,
                              grpc_error *error);
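For context, these two helpers are the only closure.h additions this change relies on. A minimal sketch of how they combine, using the fields the diff touches elsewhere (head, next_data.next, error); the GRPC_ERROR_CREATE message is invented:

#include "src/core/lib/iomgr/closure.h"

/* Sketch only: append stores the result error on the closure and links it
   at the tail, so a later drain (see execute_final below) can hand each
   closure its own error. */
static void queue_two(grpc_closure *a, grpc_closure *b) {
  grpc_closure_list list = GRPC_CLOSURE_LIST_INIT; /* { NULL, NULL } */
  grpc_closure_list_init(&list); /* equivalent to the macro initializer */
  grpc_closure_list_append(&list, a, GRPC_ERROR_NONE);
  grpc_closure_list_append(&list, b, GRPC_ERROR_CREATE("deadline exceeded"));
  /* now: list.head == a, a->next_data.next == b, b->error holds the error */
}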

src/core/lib/iomgr/combiner.c
@@ -45,18 +45,18 @@ struct grpc_combiner {
// lower bit - zero if orphaned
// other bits - number of items queued on the lock
gpr_atm state;
bool take_async_break_before_final_list;
grpc_closure_list final_list;
grpc_closure continue_finishing;
};
static void continue_finishing(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
grpc_combiner *lock = gpr_malloc(sizeof(*lock));
lock->optional_workqueue = optional_workqueue;
gpr_atm_no_barrier_store(&lock->state, 1);
gpr_mpscq_init(&lock->queue);
grpc_closure_init(&lock->continue_finishing, continue_finishing, lock);
lock->take_async_break_before_final_list = false;
grpc_closure_list_init(&lock->final_list);
return lock;
}
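A gloss on the state word above: bit 0 means "not orphaned", and each queued item contributes 2, so the values finish() switches on below are small integers. A worked walk-through (values inferred from this diff, not an API):

/* state encoding as used by create/execute/finish:
 *   1 -> unlocked, not orphaned           (set by grpc_combiner_create)
 *   3 -> one item queued, not orphaned    (execute does fetch_add(+2))
 *   5 -> two items queued, not orphaned
 *   4 -> two items queued, orphaned       (destroy clears bit 0)
 * finish() retires one item per loop with fetch_add(-2) and switches on the
 * OLD value: 5/4 mean one item remains after this one, 3 means the last item
 * just finished with the lock still owned, 2 means the last item finished on
 * an orphaned lock, so the combiner itself can be freed. */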
@@ -72,11 +72,52 @@ void grpc_combiner_destroy(grpc_combiner *lock) {
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
static void continue_finishing_mainline(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
if (maybe_finish_one(exec_ctx, arg)) finish(exec_ctx, arg);
}
static void execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
grpc_closure *c = lock->final_list.head;
grpc_closure_list_init(&lock->final_list);
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error;
c->cb(exec_ctx, c->cb_arg, error);
GRPC_ERROR_UNREF(error);
c = next;
}
}
static void continue_executing_final(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
execute_final(exec_ctx, arg);
finish(exec_ctx, arg);
}
static bool start_execute_final(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
if (lock->take_async_break_before_final_list) {
grpc_closure_init(&lock->continue_finishing, continue_executing_final,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
lock->optional_workqueue);
return false;
} else {
execute_final(exec_ctx, lock);
return true;
}
}
static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
if (n == NULL) {
// queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later)
grpc_closure_init(&lock->continue_finishing, continue_finishing_mainline,
lock);
grpc_exec_ctx_sched(exec_ctx, &lock->continue_finishing, GRPC_ERROR_NONE,
lock->optional_workqueue);
return false;
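A subtlety in the branch above: gpr_mpscq_pop can return NULL transiently even though state says items are queued, because a producer may have swung the queue's tail before linking its node. The code treats that as a cue to reschedule itself via the exec_ctx rather than wait; a sketch of the busy-wait it deliberately avoids (illustration, not proposed code):

/* Hypothetical alternative: spin until the in-flight push becomes visible.
 * Correct, but it pins the thread while the producer finishes; punting to
 * the workqueue instead lets other work run and retries later. */
gpr_mpscq_node *n;
do {
  n = gpr_mpscq_pop(&lock->queue);
} while (n == NULL);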
@@ -89,8 +130,16 @@ static bool maybe_finish_one(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
}
static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
bool (*executor)(grpc_exec_ctx * exec_ctx, grpc_combiner * lock) =
maybe_finish_one;
do {
switch (gpr_atm_full_fetch_add(&lock->state, -2)) {
case 5: // we're down to one queued item: if it's the final list we
case 4: // should do that
if (!grpc_closure_list_empty(lock->final_list)) {
executor = start_execute_final;
}
break;
case 3: // had one count, one unorphaned --> unlocked unorphaned
return;
case 2: // and one count, one orphaned --> unlocked and orphaned
@@ -102,12 +151,7 @@ static void finish(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
// deleted lock
GPR_UNREACHABLE_CODE(return );
}
-} while (maybe_finish_one(exec_ctx, lock));
-}
-
-static void continue_finishing(grpc_exec_ctx *exec_ctx, void *arg,
-                               grpc_error *error) {
-  if (maybe_finish_one(exec_ctx, arg)) finish(exec_ctx, arg);
+} while (executor(exec_ctx, lock));
}
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
@@ -123,3 +167,15 @@ void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
}
}
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break) {
if (force_async_break) {
lock->take_async_break_before_final_list = true;
}
if (grpc_closure_list_empty(lock->final_list)) {
gpr_atm_full_fetch_add(&lock->state, 2);
}
grpc_closure_list_append(&lock->final_list, closure, error);
}
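Note the accounting: the whole final list is charged to state as one queued item, which is why the fetch_add(+2) above happens only when the list was empty. A trace of one full cycle, assuming the elided branch of grpc_combiner_execute runs the first closure inline when it finds the lock idle, with one regular closure C1 that schedules one finally closure F:

/* state = 1                 idle, not orphaned
 * execute(C1):  +2 -> 3     old value 1 == idle, so the caller runs C1
 * C1 calls execute_finally(F): final_list was empty, so +2 -> 5; F appended
 * finish():     -2, old 5   one "item" left and final_list is non-empty,
 *                           so executor = start_execute_final
 * executor runs F inline (or after a workqueue trip if the async break was
 * requested), then the loop repeats:
 * finish():     -2, old 3   last item retired, lock still owned -> return */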

src/core/lib/iomgr/combiner.h
@@ -55,5 +55,13 @@ void grpc_combiner_destroy(grpc_combiner *lock);
// Execute \a action within the lock.
void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error);
// Execute \a action within the lock just prior to unlocking.
// if \a force_async_break is additionally set, the combiner is forced to trip
// through the workqueue between finishing the primary queue of combined
// closures and executing the finally list.
// Can only be called from within a closure scheduled by grpc_combiner_execute
void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
grpc_closure *closure, grpc_error *error,
bool force_async_break);
#endif /* GRPC_CORE_LIB_IOMGR_COMBINER_H */
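To make the contract concrete, a minimal hypothetical caller that respects the "only from within a closure scheduled by grpc_combiner_execute" rule; work_cb and cleanup_cb are invented names:

#include "src/core/lib/iomgr/combiner.h"

/* Hypothetical callbacks, illustration only. */
static void cleanup_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* runs after the main queue drains, just before the combiner unlocks */
}

static void work_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_combiner *lock = arg;
  /* ... mutate state guarded by the combiner ... */
  grpc_combiner_execute_finally(exec_ctx, lock,
                                grpc_closure_create(cleanup_cb, lock),
                                GRPC_ERROR_NONE, false /* no forced break */);
}

/* entry point, callable from any thread:
 *   grpc_combiner_execute(exec_ctx, lock,
 *                         grpc_closure_create(work_cb, lock),
 *                         GRPC_ERROR_NONE); */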

test/core/iomgr/combiner_test.c
@@ -120,12 +120,26 @@ static void test_execute_many(void) {
grpc_combiner_destroy(lock);
}
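The hunk below begins past the helper it schedules: add_finally is referenced but not shown, and as displayed nothing sets done before the assert, so the truncated portion presumably wires that flag through. A plausible shape for the helper, consistent with the header contract (in_finally is an invented name):

/* Assumed shape of the missing helper: from inside the combiner, queue a
 * closure onto the finally list without forcing an async break. */
static void in_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}

static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_combiner_execute_finally(exec_ctx, arg,
                                grpc_closure_create(in_finally, arg),
                                GRPC_ERROR_NONE, false);
}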
static void test_execute_finally(void) {
gpr_log(GPR_DEBUG, "test_execute_finally");
grpc_combiner *lock = grpc_combiner_create(NULL);
bool done = false;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_combiner_execute(&exec_ctx, lock, grpc_closure_create(add_finally, lock),
GRPC_ERROR_NONE);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
grpc_combiner_destroy(lock);
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
test_no_op();
test_execute_one();
test_execute_many();
test_execute_finally();
grpc_shutdown();
return 0;
}
