Review feedback

reviewable/pr8239/r7
Craig Tiller 8 years ago
parent 3d357d901c
commit 8abc796ed7
  1. src/core/ext/transport/chttp2/transport/chttp2_transport.c — 2 changed lines
  2. src/core/lib/iomgr/resource_quota.c — 26 changed lines
  3. src/core/lib/iomgr/resource_quota.h — 19 changed lines
  4. test/core/end2end/tests/resource_quota_server.c — 5 changed lines
  5. test/core/iomgr/resource_quota_test.c — 10 changed lines

@@ -2189,7 +2189,7 @@ static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_ERROR_INT_HTTP2_ERROR,
GRPC_CHTTP2_ENHANCE_YOUR_CALM)); GRPC_CHTTP2_ENHANCE_YOUR_CALM));
if (n > 1) { if (n > 1) {
/* Since we cancel one stream per destructive reclaimation, if /* Since we cancel one stream per destructive reclamation, if
there are more streams left, we can immediately post a new there are more streams left, we can immediately post a new
reclaimer in case the resource quota needs to free more reclaimer in case the resource quota needs to free more
memory */ memory */

@@ -89,6 +89,7 @@ static void rulist_add_head(grpc_resource_user *resource_user,
resource_user->links[list].prev = (*root)->links[list].prev; resource_user->links[list].prev = (*root)->links[list].prev;
resource_user->links[list].next->links[list].prev = resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev->links[list].next = resource_user; resource_user->links[list].prev->links[list].next = resource_user;
*root = resource_user;
} }
} }
@@ -105,7 +106,6 @@ static void rulist_add_tail(grpc_resource_user *resource_user,
resource_user->links[list].prev = *root; resource_user->links[list].prev = *root;
resource_user->links[list].next->links[list].prev = resource_user->links[list].next->links[list].prev =
resource_user->links[list].prev->links[list].next = resource_user; resource_user->links[list].prev->links[list].next = resource_user;
*root = resource_user;
} }
} }
@@ -114,7 +114,7 @@ static bool rulist_empty(grpc_resource_quota *resource_quota,
return resource_quota->roots[list] == NULL; return resource_quota->roots[list] == NULL;
} }
static grpc_resource_user *rulist_pop_tail(grpc_resource_quota *resource_quota, static grpc_resource_user *rulist_pop_head(grpc_resource_quota *resource_quota,
grpc_rulist list) { grpc_rulist list) {
grpc_resource_user **root = &resource_quota->roots[list]; grpc_resource_user **root = &resource_quota->roots[list];
grpc_resource_user *resource_user = *root; grpc_resource_user *resource_user = *root;
@@ -186,7 +186,7 @@ static void rq_step_sched(grpc_exec_ctx *exec_ctx,
static bool rq_alloc(grpc_exec_ctx *exec_ctx, static bool rq_alloc(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) { grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user; grpc_resource_user *resource_user;
while ((resource_user = rulist_pop_tail(resource_quota, while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION))) { GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu); gpr_mu_lock(&resource_user->mu);
if (resource_user->free_pool < 0 && if (resource_user->free_pool < 0 &&
@@ -209,7 +209,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL); grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
gpr_mu_unlock(&resource_user->mu); gpr_mu_unlock(&resource_user->mu);
} else { } else {
rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
gpr_mu_unlock(&resource_user->mu); gpr_mu_unlock(&resource_user->mu);
return false; return false;
} }
@@ -221,7 +221,7 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
static bool rq_reclaim_from_per_user_free_pool( static bool rq_reclaim_from_per_user_free_pool(
grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) { grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user; grpc_resource_user *resource_user;
while ((resource_user = rulist_pop_tail(resource_quota, while ((resource_user = rulist_pop_head(resource_quota,
GRPC_RULIST_NON_EMPTY_FREE_POOL))) { GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
gpr_mu_lock(&resource_user->mu); gpr_mu_lock(&resource_user->mu);
if (resource_user->free_pool > 0) { if (resource_user->free_pool > 0) {
@@ -249,7 +249,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
if (resource_quota->reclaiming) return true; if (resource_quota->reclaiming) return true;
grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
: GRPC_RULIST_RECLAIMER_BENIGN; : GRPC_RULIST_RECLAIMER_BENIGN;
grpc_resource_user *resource_user = rulist_pop_tail(resource_quota, list); grpc_resource_user *resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == NULL) return false; if (resource_user == NULL) return false;
if (grpc_resource_quota_trace) { if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation", gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation",
@@ -325,7 +325,7 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
GRPC_RULIST_AWAITING_ALLOCATION)) { GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota); rq_step_sched(exec_ctx, resource_user->resource_quota);
} }
rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION); rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
} }
static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru, static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
@@ -337,7 +337,7 @@ static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
GRPC_RULIST_NON_EMPTY_FREE_POOL)) { GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
rq_step_sched(exec_ctx, resource_user->resource_quota); rq_step_sched(exec_ctx, resource_user->resource_quota);
} }
rulist_add_head(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL); rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
} }
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
@@ -351,7 +351,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
GRPC_RULIST_RECLAIMER_BENIGN)) { GRPC_RULIST_RECLAIMER_BENIGN)) {
rq_step_sched(exec_ctx, resource_user->resource_quota); rq_step_sched(exec_ctx, resource_user->resource_quota);
} }
rulist_add_head(resource_user, GRPC_RULIST_RECLAIMER_BENIGN); rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
} }
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru, static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
@@ -367,7 +367,7 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) { GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
rq_step_sched(exec_ctx, resource_user->resource_quota); rq_step_sched(exec_ctx, resource_user->resource_quota);
} }
rulist_add_head(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE); rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
} }
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) { static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
@@ -563,9 +563,6 @@ void grpc_resource_user_init(grpc_resource_user *resource_user,
for (int i = 0; i < GRPC_RULIST_COUNT; i++) { for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = NULL; resource_user->links[i].next = resource_user->links[i].prev = NULL;
} }
#ifndef NDEBUG
resource_user->asan_canary = gpr_malloc(1);
#endif
if (name != NULL) { if (name != NULL) {
resource_user->name = gpr_strdup(name); resource_user->name = gpr_strdup(name);
} else { } else {
@@ -592,9 +589,6 @@ void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx, void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) { grpc_resource_user *resource_user) {
#ifndef NDEBUG
gpr_free(resource_user->asan_canary);
#endif
grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota); grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
gpr_mu_destroy(&resource_user->mu); gpr_mu_destroy(&resource_user->mu);
gpr_free(resource_user->name); gpr_free(resource_user->name);

@@ -49,7 +49,8 @@
resource constrained, grpc_resource_user instances are asked (in turn) to resource constrained, grpc_resource_user instances are asked (in turn) to
free up whatever they can so that the system as a whole can make progress. free up whatever they can so that the system as a whole can make progress.
There are three kinds of reclamation that take place: There are three kinds of reclamation that take place, in order of increasing
invasiveness:
- an internal reclamation, where cached resource at the resource user level - an internal reclamation, where cached resource at the resource user level
is returned to the quota is returned to the quota
- a benign reclamation phase, whereby resources that are in use but are not - a benign reclamation phase, whereby resources that are in use but are not
@@ -58,9 +59,14 @@
make progress may be enacted so that at least one part of the system can make progress may be enacted so that at least one part of the system can
complete. complete.
These reclamations are tried in priority order, and only one reclamation Only one reclamation will be outstanding for a given quota at a given time.
is outstanding for a quota at any given time (meaning that if a destructive On each reclamation attempt, the kinds of reclamation are tried in order of
reclamation makes progress, we may follow up with a benign reclamation). increasing invasiveness, stopping at the first one that succeeds. Thus, on a
given reclamation attempt, if internal and benign reclamation both fail, it
will wind up doing a destructive reclamation. However, the next reclamation
attempt may then be able to get what it needs via internal or benign
reclamation, due to resources that may have been freed up by the destructive
reclamation in the previous attempt.
Future work will be to expose the current resource pressure so that back Future work will be to expose the current resource pressure so that back
pressure can be applied to avoid reclamation phases starting. pressure can be applied to avoid reclamation phases starting.
@@ -112,11 +118,6 @@ struct grpc_resource_user {
lock */ lock */
grpc_closure add_to_free_pool_closure; grpc_closure add_to_free_pool_closure;
#ifndef NDEBUG
/* Canary object to detect leaked resource users with ASAN */
void *asan_canary;
#endif
gpr_mu mu; gpr_mu mu;
/* Total allocated memory outstanding by this resource user in bytes; /* Total allocated memory outstanding by this resource user in bytes;
always positive */ always positive */

@@ -339,9 +339,8 @@ void resource_quota_server(grpc_end2end_test_config config) {
"Done. %d total calls: %d cancelled at server, %d cancelled at client.", "Done. %d total calls: %d cancelled at server, %d cancelled at client.",
NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client); NUM_CALLS, cancelled_calls_on_server, cancelled_calls_on_client);
/* The server call may be cancelled after it's received it's status, but /* The call may be cancelled after the server has sent its status but before
* before the client does: this means that we should see strictly more * the client has received it. */
* failures on the client than on the server */
GPR_ASSERT(cancelled_calls_on_client >= cancelled_calls_on_server); GPR_ASSERT(cancelled_calls_on_client >= cancelled_calls_on_server);
/* However, we shouldn't see radically more... 0.9 is a guessed bound on what /* However, we shouldn't see radically more... 0.9 is a guessed bound on what
* we'd want that ratio to be... to at least trigger some investigation should * we'd want that ratio to be... to at least trigger some investigation should

@@ -553,9 +553,13 @@ static void test_resource_user_stays_allocated_until_memory_released(void) {
static void static void
test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released( test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released(
void) { void) {
gpr_log(GPR_INFO, "** test_pools_merged_on_resource_user_deletion **"); gpr_log(GPR_INFO,
grpc_resource_quota *q = "** "
grpc_resource_quota_create("test_pools_merged_on_resource_user_deletion"); "test_resource_user_stays_allocated_and_reclaimers_unrun_until_"
"memory_released **");
grpc_resource_quota *q = grpc_resource_quota_create(
"test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_"
"released");
grpc_resource_quota_resize(q, 1024); grpc_resource_quota_resize(q, 1024);
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
grpc_resource_user usr; grpc_resource_user usr;

Loading…
Cancel
Save