clang-format

Branch: reviewable/pr8239/r1
Craig Tiller committed 8 years ago
parent 2f1d8708e5
commit afcc8752f3
19 changed files:
  1. src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c (4 changed lines)
  2. src/core/lib/iomgr/endpoint.h (2 changed lines)
  3. src/core/lib/iomgr/endpoint_pair.h (3 changed lines)
  4. src/core/lib/iomgr/endpoint_pair_posix.c (3 changed lines)
  5. src/core/lib/iomgr/resource_quota.c (131 changed lines)
  6. src/core/lib/security/credentials/jwt/jwt_verifier.c (6 changed lines)
  7. src/core/lib/security/credentials/oauth2/oauth2_credentials.c (7 changed lines)
  8. src/core/lib/security/transport/secure_endpoint.c (3 changed lines)
  9. src/cpp/common/channel_arguments.cc (3 changed lines)
  10. src/cpp/server/server_builder.cc (2 changed lines)
  11. test/core/bad_client/bad_client.c (3 changed lines)
  12. test/core/end2end/fuzzers/client_fuzzer.c (3 changed lines)
  13. test/core/end2end/fuzzers/server_fuzzer.c (3 changed lines)
  14. test/core/end2end/tests/resource_quota_server.c (3 changed lines)
  15. test/core/http/httpcli_test.c (4 changed lines)
  16. test/core/http/httpscli_test.c (4 changed lines)
  17. test/core/iomgr/endpoint_pair_test.c (3 changed lines)
  18. test/core/util/passthru_endpoint.c (5 changed lines)
  19. test/cpp/qps/driver.cc (6 changed lines)
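
Most of the hunks below only re-wrap existing calls to the resource-quota API to fit the formatter's column limit. For orientation, here is a minimal sketch of the call pattern those sites share (create a quota, optionally resize it, hand it to an endpoint factory, then drop the caller's reference), assembled from calls that appear in this diff. The function name example_quota_usage, the chosen include paths, and the comments are illustrative assumptions, not part of the commit.

    /* Sketch only: assembled from calls visible in the hunks below;
       not taken verbatim from the commit. */
    #include "src/core/lib/iomgr/endpoint_pair.h"
    #include "src/core/lib/iomgr/resource_quota.h"

    static void example_quota_usage(void) {
      /* The name is used for debug/trace output (e.g. the "BP %s %s: ..." logs). */
      grpc_resource_quota *resource_quota =
          grpc_resource_quota_create("example_quota");

      /* resource_quota_server.c caps its quota at 5 MiB the same way. */
      grpc_resource_quota_resize(resource_quota, 5 * 1024 * 1024);

      /* Endpoint factories take the quota plus a read slice size. */
      grpc_endpoint_pair p =
          grpc_iomgr_create_endpoint_pair("example", resource_quota, 65536);

      /* Call sites in this diff drop their reference once the endpoint
         holds the quota. */
      grpc_resource_quota_unref(resource_quota);
      (void)p;
    }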

@@ -57,8 +57,8 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server *server,
char *name;
gpr_asprintf(&name, "fd:%d", fd);
grpc_resource_quota *resource_quota =
grpc_resource_quota_from_channel_args(grpc_server_get_channel_args(server));
grpc_resource_quota *resource_quota = grpc_resource_quota_from_channel_args(
grpc_server_get_channel_args(server));
grpc_endpoint *server_endpoint =
grpc_tcp_create(grpc_fd_create(fd, name), resource_quota,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, name);

@@ -37,9 +37,9 @@
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resource_quota.h"
/* An endpoint caps a streaming channel between two communicating processes.
Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */

@@ -42,6 +42,7 @@ typedef struct {
} grpc_endpoint_pair;
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
const char *name, grpc_resource_quota *resource_quota, size_t read_slice_size);
const char *name, grpc_resource_quota *resource_quota,
size_t read_slice_size);
#endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */

@@ -63,7 +63,8 @@ static void create_sockets(int sv[2]) {
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
const char *name, grpc_resource_quota *resource_quota, size_t read_slice_size) {
const char *name, grpc_resource_quota *resource_quota,
size_t read_slice_size) {
int sv[2];
grpc_endpoint_pair p;
char *final_name;

@@ -73,12 +73,14 @@ struct grpc_resource_quota {
* list management
*/
static void bulist_add_tail(grpc_resource_user *resource_user, grpc_bulist list) {
static void bulist_add_tail(grpc_resource_user *resource_user,
grpc_bulist list) {
grpc_resource_quota *resource_quota = resource_user->resource_quota;
grpc_resource_user **root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
resource_user;
} else {
resource_user->links[list].next = *root;
resource_user->links[list].prev = (*root)->links[list].prev;
@@ -87,12 +89,14 @@ static void bulist_add_tail(grpc_resource_user *resource_user, grpc_bulist list)
}
}
static void bulist_add_head(grpc_resource_user *resource_user, grpc_bulist list) {
static void bulist_add_head(grpc_resource_user *resource_user,
grpc_bulist list) {
grpc_resource_quota *resource_quota = resource_user->resource_quota;
grpc_resource_user **root = &resource_quota->roots[list];
if (*root == NULL) {
*root = resource_user;
resource_user->links[list].next = resource_user->links[list].prev = resource_user;
resource_user->links[list].next = resource_user->links[list].prev =
resource_user;
} else {
resource_user->links[list].next = (*root)->links[list].next;
resource_user->links[list].prev = *root;
@@ -102,12 +106,13 @@ static void bulist_add_head(grpc_resource_user *resource_user, grpc_bulist list)
}
}
static bool bulist_empty(grpc_resource_quota *resource_quota, grpc_bulist list) {
static bool bulist_empty(grpc_resource_quota *resource_quota,
grpc_bulist list) {
return resource_quota->roots[list] == NULL;
}
static grpc_resource_user *bulist_pop(grpc_resource_quota *resource_quota,
grpc_bulist list) {
grpc_bulist list) {
grpc_resource_user **root = &resource_quota->roots[list];
grpc_resource_user *resource_user = *root;
if (resource_user == NULL) {
@@ -145,10 +150,12 @@ static void bulist_remove(grpc_resource_user *resource_user, grpc_bulist list) {
* buffer pool state machine
*/
static bool bpalloc(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
static bool bpscavenge(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota);
static bool bpreclaim(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota,
bool destructive);
static bool bpalloc(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
static bool bpscavenge(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota);
static bool bpreclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void bpstep(grpc_exec_ctx *exec_ctx, void *bp, grpc_error *error) {
grpc_resource_quota *resource_quota = bp;
@@ -168,12 +175,13 @@ static void bpstep_sched(grpc_exec_ctx *exec_ctx,
resource_quota->step_scheduled = true;
grpc_resource_quota_internal_ref(resource_quota);
grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
&resource_quota->bpstep_closure, GRPC_ERROR_NONE,
false);
&resource_quota->bpstep_closure,
GRPC_ERROR_NONE, false);
}
/* returns true if all allocations are completed */
static bool bpalloc(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
static bool bpalloc(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user;
while ((resource_user =
bulist_pop(resource_quota, GRPC_BULIST_AWAITING_ALLOCATION))) {
@@ -207,7 +215,8 @@ static bool bpalloc(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota
}
/* returns true if any memory could be reclaimed from buffers */
static bool bpscavenge(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota) {
static bool bpscavenge(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
grpc_resource_user *resource_user;
while ((resource_user =
bulist_pop(resource_quota, GRPC_BULIST_NON_EMPTY_FREE_POOL))) {
@@ -232,16 +241,17 @@ static bool bpscavenge(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_qu
}
/* returns true if reclaimation is proceeding */
static bool bpreclaim(grpc_exec_ctx *exec_ctx, grpc_resource_quota *resource_quota,
bool destructive) {
static bool bpreclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive) {
if (resource_quota->reclaiming) return true;
grpc_bulist list = destructive ? GRPC_BULIST_RECLAIMER_DESTRUCTIVE
: GRPC_BULIST_RECLAIMER_BENIGN;
grpc_resource_user *resource_user = bulist_pop(resource_quota, list);
if (resource_user == NULL) return false;
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: initiate %s reclaimation", resource_quota->name,
resource_user->name, destructive ? "destructive" : "benign");
gpr_log(GPR_DEBUG, "BP %s %s: initiate %s reclaimation",
resource_quota->name, resource_user->name,
destructive ? "destructive" : "benign");
}
resource_quota->reclaiming = true;
grpc_resource_quota_internal_ref(resource_quota);
@@ -284,7 +294,8 @@ static void bu_slice_unref(void *p) {
}
}
static gpr_slice bu_slice_create(grpc_resource_user *resource_user, size_t size) {
static gpr_slice bu_slice_create(grpc_resource_user *resource_user,
size_t size) {
bu_slice_refcount *rc = gpr_malloc(sizeof(bu_slice_refcount) + size);
rc->base.ref = bu_slice_ref;
rc->base.unref = bu_slice_unref;
@@ -304,7 +315,8 @@ static gpr_slice bu_slice_create(grpc_resource_user *resource_user, size_t size)
static void bu_allocate(grpc_exec_ctx *exec_ctx, void *bu, grpc_error *error) {
grpc_resource_user *resource_user = bu;
if (bulist_empty(resource_user->resource_quota, GRPC_BULIST_AWAITING_ALLOCATION)) {
if (bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_AWAITING_ALLOCATION);
@@ -315,7 +327,8 @@ static void bu_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *bu,
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota, GRPC_BULIST_NON_EMPTY_FREE_POOL)) {
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_NON_EMPTY_FREE_POOL);
@@ -326,8 +339,10 @@ static void bu_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *bu,
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota, GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota, GRPC_BULIST_RECLAIMER_BENIGN)) {
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_BENIGN)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
}
bulist_add_tail(resource_user, GRPC_BULIST_RECLAIMER_BENIGN);
@@ -338,8 +353,10 @@ static void bu_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *bu,
grpc_resource_user *resource_user = bu;
if (!bulist_empty(resource_user->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION) &&
bulist_empty(resource_user->resource_quota, GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota, GRPC_BULIST_RECLAIMER_BENIGN) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_NON_EMPTY_FREE_POOL) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_BENIGN) &&
bulist_empty(resource_user->resource_quota,
GRPC_BULIST_RECLAIMER_DESTRUCTIVE)) {
bpstep_sched(exec_ctx, resource_user->resource_quota);
@@ -371,9 +388,9 @@ static void bu_allocated_slices(grpc_exec_ctx *exec_ctx, void *ts,
grpc_resource_user_slice_allocator *slice_allocator = ts;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
gpr_slice_buffer_add_indexed(slice_allocator->dest,
bu_slice_create(slice_allocator->resource_user,
slice_allocator->length));
gpr_slice_buffer_add_indexed(
slice_allocator->dest, bu_slice_create(slice_allocator->resource_user,
slice_allocator->length));
}
}
grpc_closure_run(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
@@ -393,7 +410,8 @@ static void bp_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
if (delta < 0 && a->resource_quota->free_pool < 0) {
bpstep_sched(exec_ctx, a->resource_quota);
} else if (delta > 0 &&
!bulist_empty(a->resource_quota, GRPC_BULIST_AWAITING_ALLOCATION)) {
!bulist_empty(a->resource_quota,
GRPC_BULIST_AWAITING_ALLOCATION)) {
bpstep_sched(exec_ctx, a->resource_quota);
}
grpc_resource_quota_internal_unref(exec_ctx, a->resource_quota);
@@ -436,7 +454,7 @@ grpc_resource_quota *grpc_resource_quota_create(const char *name) {
}
void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota) {
grpc_resource_quota *resource_quota) {
if (gpr_unref(&resource_quota->refs)) {
grpc_combiner_destroy(exec_ctx, resource_quota->combiner);
gpr_free(resource_quota->name);
@@ -450,7 +468,8 @@ void grpc_resource_quota_unref(grpc_resource_quota *resource_quota) {
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_resource_quota *grpc_resource_quota_internal_ref(grpc_resource_quota *resource_quota) {
grpc_resource_quota *grpc_resource_quota_internal_ref(
grpc_resource_quota *resource_quota) {
gpr_ref(&resource_quota->refs);
return resource_quota;
}
@@ -459,7 +478,8 @@ void grpc_resource_quota_ref(grpc_resource_quota *resource_quota) {
grpc_resource_quota_internal_ref(resource_quota);
}
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota, size_t size) {
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
bp_resize_args *a = gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
@@ -508,16 +528,20 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
*/
void grpc_resource_user_init(grpc_resource_user *resource_user,
grpc_resource_quota *resource_quota, const char *name) {
resource_user->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &bu_allocate, resource_user);
grpc_resource_quota *resource_quota,
const char *name) {
resource_user->resource_quota =
grpc_resource_quota_internal_ref(resource_quota);
grpc_closure_init(&resource_user->allocate_closure, &bu_allocate,
resource_user);
grpc_closure_init(&resource_user->add_to_free_pool_closure,
&bu_add_to_free_pool, resource_user);
grpc_closure_init(&resource_user->post_reclaimer_closure[0],
&bu_post_benign_reclaimer, resource_user);
grpc_closure_init(&resource_user->post_reclaimer_closure[1],
&bu_post_destructive_reclaimer, resource_user);
grpc_closure_init(&resource_user->destroy_closure, &bu_destroy, resource_user);
grpc_closure_init(&resource_user->destroy_closure, &bu_destroy,
resource_user);
gpr_mu_init(&resource_user->mu);
resource_user->allocated = 0;
resource_user->free_pool = 0;
@@ -542,8 +566,8 @@ void grpc_resource_user_init(grpc_resource_user *resource_user,
}
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
grpc_closure *on_done) {
grpc_resource_user *resource_user,
grpc_closure *on_done) {
gpr_mu_lock(&resource_user->mu);
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) ==
0);
@@ -558,7 +582,7 @@ void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
grpc_resource_user *resource_user) {
#ifndef NDEBUG
gpr_free(resource_user->asan_canary);
#endif
@@ -568,8 +592,8 @@ void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size,
grpc_closure *optional_on_done) {
grpc_resource_user *resource_user, size_t size,
grpc_closure *optional_on_done) {
gpr_mu_lock(&resource_user->mu);
grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
&resource_user->on_done_destroy_closure);
@@ -609,7 +633,7 @@ void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user, size_t size) {
grpc_resource_user *resource_user, size_t size) {
gpr_mu_lock(&resource_user->mu);
GPR_ASSERT(resource_user->allocated >= (int64_t)size);
bool was_zero_or_negative = resource_user->free_pool <= 0;
@@ -640,8 +664,9 @@ void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
bool destructive, grpc_closure *closure) {
grpc_resource_user *resource_user,
bool destructive,
grpc_closure *closure) {
if (gpr_atm_acq_load(&resource_user->on_done_destroy_closure) == 0) {
GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
resource_user->reclaimers[destructive] = closure;
@@ -654,14 +679,15 @@ void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
}
void grpc_resource_user_finish_reclaimation(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
grpc_resource_user *resource_user) {
if (grpc_resource_quota_trace) {
gpr_log(GPR_DEBUG, "BP %s %s: reclaimation complete",
resource_user->resource_quota->name, resource_user->name);
}
grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
&resource_user->resource_quota->bpreclaimation_done_closure,
GRPC_ERROR_NONE, false);
grpc_combiner_execute(
exec_ctx, resource_user->resource_quota->combiner,
&resource_user->resource_quota->bpreclaimation_done_closure,
GRPC_ERROR_NONE, false);
}
void grpc_resource_user_slice_allocator_init(
@@ -674,11 +700,12 @@ void grpc_resource_user_slice_allocator_init(
}
void grpc_resource_user_alloc_slices(
grpc_exec_ctx *exec_ctx, grpc_resource_user_slice_allocator *slice_allocator,
size_t length, size_t count, gpr_slice_buffer *dest) {
grpc_exec_ctx *exec_ctx,
grpc_resource_user_slice_allocator *slice_allocator, size_t length,
size_t count, gpr_slice_buffer *dest) {
slice_allocator->length = length;
slice_allocator->count = count;
slice_allocator->dest = dest;
grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user, count * length,
&slice_allocator->on_allocated);
grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
count * length, &slice_allocator->on_allocated);
}

@@ -660,7 +660,8 @@ static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
grpc_resource_quota *resource_quota = grpc_resource_quota_create("jwt_verifier");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
@@ -772,7 +773,8 @@ static void retrieve_key_and_verify(grpc_exec_ctx *exec_ctx,
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
grpc_resource_quota *resource_quota = grpc_resource_quota_create("jwt_verifier");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("jwt_verifier");
grpc_httpcli_get(
exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),

@@ -310,7 +310,8 @@ static void compute_engine_fetch_oauth2(
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
channel. This would allow us to cancel an authentication query when under
extreme memory pressure. */
grpc_resource_quota *resource_quota = grpc_resource_quota_create("oauth2_credentials");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials");
grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
deadline, grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
@@ -367,8 +368,8 @@ static void refresh_token_fetch_oauth2(
extreme memory pressure. */
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("oauth2_credentials_refresh");
grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota, &request,
body, strlen(body), deadline,
grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
&request, body, strlen(body), deadline,
grpc_closure_create(response_cb, metadata_req),
&metadata_req->response);
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);

@@ -370,7 +370,8 @@ static grpc_workqueue *endpoint_get_workqueue(grpc_endpoint *secure_ep) {
return grpc_endpoint_get_workqueue(ep->wrapped_ep);
}
static grpc_resource_user *endpoint_get_resource_user(grpc_endpoint *secure_ep) {
static grpc_resource_user *endpoint_get_resource_user(
grpc_endpoint *secure_ep) {
secure_endpoint *ep = (secure_endpoint *)secure_ep;
return grpc_endpoint_get_resource_user(ep->wrapped_ep);
}

@@ -114,7 +114,8 @@ void ChannelArguments::SetUserAgentPrefix(
}
}
void ChannelArguments::SetResourceQuota(const grpc::ResourceQuota& resource_quota) {
void ChannelArguments::SetResourceQuota(
const grpc::ResourceQuota& resource_quota) {
SetPointerWithVtable(GRPC_ARG_BUFFER_POOL, resource_quota.c_resource_quota(),
grpc_resource_quota_arg_vtable());
}

@@ -33,8 +33,8 @@
#include <grpc++/server_builder.h>
#include <grpc++/resource_quota.h>
#include <grpc++/impl/service_type.h>
#include <grpc++/resource_quota.h>
#include <grpc++/server.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

@@ -114,7 +114,8 @@ void grpc_run_bad_client_test(
grpc_init();
/* Create endpoints */
grpc_resource_quota *resource_quota = grpc_resource_quota_create("bad_client_test");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("bad_client_test");
sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 65536);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

@@ -58,7 +58,8 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
grpc_init();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_quota *resource_quota = grpc_resource_quota_create("client_fuzzer");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("client_fuzzer");
grpc_endpoint *mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

@@ -56,7 +56,8 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
grpc_init();
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_quota *resource_quota = grpc_resource_quota_create("server_fuzzer");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("server_fuzzer");
grpc_endpoint *mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);

@@ -108,7 +108,8 @@ static gpr_slice generate_random_slice() {
}
void resource_quota_server(grpc_end2end_test_config config) {
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_server");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("test_server");
grpc_resource_quota_resize(resource_quota, 5 * 1024 * 1024);
#define NUM_CALLS 100

@@ -130,8 +130,8 @@ static void test_post(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello",
5, n_seconds_time(15),
grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
"hello", 5, n_seconds_time(15),
grpc_closure_create(on_finish, &response), &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);

@@ -132,8 +132,8 @@ static void test_post(int port) {
grpc_http_response response;
memset(&response, 0, sizeof(response));
grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello",
5, n_seconds_time(15),
grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
"hello", 5, n_seconds_time(15),
grpc_closure_create(on_finish, &response), &response);
grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
gpr_mu_lock(g_mu);

@@ -49,7 +49,8 @@ static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
size_t slice_size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_endpoint_test_fixture f;
grpc_resource_quota *resource_quota = grpc_resource_quota_create("endpoint_pair_test");
grpc_resource_quota *resource_quota =
grpc_resource_quota_create("endpoint_pair_test");
grpc_endpoint_pair p =
grpc_iomgr_create_endpoint_pair("test", resource_quota, slice_size);
grpc_resource_quota_unref(resource_quota);

@@ -143,7 +143,7 @@ static void me_really_destroy(grpc_exec_ctx *exec_ctx, void *ep,
static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
half *m = (half *)ep;
grpc_resource_user_shutdown(exec_ctx, &m->resource_user,
grpc_closure_create(me_really_destroy, m));
grpc_closure_create(me_really_destroy, m));
}
static char *me_get_peer(grpc_endpoint *ep) {
@@ -170,7 +170,8 @@ static const grpc_endpoint_vtable vtable = {
};
static void half_init(half *m, passthru_endpoint *parent,
grpc_resource_quota *resource_quota, const char *half_name) {
grpc_resource_quota *resource_quota,
const char *half_name) {
m->base.vtable = &vtable;
m->parent = parent;
gpr_slice_buffer_init(&m->read_buffer);

@@ -170,8 +170,10 @@ static void postprocess_scenario_result(ScenarioResult* result) {
failures += rrc.count();
}
}
result->mutable_summary()->set_successful_requests_per_second(successes / time_estimate);
result->mutable_summary()->set_failed_requests_per_second(failures / time_estimate);
result->mutable_summary()->set_successful_requests_per_second(
successes / time_estimate);
result->mutable_summary()->set_failed_requests_per_second(failures /
time_estimate);
}
}
