Enforcing reserved entries to be NULL.

Branch: pull/2612/head
Author: Nicolas "Pixel" Noble (10 years ago)
Parent: 9a123df2cd
Commit: 4599288f8a
  1. src/core/surface/call.c (5 changed lines)
  2. src/core/surface/channel.c (6 changed lines)
  3. src/core/surface/channel_create.c (2 changed lines)
  4. src/core/surface/completion_queue.c (6 changed lines)
  5. src/core/surface/server.c (2 changed lines)

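For callers, the effect of this commit is that every reserved argument in the C core surface API, and the reserved field of each grpc_op, must be NULL; anything else now trips a GPR_ASSERT and aborts the process instead of being silently ignored. Below is a minimal caller-side sketch against the signatures visible in these hunks; the target string, method, host, deadline, and the middle argument of grpc_channel_create are illustrative assumptions, not taken from the commit, and teardown is elided.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();

  /* Every reserved argument must now be NULL; a non-NULL value trips
     the new GPR_ASSERT(!reserved) checks and aborts. */
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  grpc_channel *channel =
      grpc_channel_create("localhost:50051", NULL /* args */, NULL /* reserved */);
  grpc_call *call = grpc_channel_create_call(
      channel, cq, "/foo/Bar", "localhost",
      gpr_inf_future(GPR_CLOCK_REALTIME), NULL /* reserved */);

  grpc_call_cancel(call, NULL /* reserved */);

  /* Normal shutdown/teardown elided; the point is the NULL reserved
     arguments above. */
  return 0;
}
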
@@ -1187,7 +1187,7 @@ void grpc_call_destroy(grpc_call *c) {
 }
 grpc_call_error grpc_call_cancel(grpc_call *call, void *reserved) {
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   return grpc_call_cancel_with_status(call, GRPC_STATUS_CANCELLED, "Cancelled",
                                       NULL);
 }
@@ -1431,7 +1431,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
   const grpc_op *op;
   grpc_ioreq *req;
   void (*finish_func)(grpc_call *, int, void *) = finish_batch;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, tag);
@@ -1446,6 +1446,7 @@ grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
   /* rewrite batch ops into ioreq ops */
   for (in = 0, out = 0; in < nops; in++) {
     op = &ops[in];
+    GPR_ASSERT(!op->reserved);
     switch (op->op) {
       case GRPC_OP_SEND_INITIAL_METADATA:
         /* Flag validation: currently allow no flags */

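Because grpc_call_start_batch now also asserts that op->reserved is NULL for every op in the batch, op arrays are best zero-initialized before the individual fields are filled in. A short sketch under the same assumptions as above (call and tag are presumed to come from the surrounding client code):

#include <string.h>

#include <grpc/grpc.h>

/* Start a batch whose ops all carry reserved == NULL. */
static grpc_call_error send_close_batch(grpc_call *call, void *tag) {
  grpc_op ops[2];
  grpc_op *op;

  memset(ops, 0, sizeof(ops)); /* zeroes flags and every op->reserved */

  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op++;

  return grpc_call_start_batch(call, ops, (size_t)(op - ops), tag,
                               NULL /* reserved */);
}
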
@@ -155,7 +155,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
                                     grpc_completion_queue *cq,
                                     const char *method, const char *host,
                                     gpr_timespec deadline, void *reserved) {
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, cq,
       grpc_mdelem_from_metadata_strings(
@@ -170,7 +170,7 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
 void *grpc_channel_register_call(grpc_channel *channel, const char *method,
                                  const char *host, void *reserved) {
   registered_call *rc = gpr_malloc(sizeof(registered_call));
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   rc->path = grpc_mdelem_from_metadata_strings(
       channel->metadata_context, GRPC_MDSTR_REF(channel->path_string),
       grpc_mdstr_from_string(channel->metadata_context, method));
@@ -188,7 +188,7 @@ grpc_call *grpc_channel_create_registered_call(
     grpc_channel *channel, grpc_completion_queue *completion_queue,
     void *registered_call_handle, gpr_timespec deadline, void *reserved) {
   registered_call *rc = registered_call_handle;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
       channel, completion_queue, GRPC_MDELEM_REF(rc->path),
       GRPC_MDELEM_REF(rc->authority), deadline);

@@ -161,7 +161,7 @@ grpc_channel *grpc_channel_create(const char *target,
   subchannel_factory *f;
   grpc_mdctx *mdctx = grpc_mdctx_create();
   int n = 0;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   /* TODO(census)
   if (grpc_channel_args_is_census_enabled(args)) {
     filters[n++] = &grpc_client_census_filter;

@@ -64,7 +64,7 @@ struct grpc_completion_queue {
 grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
   grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   memset(cc, 0, sizeof(*cc));
   /* Initial ref is dropped by grpc_completion_queue_shutdown */
   gpr_ref_init(&cc->pending_events, 1);
@@ -149,7 +149,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline,
                                       void *reserved) {
   grpc_event ret;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -192,7 +192,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   grpc_event ret;
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

@@ -729,7 +729,7 @@ void grpc_server_register_completion_queue(grpc_server *server,
                                            grpc_completion_queue *cq,
                                            void *reserved) {
   size_t i, n;
-  (void) reserved;
+  GPR_ASSERT(!reserved);
   for (i = 0; i < server->cq_count; i++) {
     if (server->cqs[i] == cq) return;
   }
