Remove unused-parameter warnings, round 2 (12 of 19)

pull/20707/head
Vijay Pai 5 years ago
parent 2d80e830c0
commit 32760aca00
  1. 2
      src/core/lib/surface/channel_ping.cc
  2. 23
      src/core/lib/surface/completion_queue.cc
  3. 2
      src/core/lib/surface/completion_queue_factory.cc
  4. 2
      src/core/lib/surface/init.cc
  5. 4
      src/core/lib/surface/init_secure.cc
  6. 8
      src/core/lib/surface/lame_client.cc
  7. 19
      src/core/lib/surface/server.cc
  8. 4
      src/core/lib/transport/byte_stream.cc
  9. 2
      src/core/lib/transport/connectivity_state.cc
  10. 6
      src/core/lib/transport/metadata_batch.cc

@@ -35,7 +35,7 @@ typedef struct {
grpc_cq_completion completion_storage;
} ping_result;
static void ping_destroy(void* arg, grpc_cq_completion* storage) {
static void ping_destroy(void* arg, grpc_cq_completion* /*storage*/) {
gpr_free(arg);
}

@@ -529,7 +529,8 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
}
static void cq_init_next(
void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
void* data,
grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
new (data) cq_next_data();
}
@@ -539,7 +540,8 @@ static void cq_destroy_next(void* data) {
}
static void cq_init_pluck(
void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
void* data,
grpc_experimental_completion_queue_functor* /*shutdown_callback*/) {
new (data) cq_pluck_data();
}
@@ -582,7 +584,7 @@ void grpc_cq_internal_ref(grpc_completion_queue* cq) {
cq->owning_refs.Ref(debug_location, reason);
}
static void on_pollset_shutdown_done(void* arg, grpc_error* error) {
static void on_pollset_shutdown_done(void* arg, grpc_error* /*error*/) {
grpc_completion_queue* cq = static_cast<grpc_completion_queue*>(arg);
GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy");
}
@@ -630,20 +632,21 @@ static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {
GPR_ASSERT(found);
}
#else
static void cq_check_tag(grpc_completion_queue* cq, void* tag, bool lock_cq) {}
static void cq_check_tag(grpc_completion_queue* /*cq*/, void* /*tag*/,
bool /*lock_cq*/) {}
#endif
static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag) {
static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* /*tag*/) {
cq_next_data* cqd = static_cast<cq_next_data*> DATA_FROM_CQ(cq);
return cqd->pending_events.IncrementIfNonzero();
}
static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag) {
static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* /*tag*/) {
cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
return cqd->pending_events.IncrementIfNonzero();
}
static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag) {
static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* /*tag*/) {
cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
return cqd->pending_events.IncrementIfNonzero();
}
@@ -669,7 +672,7 @@ bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
static void cq_end_op_for_next(
grpc_completion_queue* cq, void* tag, grpc_error* error,
void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
grpc_cq_completion* storage, bool internal) {
grpc_cq_completion* storage, bool /*internal*/) {
GPR_TIMER_SCOPE("cq_end_op_for_next", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
@@ -748,7 +751,7 @@ static void cq_end_op_for_next(
static void cq_end_op_for_pluck(
grpc_completion_queue* cq, void* tag, grpc_error* error,
void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
grpc_cq_completion* storage, bool internal) {
grpc_cq_completion* storage, bool /*internal*/) {
GPR_TIMER_SCOPE("cq_end_op_for_pluck", 0);
cq_pluck_data* cqd = static_cast<cq_pluck_data*> DATA_FROM_CQ(cq);
@@ -939,7 +942,7 @@ static void dump_pending_tags(grpc_completion_queue* cq) {
gpr_free(out);
}
#else
static void dump_pending_tags(grpc_completion_queue* cq) {}
static void dump_pending_tags(grpc_completion_queue* /*cq*/) {}
#endif
static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,

@@ -28,7 +28,7 @@
*/
static grpc_completion_queue* default_create(
const grpc_completion_queue_factory* factory,
const grpc_completion_queue_factory* /*factory*/,
const grpc_completion_queue_attributes* attr) {
return grpc_completion_queue_create_internal(
attr->cq_completion_type, attr->cq_polling_type, attr->cq_shutdown_cb);

@@ -197,7 +197,7 @@ void grpc_shutdown_internal_locked(void) {
grpc_destroy_static_metadata_ctx();
}
void grpc_shutdown_internal(void* ignored) {
void grpc_shutdown_internal(void* /*ignored*/) {
GRPC_API_TRACE("grpc_shutdown_internal", 0, ());
grpc_core::MutexLock lock(&g_init_mu);
// We have released lock from the shutdown thread and it is possible that

@@ -37,7 +37,7 @@
void grpc_security_pre_init(void) {}
static bool maybe_prepend_client_auth_filter(
grpc_channel_stack_builder* builder, void* arg) {
grpc_channel_stack_builder* builder, void* /*arg*/) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (args) {
@@ -52,7 +52,7 @@ static bool maybe_prepend_client_auth_filter(
}
static bool maybe_prepend_server_auth_filter(
grpc_channel_stack_builder* builder, void* arg) {
grpc_channel_stack_builder* builder, void* /*arg*/) {
const grpc_channel_args* args =
grpc_channel_stack_builder_get_channel_arguments(builder);
if (args) {

@@ -94,8 +94,8 @@ static void lame_start_transport_stream_op_batch(
calld->call_combiner);
}
static void lame_get_channel_info(grpc_channel_element* elem,
const grpc_channel_info* channel_info) {}
static void lame_get_channel_info(grpc_channel_element* /*elem*/,
const grpc_channel_info* /*channel_info*/) {}
static void lame_start_transport_op(grpc_channel_element* elem,
grpc_transport_op* op) {
@@ -133,8 +133,8 @@ static grpc_error* lame_init_call_elem(grpc_call_element* elem,
return GRPC_ERROR_NONE;
}
static void lame_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
static void lame_destroy_call_elem(grpc_call_element* /*elem*/,
const grpc_call_final_info* /*final_info*/,
grpc_closure* then_schedule_closure) {
GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
}

@@ -302,7 +302,7 @@ struct shutdown_cleanup_args {
grpc_slice slice;
};
static void shutdown_cleanup(void* arg, grpc_error* error) {
static void shutdown_cleanup(void* arg, grpc_error* /*error*/) {
struct shutdown_cleanup_args* a =
static_cast<struct shutdown_cleanup_args*>(arg);
grpc_slice_unref_internal(a->slice);
@@ -367,7 +367,7 @@ static void request_matcher_destroy(request_matcher* rm) {
gpr_free(rm->requests_per_cq);
}
static void kill_zombie(void* elem, grpc_error* error) {
static void kill_zombie(void* elem, grpc_error* /*error*/) {
grpc_call_unref(
grpc_call_from_top_element(static_cast<grpc_call_element*>(elem)));
}
@@ -449,7 +449,7 @@ static void orphan_channel(channel_data* chand) {
chand->next = chand->prev = chand;
}
static void finish_destroy_channel(void* cd, grpc_error* error) {
static void finish_destroy_channel(void* cd, grpc_error* /*error*/) {
channel_data* chand = static_cast<channel_data*>(cd);
grpc_server* server = chand->server;
GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
@@ -477,7 +477,7 @@ static void destroy_channel(channel_data* chand) {
op);
}
static void done_request_event(void* req, grpc_cq_completion* c) {
static void done_request_event(void* req, grpc_cq_completion* /*c*/) {
gpr_free(req);
}
@@ -672,7 +672,8 @@ static int num_listeners(grpc_server* server) {
return n;
}
static void done_shutdown_event(void* server, grpc_cq_completion* completion) {
static void done_shutdown_event(void* server,
grpc_cq_completion* /*completion*/) {
server_unref(static_cast<grpc_server*>(server));
}
@@ -850,7 +851,7 @@ static void got_initial_metadata(void* ptr, grpc_error* error) {
}
}
static void accept_stream(void* cd, grpc_transport* transport,
static void accept_stream(void* cd, grpc_transport* /*transport*/,
const void* transport_server_data) {
channel_data* chand = static_cast<channel_data*>(cd);
/* create a call */
@@ -895,8 +896,8 @@ static grpc_error* server_init_call_elem(grpc_call_element* elem,
}
static void server_destroy_call_elem(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* ignored) {
const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->~call_data();
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
@@ -1258,7 +1259,7 @@ void done_published_shutdown(void* done_arg, grpc_cq_completion* storage) {
gpr_free(storage);
}
static void listener_destroy_done(void* s, grpc_error* error) {
static void listener_destroy_done(void* s, grpc_error* /*error*/) {
grpc_server* server = static_cast<grpc_server*>(s);
gpr_mu_lock(&server->mu_global);
server->listeners_destroyed++;

@@ -53,8 +53,8 @@ void SliceBufferByteStream::Orphan() {
// filter stack.
}
bool SliceBufferByteStream::Next(size_t max_size_hint,
grpc_closure* on_complete) {
bool SliceBufferByteStream::Next(size_t /*max_size_hint*/,
grpc_closure* /*on_complete*/) {
GPR_DEBUG_ASSERT(backing_buffer_.count > 0);
return true;
}

@@ -72,7 +72,7 @@ class AsyncConnectivityStateWatcherInterface::Notifier {
}
private:
static void SendNotification(void* arg, grpc_error* ignored) {
static void SendNotification(void* arg, grpc_error* /*ignored*/) {
Notifier* self = static_cast<Notifier*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_connectivity_state_trace)) {
gpr_log(GPR_INFO, "watcher %p: delivering async notification for %s",

@@ -50,6 +50,9 @@ static void assert_valid_list(grpc_mdelem_list* list) {
verified_count++;
}
GPR_ASSERT(list->count == verified_count);
#else
// Avoid unused-parameter warning for debug-only parameter
(void)list;
#endif /* NDEBUG */
}
@@ -64,6 +67,9 @@ static void assert_valid_callouts(grpc_metadata_batch* batch) {
}
grpc_slice_unref_internal(key_interned);
}
#else
// Avoid unused-parameter warning for debug-only parameter
(void)batch;
#endif
}

Loading…
Cancel
Save