Remove unused-parameter warnings, round 2 (4 of 19)

pull/20718/head
Vijay Pai 5 years ago
parent 690e313c38
commit 93b9bb10a2
  1. src/core/ext/filters/client_channel/subchannel.cc (15 changes)
  2. src/core/ext/filters/client_channel/subchannel.h (5 changes)
  3. src/core/ext/filters/client_channel/xds/xds_client.cc (22 changes)
  4. src/core/ext/filters/client_idle/client_idle_filter.cc (13 changes)
  5. src/core/ext/filters/deadline/deadline_filter.cc (12 changes)
  6. src/core/ext/filters/http/client/http_client_filter.cc (12 changes)
  7. src/core/ext/filters/http/client_authority_filter.cc (6 changes)
  8. src/core/ext/filters/http/message_compress/message_compress_filter.cc (10 changes)
  9. src/core/ext/filters/http/server/http_server_filter.cc (16 changes)
  10. src/core/ext/filters/max_age/max_age_filter.cc (18 changes)

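Nearly every hunk below applies the same fix: a parameter that must stay in the signature (a grpc_closure callback, a filter vtable entry) but is never read gets its name commented out, which silences -Wunused-parameter while keeping the declaration self-documenting. A minimal, self-contained sketch of the idiom; the names and the plain int error type are illustrative, not taken from the diff:

#include <cstdio>

// A fixed callback signature, as a closure-style API might require.
using Callback = void (*)(void* arg, int error);

// 'error' is required by the signature but never read here; commenting
// out the name silences -Wunused-parameter without changing the type.
static void on_done(void* arg, int /*error*/) {
  std::printf("done: %s\n", static_cast<const char*>(arg));
}

int main() {
  Callback cb = on_done;
  char msg[] = "ok";
  cb(msg, 0);  // the error value is simply ignored by on_done
}

Where a parameter turns out to be unused at every call site of an internal helper, the commit instead deletes it outright; see the http_client_filter.cc and http_server_filter.cc hunks.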
src/core/ext/filters/client_channel/subchannel.cc

@@ -213,11 +213,12 @@ void SubchannelCall::Unref() {
   GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
 }

-void SubchannelCall::Unref(const DebugLocation& location, const char* reason) {
+void SubchannelCall::Unref(const DebugLocation& /*location*/,
+                           const char* reason) {
   GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
 }

-void SubchannelCall::Destroy(void* arg, grpc_error* error) {
+void SubchannelCall::Destroy(void* arg, grpc_error* /*error*/) {
   GPR_TIMER_SCOPE("subchannel_call_destroy", 0);
   SubchannelCall* self = static_cast<SubchannelCall*>(arg);
   // Keep some members before destroying the subchannel call.

@@ -300,8 +301,8 @@ void SubchannelCall::IncrementRefCount() {
   GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
 }

-void SubchannelCall::IncrementRefCount(const grpc_core::DebugLocation& location,
-                                       const char* reason) {
+void SubchannelCall::IncrementRefCount(
+    const grpc_core::DebugLocation& /*location*/, const char* reason) {
   GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
 }

@@ -721,7 +722,7 @@ Subchannel* Subchannel::WeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
 namespace {

-void subchannel_destroy(void* arg, grpc_error* error) {
+void subchannel_destroy(void* arg, grpc_error* /*error*/) {
   Subchannel* self = static_cast<Subchannel*>(arg);
   Delete(self);
 }

@@ -739,7 +740,7 @@ void Subchannel::WeakUnref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
   }
 }

-Subchannel* Subchannel::RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+Subchannel* Subchannel::RefFromWeakRef() {
   for (;;) {
     gpr_atm old_refs = gpr_atm_acq_load(&ref_pair_);
     if (old_refs >= (1 << INTERNAL_REF_BITS)) {

@@ -1008,7 +1009,7 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
 namespace {

-void ConnectionDestroy(void* arg, grpc_error* error) {
+void ConnectionDestroy(void* arg, grpc_error* /*error*/) {
   grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
   grpc_channel_stack_destroy(stk);
   gpr_free(stk);

src/core/ext/filters/client_channel/subchannel.h

@@ -43,8 +43,7 @@
 // For debugging refcounting.
 #ifndef NDEBUG
 #define GRPC_SUBCHANNEL_REF(p, r) (p)->Ref(__FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
-  (p)->RefFromWeakRef(__FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) (p)->RefFromWeakRef()
 #define GRPC_SUBCHANNEL_UNREF(p, r) (p)->Unref(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_WEAK_REF(p, r) (p)->WeakRef(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) (p)->WeakUnref(__FILE__, __LINE__, (r))

@@ -214,7 +213,7 @@ class Subchannel {
   // Attempts to return a strong ref when only the weak refcount is guaranteed
   // non-zero. If the strong refcount is zero, does not alter the refcount and
   // returns null.
-  Subchannel* RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  Subchannel* RefFromWeakRef();

   // Gets the string representing the subchannel address.
   // Caller doesn't take ownership.

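A side note on the RefFromWeakRef() declaration trimmed above: its comment describes the classic "take a strong ref only if the strong count is still non-zero" loop. The sketch below is a simplified, self-contained illustration using std::atomic; it is not gRPC's implementation, which packs the strong and weak counts into a single gpr_atm word (hence the 1 << INTERNAL_REF_BITS test in the subchannel.cc hunk).

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<uint64_t> strong_refs{1};

  // Returns true if a strong ref was taken, false if the object is
  // already logically dead (strong count reached zero).
  bool RefIfNonZero() {
    uint64_t count = strong_refs.load(std::memory_order_acquire);
    for (;;) {
      if (count == 0) return false;  // never resurrect a dead object
      // CAS: only bump the count if nobody changed it underneath us.
      if (strong_refs.compare_exchange_weak(count, count + 1,
                                            std::memory_order_acq_rel)) {
        return true;
      }
      // On failure, 'count' was reloaded with the current value; retry.
    }
  }
};
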
src/core/ext/filters/client_channel/xds/xds_client.cc

@@ -639,7 +639,7 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceived(
 }

 void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
   XdsClient* xds_client = ads_calld->xds_client();
   // Empty payload means the call was cancelled.

@@ -1083,7 +1083,7 @@ void XdsClient::ChannelState::LrsCallState::OnInitialRequestSent(
 }

 void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
   // Clear the send_message_payload_.
   grpc_byte_buffer_destroy(lrs_calld->send_message_payload_);

@@ -1102,7 +1102,7 @@ void XdsClient::ChannelState::LrsCallState::OnResponseReceived(
 }

 void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
   XdsClient* xds_client = lrs_calld->xds_client();
   // Empty payload means the call was cancelled.

@@ -1286,17 +1286,17 @@ void XdsClient::Orphan() {
   Unref(DEBUG_LOCATION, "XdsClient::Orphan()");
 }

-void XdsClient::WatchClusterData(StringView cluster,
-                                 UniquePtr<ClusterWatcherInterface> watcher) {
+void XdsClient::WatchClusterData(
+    StringView /*cluster*/, UniquePtr<ClusterWatcherInterface> /*watcher*/) {
   // TODO(juanlishen): Implement.
 }

-void XdsClient::CancelClusterDataWatch(StringView cluster,
-                                       ClusterWatcherInterface* watcher) {
+void XdsClient::CancelClusterDataWatch(StringView /*cluster*/,
+                                       ClusterWatcherInterface* /*watcher*/) {
   // TODO(juanlishen): Implement.
 }

-void XdsClient::WatchEndpointData(StringView cluster,
+void XdsClient::WatchEndpointData(StringView /*cluster*/,
                                   UniquePtr<EndpointWatcherInterface> watcher) {
   EndpointWatcherInterface* w = watcher.get();
   cluster_state_.endpoint_watchers[w] = std::move(watcher);

@@ -1308,7 +1308,7 @@ void XdsClient::WatchEndpointData(StringView cluster,
   chand_->MaybeStartAdsCall();
 }

-void XdsClient::CancelEndpointDataWatch(StringView cluster,
+void XdsClient::CancelEndpointDataWatch(StringView /*cluster*/,
                                         EndpointWatcherInterface* watcher) {
   auto it = cluster_state_.endpoint_watchers.find(watcher);
   if (it != cluster_state_.endpoint_watchers.end()) {

@@ -1319,13 +1319,13 @@ void XdsClient::CancelEndpointDataWatch(StringView cluster,
   }
 }

-void XdsClient::AddClientStats(StringView cluster,
+void XdsClient::AddClientStats(StringView /*cluster*/,
                                XdsClientStats* client_stats) {
   cluster_state_.client_stats.insert(client_stats);
   chand_->MaybeStartLrsCall();
 }

-void XdsClient::RemoveClientStats(StringView cluster,
+void XdsClient::RemoveClientStats(StringView /*cluster*/,
                                   XdsClientStats* client_stats) {
   // TODO(roth): In principle, we should try to send a final load report
   // containing whatever final stats have been accumulated since the

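The WatchEndpointData/CancelEndpointDataWatch pair above also shows the watcher-ownership pattern: the map value owns the watcher while the raw pointer serves as the lookup key, so cancellation needs only the pointer the caller already holds. A self-contained sketch of that pattern, with illustrative names rather than gRPC's:

#include <map>
#include <memory>

struct Watcher {
  virtual ~Watcher() = default;
};

class Registry {
 public:
  // The map value owns the watcher; the raw pointer doubles as the key,
  // so a later Cancel() can find the entry by pointer alone.
  Watcher* Add(std::unique_ptr<Watcher> w) {
    Watcher* key = w.get();
    watchers_[key] = std::move(w);
    return key;
  }

  // Erasing the entry destroys the watcher via the owning unique_ptr.
  void Cancel(Watcher* w) {
    auto it = watchers_.find(w);
    if (it != watchers_.end()) watchers_.erase(it);
  }

 private:
  std::map<Watcher*, std::unique_ptr<Watcher>> watchers_;
};

int main() {
  Registry r;
  Watcher* handle = r.Add(std::make_unique<Watcher>());
  r.Cancel(handle);  // mirrors CancelEndpointDataWatch's find-and-erase
}
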
src/core/ext/filters/client_idle/client_idle_filter.cc

@@ -283,7 +283,8 @@ void ChannelData::DecreaseCallCount() {
 }

 ChannelData::ChannelData(grpc_channel_element* elem,
-                         grpc_channel_element_args* args, grpc_error** error)
+                         grpc_channel_element_args* args,
+                         grpc_error** /*error*/)
     : elem_(elem),
       channel_stack_(args->channel_stack),
       client_idle_timeout_(GetClientIdleTimeout(args->channel_args)) {

@@ -352,7 +353,7 @@ void ChannelData::IdleTimerCallback(void* arg, grpc_error* error) {
 }

 void ChannelData::IdleTransportOpCompleteCallback(void* arg,
-                                                  grpc_error* error) {
+                                                  grpc_error* /*error*/) {
   ChannelData* chand = static_cast<ChannelData*>(arg);
   GRPC_CHANNEL_STACK_UNREF(chand->channel_stack_, "idle transport op");
 }

@@ -389,15 +390,15 @@ class CallData {
 };

 grpc_error* CallData::Init(grpc_call_element* elem,
-                           const grpc_call_element_args* args) {
+                           const grpc_call_element_args* /*args*/) {
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
   chand->IncreaseCallCount();
   return GRPC_ERROR_NONE;
 }

 void CallData::Destroy(grpc_call_element* elem,
-                       const grpc_call_final_info* final_info,
-                       grpc_closure* ignored) {
+                       const grpc_call_final_info* /*final_info*/,
+                       grpc_closure* /*ignored*/) {
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
   chand->DecreaseCallCount();
 }

@@ -416,7 +417,7 @@ const grpc_channel_filter grpc_client_idle_filter = {
     "client_idle"};

 static bool MaybeAddClientIdleFilter(grpc_channel_stack_builder* builder,
-                                     void* arg) {
+                                     void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   if (!grpc_channel_args_want_minimal_stack(channel_args) &&

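The client_idle hunks make clear why these parameters cannot simply be deleted: CallData::Init and CallData::Destroy are installed in the grpc_client_idle_filter vtable, whose slot types are fixed. A self-contained sketch of that constraint, using a hypothetical vtable rather than gRPC's grpc_channel_filter:

#include <cstdio>

// Each slot in the vtable fixes the callback's type, so a callback
// that ignores an argument must still declare it.
struct FilterVtable {
  int (*init_call)(void* elem, const void* args);
  void (*destroy_call)(void* elem, void* ignored);
};

static int my_init_call(void* elem, const void* /*args*/) {
  std::printf("init elem=%p\n", elem);
  return 0;
}

static void my_destroy_call(void* /*elem*/, void* /*ignored*/) {}

static const FilterVtable kFilter = {my_init_call, my_destroy_call};

int main() {
  int elem = 0;
  kFilter.init_call(&elem, nullptr);
  kFilter.destroy_call(&elem, nullptr);
}
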
src/core/ext/filters/deadline/deadline_filter.cc

@@ -38,7 +38,7 @@
 // The on_complete callback used when sending a cancel_error batch down the
 // filter stack. Yields the call combiner when the batch returns.
-static void yield_call_combiner(void* arg, grpc_error* ignored) {
+static void yield_call_combiner(void* arg, grpc_error* /*ignored*/) {
   grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
   GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                           "got on_complete from cancel_stream batch");

@@ -233,14 +233,14 @@ void grpc_deadline_state_client_start_transport_stream_op_batch(
 //

 // Constructor for channel_data. Used for both client and server filters.
-static grpc_error* deadline_init_channel_elem(grpc_channel_element* elem,
+static grpc_error* deadline_init_channel_elem(grpc_channel_element* /*elem*/,
                                               grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   return GRPC_ERROR_NONE;
 }

 // Destructor for channel_data. Used for both client and server filters.
-static void deadline_destroy_channel_elem(grpc_channel_element* elem) {}
+static void deadline_destroy_channel_elem(grpc_channel_element* /*elem*/) {}

 // Call data used for both client and server filter.
 typedef struct base_call_data {

@@ -268,9 +268,9 @@ static grpc_error* deadline_init_call_elem(grpc_call_element* elem,
 }

 // Destructor for call_data. Used for both client and server filters.
-static void deadline_destroy_call_elem(grpc_call_element* elem,
-                                       const grpc_call_final_info* final_info,
-                                       grpc_closure* ignored) {
+static void deadline_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   deadline_state->~grpc_deadline_state();

src/core/ext/filters/http/client/http_client_filter.cc

@@ -99,8 +99,7 @@ struct channel_data {
 };
 }  // namespace

-static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
-                                                   grpc_metadata_batch* b) {
+static grpc_error* client_filter_incoming_metadata(grpc_metadata_batch* b) {
   if (b->idx.named.status != nullptr) {
     /* If both gRPC status and HTTP status are provided in the response, we
      * should prefer the gRPC status code, as mentioned in

@@ -177,7 +176,7 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (error == GRPC_ERROR_NONE) {
-    error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata);
+    error = client_filter_incoming_metadata(calld->recv_initial_metadata);
     calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
   } else {
     GRPC_ERROR_REF(error);

@@ -204,8 +203,7 @@ static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
     return;
   }
   if (error == GRPC_ERROR_NONE) {
-    error =
-        client_filter_incoming_metadata(elem, calld->recv_trailing_metadata);
+    error = client_filter_incoming_metadata(calld->recv_trailing_metadata);
   } else {
     GRPC_ERROR_REF(error);
   }

@@ -473,8 +471,8 @@ static grpc_error* http_client_init_call_elem(
 /* Destructor for call_data */
 static void http_client_destroy_call_elem(
-    grpc_call_element* elem, const grpc_call_final_info* final_info,
-    grpc_closure* ignored) {
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }

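http_client_filter.cc takes the other available fix: client_filter_incoming_metadata is an internal helper, not a vtable entry, and its elem parameter was unused at every call site, so the parameter is removed and both callers updated (http_server_filter.cc below does the same for hs_filter_outgoing_metadata). A before/after sketch with hypothetical names:

#include <string>

struct MetadataBatch {
  std::string status;
};

// Before (shape only, names hypothetical):
//   static bool filter_metadata(CallElement* elem, MetadataBatch* b);
// 'elem' was never read, so every call site passed an argument that
// carried no information.

// After: internal helpers are not pinned to a vtable signature, so the
// dead parameter is deleted outright and the call sites updated.
static bool filter_metadata(MetadataBatch* b) { return !b->status.empty(); }

int main() {
  MetadataBatch b{"200"};
  return filter_metadata(&b) ? 0 : 1;
}
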
src/core/ext/filters/http/client_authority_filter.cc

@@ -80,9 +80,9 @@ grpc_error* client_authority_init_call_elem(
 }

 /* Destructor for call_data */
-void client_authority_destroy_call_elem(grpc_call_element* elem,
-                                        const grpc_call_final_info* final_info,
-                                        grpc_closure* ignored) {}
+void client_authority_destroy_call_elem(
+    grpc_call_element* /*elem*/, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {}

 /* Constructor for channel_data */
 grpc_error* client_authority_init_channel_elem(

src/core/ext/filters/http/message_compress/message_compress_filter.cc

@@ -361,7 +361,7 @@ static void on_send_message_next_done(void* arg, grpc_error* error) {
   }
 }

-static void start_send_message_batch(void* arg, grpc_error* unused) {
+static void start_send_message_batch(void* arg, grpc_error* /*unused*/) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   if (skip_message_compression(elem)) {
     send_message_batch_continue(elem);

@@ -448,9 +448,9 @@ static grpc_error* compress_init_call_elem(grpc_call_element* elem,
 }

 /* Destructor for call_data */
-static void compress_destroy_call_elem(grpc_call_element* elem,
-                                       const grpc_call_final_info* final_info,
-                                       grpc_closure* ignored) {
+static void compress_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }

@@ -487,7 +487,7 @@ static grpc_error* compress_init_channel_elem(grpc_channel_element* elem,
 }

 /* Destructor for channel data */
-static void compress_destroy_channel_elem(grpc_channel_element* elem) {}
+static void compress_destroy_channel_elem(grpc_channel_element* /*elem*/) {}

 const grpc_channel_filter grpc_message_compress_filter = {
     compress_start_transport_stream_op_batch,

src/core/ext/filters/http/server/http_server_filter.cc

@@ -99,8 +99,7 @@ struct channel_data {
 }  // namespace

-static grpc_error* hs_filter_outgoing_metadata(grpc_call_element* elem,
-                                               grpc_metadata_batch* b) {
+static grpc_error* hs_filter_outgoing_metadata(grpc_metadata_batch* b) {
   if (b->idx.named.grpc_message != nullptr) {
     grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
         GRPC_MDVALUE(b->idx.named.grpc_message->md),

@@ -427,10 +426,9 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
                      &calld->content_type,
                      GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
                      GRPC_BATCH_CONTENT_TYPE));
-    hs_add_error(
-        error_name, &error,
-        hs_filter_outgoing_metadata(
-            elem, op->payload->send_initial_metadata.send_initial_metadata));
+    hs_add_error(error_name, &error,
+                 hs_filter_outgoing_metadata(
+                     op->payload->send_initial_metadata.send_initial_metadata));
     if (error != GRPC_ERROR_NONE) return error;
   }

@@ -463,7 +461,7 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
   if (op->send_trailing_metadata) {
     grpc_error* error = hs_filter_outgoing_metadata(
-        elem, op->payload->send_trailing_metadata.send_trailing_metadata);
+        op->payload->send_trailing_metadata.send_trailing_metadata);
     if (error != GRPC_ERROR_NONE) return error;
   }

@@ -492,8 +490,8 @@ static grpc_error* hs_init_call_elem(grpc_call_element* elem,
 /* Destructor for call_data */
 static void hs_destroy_call_elem(grpc_call_element* elem,
-                                 const grpc_call_final_info* final_info,
-                                 grpc_closure* ignored) {
+                                 const grpc_call_final_info* /*final_info*/,
+                                 grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }

@@ -511,7 +509,7 @@ static grpc_error* hs_init_channel_elem(grpc_channel_element* elem,
 }

 /* Destructor for channel data */
-static void hs_destroy_channel_elem(grpc_channel_element* elem) {}
+static void hs_destroy_channel_elem(grpc_channel_element* /*elem*/) {}

 const grpc_channel_filter grpc_http_server_filter = {
     hs_start_transport_stream_op_batch,

src/core/ext/filters/max_age/max_age_filter.cc

@@ -206,7 +206,7 @@ static void decrease_call_count(channel_data* chand) {
   }
 }

-static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_idle_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   /* Decrease call_count. If there are no active calls at this time,
      max_idle_timer will start here. If the number of active calls is not 0,

@@ -257,7 +257,7 @@ class ConnectivityWatcher : public AsyncConnectivityStateWatcherInterface {
 }  // namespace grpc_core

-static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_age_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = true;

@@ -276,7 +276,7 @@ static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
 }

 static void start_max_age_grace_timer_after_goaway_op(void* arg,
-                                                      grpc_error* error) {
+                                                      grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = true;

@@ -407,17 +407,17 @@ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
 }

 /* Constructor for call_data. */
-static grpc_error* max_age_init_call_elem(grpc_call_element* elem,
-                                          const grpc_call_element_args* args) {
+static grpc_error* max_age_init_call_elem(
+    grpc_call_element* elem, const grpc_call_element_args* /*args*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   increase_call_count(chand);
   return GRPC_ERROR_NONE;
 }

 /* Destructor for call_data. */
-static void max_age_destroy_call_elem(grpc_call_element* elem,
-                                      const grpc_call_final_info* final_info,
-                                      grpc_closure* ignored) {
+static void max_age_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   decrease_call_count(chand);
 }

@@ -527,7 +527,7 @@ const grpc_channel_filter grpc_max_age_filter = {
     "max_age"};

 static bool maybe_add_max_age_filter(grpc_channel_stack_builder* builder,
-                                     void* arg) {
+                                     void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   bool enable =
