Remove unused-parameter warnings, round 2 (4 of 19)

Branch: pull/20718/head
Author: Vijay Pai, 5 years ago
Parent: 690e313c38
Commit: 93b9bb10a2
Changed files:

  1. src/core/ext/filters/client_channel/subchannel.cc (15)
  2. src/core/ext/filters/client_channel/subchannel.h (5)
  3. src/core/ext/filters/client_channel/xds/xds_client.cc (22)
  4. src/core/ext/filters/client_idle/client_idle_filter.cc (13)
  5. src/core/ext/filters/deadline/deadline_filter.cc (12)
  6. src/core/ext/filters/http/client/http_client_filter.cc (12)
  7. src/core/ext/filters/http/client_authority_filter.cc (6)
  8. src/core/ext/filters/http/message_compress/message_compress_filter.cc (10)
  9. src/core/ext/filters/http/server/http_server_filter.cc (18)
  10. src/core/ext/filters/max_age/max_age_filter.cc (18)

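The change applied across all ten files is the usual C++ idiom for silencing -Wunused-parameter: keep the parameter so the function still matches the callback or filter-vtable signature it has to satisfy, but comment out its name so the compiler no longer sees a named-yet-unread parameter. A minimal, self-contained sketch of the idiom follows; the callback type and function names are hypothetical and not taken from the gRPC tree.

#include <cstdio>

// A callback type with a fixed signature, even for handlers that ignore
// some of the arguments (hypothetical example, not from the gRPC tree).
typedef void (*event_cb)(void* arg, int status);

// Naming 'status' would trip -Wunused-parameter (fatal under -Werror)
// because the body never reads it; commenting out the name keeps the
// signature identical while silencing the warning.
static void on_event(void* arg, int /*status*/) {
  std::printf("event for %p\n", arg);
}

int main() {
  event_cb cb = on_event;  // still convertible: the function type is unchanged
  cb(nullptr, 0);
  return 0;
}

With the names commented out, these translation units build cleanly under -Wall -Wextra (which enables -Wunused-parameter) even when warnings are treated as errors.
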
src/core/ext/filters/client_channel/subchannel.cc

@@ -213,11 +213,12 @@ void SubchannelCall::Unref() {
   GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
 }
-void SubchannelCall::Unref(const DebugLocation& location, const char* reason) {
+void SubchannelCall::Unref(const DebugLocation& /*location*/,
+                           const char* reason) {
   GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
 }
-void SubchannelCall::Destroy(void* arg, grpc_error* error) {
+void SubchannelCall::Destroy(void* arg, grpc_error* /*error*/) {
   GPR_TIMER_SCOPE("subchannel_call_destroy", 0);
   SubchannelCall* self = static_cast<SubchannelCall*>(arg);
   // Keep some members before destroying the subchannel call.
@@ -300,8 +301,8 @@ void SubchannelCall::IncrementRefCount() {
   GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), "");
 }
-void SubchannelCall::IncrementRefCount(const grpc_core::DebugLocation& location,
-                                       const char* reason) {
+void SubchannelCall::IncrementRefCount(
+    const grpc_core::DebugLocation& /*location*/, const char* reason) {
   GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(this), reason);
 }
@@ -721,7 +722,7 @@ Subchannel* Subchannel::WeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
 namespace {
-void subchannel_destroy(void* arg, grpc_error* error) {
+void subchannel_destroy(void* arg, grpc_error* /*error*/) {
   Subchannel* self = static_cast<Subchannel*>(arg);
   Delete(self);
 }
@@ -739,7 +740,7 @@ void Subchannel::WeakUnref(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
   }
 }
-Subchannel* Subchannel::RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+Subchannel* Subchannel::RefFromWeakRef() {
   for (;;) {
     gpr_atm old_refs = gpr_atm_acq_load(&ref_pair_);
     if (old_refs >= (1 << INTERNAL_REF_BITS)) {
@@ -1008,7 +1009,7 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error* error) {
 namespace {
-void ConnectionDestroy(void* arg, grpc_error* error) {
+void ConnectionDestroy(void* arg, grpc_error* /*error*/) {
   grpc_channel_stack* stk = static_cast<grpc_channel_stack*>(arg);
   grpc_channel_stack_destroy(stk);
   gpr_free(stk);

src/core/ext/filters/client_channel/subchannel.h

@@ -43,8 +43,7 @@
 // For debugging refcounting.
 #ifndef NDEBUG
 #define GRPC_SUBCHANNEL_REF(p, r) (p)->Ref(__FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
-  (p)->RefFromWeakRef(__FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) (p)->RefFromWeakRef()
 #define GRPC_SUBCHANNEL_UNREF(p, r) (p)->Unref(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_WEAK_REF(p, r) (p)->WeakRef(__FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) (p)->WeakUnref(__FILE__, __LINE__, (r))
@@ -214,7 +213,7 @@ class Subchannel {
   // Attempts to return a strong ref when only the weak refcount is guaranteed
   // non-zero. If the strong refcount is zero, does not alter the refcount and
   // returns null.
-  Subchannel* RefFromWeakRef(GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+  Subchannel* RefFromWeakRef();
   // Gets the string representing the subchannel address.
   // Caller doesn't take ownership.

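For context, these macros follow gRPC's debug-refcount pattern: when NDEBUG is not defined they pass __FILE__, __LINE__, and a reason string to the ref/unref methods for tracing, while the release-mode definitions (not shown in this hunk) omit them. Since RefFromWeakRef() now takes no arguments, GRPC_SUBCHANNEL_REF_FROM_WEAK_REF discards its reason in both build modes. A standalone sketch of the general idiom, using a hypothetical Counted class rather than the real Subchannel API:

#include <cstdio>

class Counted {
 public:
#ifndef NDEBUG
  // Debug builds record where each ref came from.
  void Ref(const char* file, int line, const char* reason) {
    std::printf("ref at %s:%d (%s)\n", file, line, reason);
    ++refs_;
  }
#else
  void Ref() { ++refs_; }
#endif

 private:
  int refs_ = 0;
};

#ifndef NDEBUG
#define COUNTED_REF(p, r) (p)->Ref(__FILE__, __LINE__, (r))
#else
#define COUNTED_REF(p, r) (p)->Ref()
#endif

int main() {
  Counted c;
  COUNTED_REF(&c, "example");  // the reason is traced only in debug builds
  return 0;
}
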
src/core/ext/filters/client_channel/xds/xds_client.cc

@@ -639,7 +639,7 @@ void XdsClient::ChannelState::AdsCallState::OnResponseReceived(
 }
 void XdsClient::ChannelState::AdsCallState::OnResponseReceivedLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   AdsCallState* ads_calld = static_cast<AdsCallState*>(arg);
   XdsClient* xds_client = ads_calld->xds_client();
   // Empty payload means the call was cancelled.
@@ -1083,7 +1083,7 @@ void XdsClient::ChannelState::LrsCallState::OnInitialRequestSent(
 }
 void XdsClient::ChannelState::LrsCallState::OnInitialRequestSentLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
   // Clear the send_message_payload_.
   grpc_byte_buffer_destroy(lrs_calld->send_message_payload_);
@@ -1102,7 +1102,7 @@ void XdsClient::ChannelState::LrsCallState::OnResponseReceived(
 }
 void XdsClient::ChannelState::LrsCallState::OnResponseReceivedLocked(
-    void* arg, grpc_error* error) {
+    void* arg, grpc_error* /*error*/) {
   LrsCallState* lrs_calld = static_cast<LrsCallState*>(arg);
   XdsClient* xds_client = lrs_calld->xds_client();
   // Empty payload means the call was cancelled.
@@ -1286,17 +1286,17 @@ void XdsClient::Orphan() {
   Unref(DEBUG_LOCATION, "XdsClient::Orphan()");
 }
-void XdsClient::WatchClusterData(StringView cluster,
-                                 UniquePtr<ClusterWatcherInterface> watcher) {
+void XdsClient::WatchClusterData(
+    StringView /*cluster*/, UniquePtr<ClusterWatcherInterface> /*watcher*/) {
   // TODO(juanlishen): Implement.
 }
-void XdsClient::CancelClusterDataWatch(StringView cluster,
-                                       ClusterWatcherInterface* watcher) {
+void XdsClient::CancelClusterDataWatch(StringView /*cluster*/,
+                                       ClusterWatcherInterface* /*watcher*/) {
   // TODO(juanlishen): Implement.
 }
-void XdsClient::WatchEndpointData(StringView cluster,
+void XdsClient::WatchEndpointData(StringView /*cluster*/,
                                   UniquePtr<EndpointWatcherInterface> watcher) {
   EndpointWatcherInterface* w = watcher.get();
   cluster_state_.endpoint_watchers[w] = std::move(watcher);
@@ -1308,7 +1308,7 @@ void XdsClient::WatchEndpointData(StringView cluster,
   chand_->MaybeStartAdsCall();
 }
-void XdsClient::CancelEndpointDataWatch(StringView cluster,
+void XdsClient::CancelEndpointDataWatch(StringView /*cluster*/,
                                         EndpointWatcherInterface* watcher) {
   auto it = cluster_state_.endpoint_watchers.find(watcher);
   if (it != cluster_state_.endpoint_watchers.end()) {
@@ -1319,13 +1319,13 @@ void XdsClient::CancelEndpointDataWatch(StringView cluster,
   }
 }
-void XdsClient::AddClientStats(StringView cluster,
+void XdsClient::AddClientStats(StringView /*cluster*/,
                                XdsClientStats* client_stats) {
   cluster_state_.client_stats.insert(client_stats);
   chand_->MaybeStartLrsCall();
 }
-void XdsClient::RemoveClientStats(StringView cluster,
+void XdsClient::RemoveClientStats(StringView /*cluster*/,
                                   XdsClientStats* client_stats) {
   // TODO(roth): In principle, we should try to send a final load report
   // containing whatever final stats have been accumulated since the

src/core/ext/filters/client_idle/client_idle_filter.cc

@@ -283,7 +283,8 @@ void ChannelData::DecreaseCallCount() {
 }
 ChannelData::ChannelData(grpc_channel_element* elem,
-                         grpc_channel_element_args* args, grpc_error** error)
+                         grpc_channel_element_args* args,
+                         grpc_error** /*error*/)
     : elem_(elem),
       channel_stack_(args->channel_stack),
       client_idle_timeout_(GetClientIdleTimeout(args->channel_args)) {
@@ -352,7 +353,7 @@ void ChannelData::IdleTimerCallback(void* arg, grpc_error* error) {
 }
 void ChannelData::IdleTransportOpCompleteCallback(void* arg,
-                                                  grpc_error* error) {
+                                                  grpc_error* /*error*/) {
   ChannelData* chand = static_cast<ChannelData*>(arg);
   GRPC_CHANNEL_STACK_UNREF(chand->channel_stack_, "idle transport op");
 }
@@ -389,15 +390,15 @@ class CallData {
 };
 grpc_error* CallData::Init(grpc_call_element* elem,
-                           const grpc_call_element_args* args) {
+                           const grpc_call_element_args* /*args*/) {
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
   chand->IncreaseCallCount();
   return GRPC_ERROR_NONE;
 }
 void CallData::Destroy(grpc_call_element* elem,
-                       const grpc_call_final_info* final_info,
-                       grpc_closure* ignored) {
+                       const grpc_call_final_info* /*final_info*/,
+                       grpc_closure* /*ignored*/) {
   ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
   chand->DecreaseCallCount();
 }
@@ -416,7 +417,7 @@ const grpc_channel_filter grpc_client_idle_filter = {
     "client_idle"};
 static bool MaybeAddClientIdleFilter(grpc_channel_stack_builder* builder,
-                                     void* arg) {
+                                     void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   if (!grpc_channel_args_want_minimal_stack(channel_args) &&

src/core/ext/filters/deadline/deadline_filter.cc

@@ -38,7 +38,7 @@
 // The on_complete callback used when sending a cancel_error batch down the
 // filter stack. Yields the call combiner when the batch returns.
-static void yield_call_combiner(void* arg, grpc_error* ignored) {
+static void yield_call_combiner(void* arg, grpc_error* /*ignored*/) {
   grpc_deadline_state* deadline_state = static_cast<grpc_deadline_state*>(arg);
   GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                           "got on_complete from cancel_stream batch");
@@ -233,14 +233,14 @@ void grpc_deadline_state_client_start_transport_stream_op_batch(
 //
 // Constructor for channel_data. Used for both client and server filters.
-static grpc_error* deadline_init_channel_elem(grpc_channel_element* elem,
+static grpc_error* deadline_init_channel_elem(grpc_channel_element* /*elem*/,
                                               grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   return GRPC_ERROR_NONE;
 }
 // Destructor for channel_data. Used for both client and server filters.
-static void deadline_destroy_channel_elem(grpc_channel_element* elem) {}
+static void deadline_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
 // Call data used for both client and server filter.
 typedef struct base_call_data {
@@ -268,9 +268,9 @@ static grpc_error* deadline_init_call_elem(grpc_call_element* elem,
 }
 // Destructor for call_data. Used for both client and server filters.
-static void deadline_destroy_call_elem(grpc_call_element* elem,
-                                       const grpc_call_final_info* final_info,
-                                       grpc_closure* ignored) {
+static void deadline_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   grpc_deadline_state* deadline_state =
       static_cast<grpc_deadline_state*>(elem->call_data);
   deadline_state->~grpc_deadline_state();

src/core/ext/filters/http/client/http_client_filter.cc

@@ -99,8 +99,7 @@ struct channel_data {
 };
 }  // namespace
-static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
-                                                   grpc_metadata_batch* b) {
+static grpc_error* client_filter_incoming_metadata(grpc_metadata_batch* b) {
   if (b->idx.named.status != nullptr) {
     /* If both gRPC status and HTTP status are provided in the response, we
      * should prefer the gRPC status code, as mentioned in
@@ -177,7 +176,7 @@ static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (error == GRPC_ERROR_NONE) {
-    error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata);
+    error = client_filter_incoming_metadata(calld->recv_initial_metadata);
     calld->recv_initial_metadata_error = GRPC_ERROR_REF(error);
   } else {
     GRPC_ERROR_REF(error);
@@ -204,8 +203,7 @@ static void recv_trailing_metadata_ready(void* user_data, grpc_error* error) {
     return;
   }
   if (error == GRPC_ERROR_NONE) {
-    error =
-        client_filter_incoming_metadata(elem, calld->recv_trailing_metadata);
+    error = client_filter_incoming_metadata(calld->recv_trailing_metadata);
   } else {
     GRPC_ERROR_REF(error);
   }
@@ -473,8 +471,8 @@ static grpc_error* http_client_init_call_elem(
 /* Destructor for call_data */
 static void http_client_destroy_call_elem(
-    grpc_call_element* elem, const grpc_call_final_info* final_info,
-    grpc_closure* ignored) {
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }

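http_client_filter.cc (and http_server_filter.cc further down) illustrate a second option: when the unused parameter belongs to a file-local static helper rather than to a function whose signature is pinned by a filter vtable or closure type, the parameter can be deleted outright and the call sites updated. A tiny sketch of that refactor, with hypothetical names unrelated to the real filter code:

#include <cstdio>

// Before the cleanup the helper also took a context pointer it never read:
//   static int filter_status(void* elem, int http_status);
// Nothing outside this file depends on that signature, so the parameter is
// removed outright instead of being commented out.
static int filter_status(int http_status) {
  return (http_status >= 200 && http_status < 300) ? 0 : -1;
}

int main() {
  // Call sites drop the extra argument to match the new signature.
  std::printf("filter result: %d\n", filter_status(200));
  return 0;
}

Either way the observable behavior is unchanged; only the warning surface shrinks.
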
src/core/ext/filters/http/client_authority_filter.cc

@@ -80,9 +80,9 @@ grpc_error* client_authority_init_call_elem(
 }
 /* Destructor for call_data */
-void client_authority_destroy_call_elem(grpc_call_element* elem,
-                                        const grpc_call_final_info* final_info,
-                                        grpc_closure* ignored) {}
+void client_authority_destroy_call_elem(
+    grpc_call_element* /*elem*/, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {}
 /* Constructor for channel_data */
 grpc_error* client_authority_init_channel_elem(

src/core/ext/filters/http/message_compress/message_compress_filter.cc

@@ -361,7 +361,7 @@ static void on_send_message_next_done(void* arg, grpc_error* error) {
   }
 }
-static void start_send_message_batch(void* arg, grpc_error* unused) {
+static void start_send_message_batch(void* arg, grpc_error* /*unused*/) {
   grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
   if (skip_message_compression(elem)) {
     send_message_batch_continue(elem);
@@ -448,9 +448,9 @@ static grpc_error* compress_init_call_elem(grpc_call_element* elem,
 }
 /* Destructor for call_data */
-static void compress_destroy_call_elem(grpc_call_element* elem,
-                                       const grpc_call_final_info* final_info,
-                                       grpc_closure* ignored) {
+static void compress_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }
@@ -487,7 +487,7 @@ static grpc_error* compress_init_channel_elem(grpc_channel_element* elem,
 }
 /* Destructor for channel data */
-static void compress_destroy_channel_elem(grpc_channel_element* elem) {}
+static void compress_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
 const grpc_channel_filter grpc_message_compress_filter = {
     compress_start_transport_stream_op_batch,

src/core/ext/filters/http/server/http_server_filter.cc

@@ -99,8 +99,7 @@ struct channel_data {
 }  // namespace
-static grpc_error* hs_filter_outgoing_metadata(grpc_call_element* elem,
-                                               grpc_metadata_batch* b) {
+static grpc_error* hs_filter_outgoing_metadata(grpc_metadata_batch* b) {
   if (b->idx.named.grpc_message != nullptr) {
     grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
         GRPC_MDVALUE(b->idx.named.grpc_message->md),
@@ -427,10 +426,9 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
                             &calld->content_type,
                             GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
                             GRPC_BATCH_CONTENT_TYPE));
-    hs_add_error(
-        error_name, &error,
-        hs_filter_outgoing_metadata(
-            elem, op->payload->send_initial_metadata.send_initial_metadata));
+    hs_add_error(error_name, &error,
+                 hs_filter_outgoing_metadata(
+                     op->payload->send_initial_metadata.send_initial_metadata));
     if (error != GRPC_ERROR_NONE) return error;
   }
@@ -463,7 +461,7 @@ static grpc_error* hs_mutate_op(grpc_call_element* elem,
   if (op->send_trailing_metadata) {
     grpc_error* error = hs_filter_outgoing_metadata(
-        elem, op->payload->send_trailing_metadata.send_trailing_metadata);
+        op->payload->send_trailing_metadata.send_trailing_metadata);
     if (error != GRPC_ERROR_NONE) return error;
   }
@@ -492,8 +490,8 @@ static grpc_error* hs_init_call_elem(grpc_call_element* elem,
 /* Destructor for call_data */
 static void hs_destroy_call_elem(grpc_call_element* elem,
-                                 const grpc_call_final_info* final_info,
-                                 grpc_closure* ignored) {
+                                 const grpc_call_final_info* /*final_info*/,
+                                 grpc_closure* /*ignored*/) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->~call_data();
 }
@@ -511,7 +509,7 @@ static grpc_error* hs_init_channel_elem(grpc_channel_element* elem,
 }
 /* Destructor for channel data */
-static void hs_destroy_channel_elem(grpc_channel_element* elem) {}
+static void hs_destroy_channel_elem(grpc_channel_element* /*elem*/) {}
 const grpc_channel_filter grpc_http_server_filter = {
     hs_start_transport_stream_op_batch,

src/core/ext/filters/max_age/max_age_filter.cc

@@ -206,7 +206,7 @@ static void decrease_call_count(channel_data* chand) {
   }
 }
-static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_idle_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   /* Decrease call_count. If there are no active calls at this time,
      max_idle_timer will start here. If the number of active calls is not 0,
@@ -257,7 +257,7 @@ class ConnectivityWatcher : public AsyncConnectivityStateWatcherInterface {
 }  // namespace grpc_core
-static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
+static void start_max_age_timer_after_init(void* arg, grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = true;
@@ -276,7 +276,7 @@ static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
 }
 static void start_max_age_grace_timer_after_goaway_op(void* arg,
-                                                       grpc_error* error) {
+                                                       grpc_error* /*error*/) {
   channel_data* chand = static_cast<channel_data*>(arg);
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = true;
@@ -407,17 +407,17 @@ add_random_max_connection_age_jitter_and_convert_to_grpc_millis(int value) {
 }
 /* Constructor for call_data. */
-static grpc_error* max_age_init_call_elem(grpc_call_element* elem,
-                                          const grpc_call_element_args* args) {
+static grpc_error* max_age_init_call_elem(
+    grpc_call_element* elem, const grpc_call_element_args* /*args*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   increase_call_count(chand);
   return GRPC_ERROR_NONE;
 }
 /* Destructor for call_data. */
-static void max_age_destroy_call_elem(grpc_call_element* elem,
-                                      const grpc_call_final_info* final_info,
-                                      grpc_closure* ignored) {
+static void max_age_destroy_call_elem(
+    grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
+    grpc_closure* /*ignored*/) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   decrease_call_count(chand);
 }
@@ -527,7 +527,7 @@ const grpc_channel_filter grpc_max_age_filter = {
     "max_age"};
 static bool maybe_add_max_age_filter(grpc_channel_stack_builder* builder,
-                                     void* arg) {
+                                     void* /*arg*/) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   bool enable =
