Remove unused-parameter warnings, round 2 (11 of 19)

pull/20708/head
Vijay Pai 5 years ago
parent 2d80e830c0
commit 8a8348b2ca
  1. 2
      src/core/ext/filters/http/client/http_client_filter.cc
  2. 12
      src/core/lib/security/security_connector/ssl/ssl_security_connector.cc
  3. 2
      src/core/lib/security/security_connector/ssl_utils.cc
  4. 10
      src/core/lib/security/security_connector/tls/spiffe_security_connector.cc
  5. 4
      src/core/lib/security/transport/client_auth_filter.cc
  6. 6
      src/core/lib/security/transport/security_handshaker.cc
  7. 4
      src/core/lib/security/transport/server_auth_filter.cc
  8. 7
      src/core/lib/slice/b64.cc
  9. 3
      src/core/lib/slice/b64.h
  10. 23
      src/core/lib/surface/call.cc
  11. 13
      src/core/lib/surface/call.h
  12. 3
      src/core/lib/surface/call_log_batch.cc
  13. 10
      src/core/lib/surface/channel.cc
  14. 19
      src/core/lib/surface/channel.h
  15. 7
      src/core/lib/transport/status_metadata.cc
  16. 18
      src/core/lib/transport/status_metadata.h

@ -302,7 +302,7 @@ static grpc_error* update_path_for_get(grpc_call_element* elem,
size_t estimated_len = GRPC_SLICE_LENGTH(path_slice);
estimated_len++; /* for the '?' */
estimated_len += grpc_base64_estimate_encoded_size(
batch->payload->send_message.send_message->length(), true /* url_safe */,
batch->payload->send_message.send_message->length(),
false /* multi_line */);
grpc_core::UnmanagedMemorySlice path_with_query_slice(estimated_len);
/* memcopy individual pieces into this slice */

@ -118,7 +118,7 @@ class grpc_ssl_channel_security_connector final
}
void add_handshakers(const grpc_channel_args* args,
grpc_pollset_set* interested_parties,
grpc_pollset_set* /*interested_parties*/,
grpc_core::HandshakeManager* handshake_mgr) override {
// Instantiate TSI handshaker.
tsi_handshaker* tsi_hs = nullptr;
@ -136,7 +136,7 @@ class grpc_ssl_channel_security_connector final
handshake_mgr->Add(grpc_core::SecurityHandshakerCreate(tsi_hs, this, args));
}
void check_peer(tsi_peer peer, grpc_endpoint* ep,
void check_peer(tsi_peer peer, grpc_endpoint* /*ep*/,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) override {
const char* target_name = overridden_target_name_ != nullptr
@ -188,7 +188,7 @@ class grpc_ssl_channel_security_connector final
bool check_call_host(grpc_core::StringView host,
grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_closure* /*on_call_host_checked*/,
grpc_error** error) override {
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);
@ -207,7 +207,7 @@ class grpc_ssl_channel_security_connector final
return true;
}
void cancel_check_call_host(grpc_closure* on_call_host_checked,
void cancel_check_call_host(grpc_closure* /*on_call_host_checked*/,
grpc_error* error) override {
GRPC_ERROR_UNREF(error);
}
@ -281,7 +281,7 @@ class grpc_ssl_server_security_connector
}
void add_handshakers(const grpc_channel_args* args,
grpc_pollset_set* interested_parties,
grpc_pollset_set* /*interested_parties*/,
grpc_core::HandshakeManager* handshake_mgr) override {
// Instantiate TSI handshaker.
try_fetch_ssl_server_credentials();
@ -297,7 +297,7 @@ class grpc_ssl_server_security_connector
handshake_mgr->Add(grpc_core::SecurityHandshakerCreate(tsi_hs, this, args));
}
void check_peer(tsi_peer peer, grpc_endpoint* ep,
void check_peer(tsi_peer peer, grpc_endpoint* /*ep*/,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) override {
grpc_error* error = ssl_check_peer(nullptr, &peer, auth_context);

@ -143,7 +143,7 @@ bool grpc_ssl_check_call_host(grpc_core::StringView host,
grpc_core::StringView target_name,
grpc_core::StringView overridden_target_name,
grpc_auth_context* auth_context,
grpc_closure* on_call_host_checked,
grpc_closure* /*on_call_host_checked*/,
grpc_error** error) {
grpc_security_status status = GRPC_SECURITY_ERROR;
tsi_peer peer = grpc_shallow_peer_from_ssl_auth_context(auth_context);

@ -141,7 +141,7 @@ SpiffeChannelSecurityConnector::~SpiffeChannelSecurityConnector() {
}
void SpiffeChannelSecurityConnector::add_handshakers(
const grpc_channel_args* args, grpc_pollset_set* interested_parties,
const grpc_channel_args* args, grpc_pollset_set* /*interested_parties*/,
grpc_core::HandshakeManager* handshake_mgr) {
if (RefreshHandshakerFactory() != GRPC_SECURITY_OK) {
gpr_log(GPR_ERROR, "Handshaker factory refresh failed.");
@ -164,7 +164,7 @@ void SpiffeChannelSecurityConnector::add_handshakers(
}
void SpiffeChannelSecurityConnector::check_peer(
tsi_peer peer, grpc_endpoint* ep,
tsi_peer peer, grpc_endpoint* /*ep*/,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) {
const char* target_name = overridden_target_name_ != nullptr
@ -239,7 +239,7 @@ bool SpiffeChannelSecurityConnector::check_call_host(
}
void SpiffeChannelSecurityConnector::cancel_check_call_host(
grpc_closure* on_call_host_checked, grpc_error* error) {
grpc_closure* /*on_call_host_checked*/, grpc_error* error) {
GRPC_ERROR_UNREF(error);
}
@ -419,7 +419,7 @@ SpiffeServerSecurityConnector::~SpiffeServerSecurityConnector() {
}
void SpiffeServerSecurityConnector::add_handshakers(
const grpc_channel_args* args, grpc_pollset_set* interested_parties,
const grpc_channel_args* args, grpc_pollset_set* /*interested_parties*/,
grpc_core::HandshakeManager* handshake_mgr) {
/* Refresh handshaker factory if needed. */
if (RefreshHandshakerFactory() != GRPC_SECURITY_OK) {
@ -439,7 +439,7 @@ void SpiffeServerSecurityConnector::add_handshakers(
}
void SpiffeServerSecurityConnector::check_peer(
tsi_peer peer, grpc_endpoint* ep,
tsi_peer peer, grpc_endpoint* /*ep*/,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) {
grpc_error* error = grpc_ssl_check_alpn(&peer);

@ -383,8 +383,8 @@ static void client_auth_set_pollset_or_pollset_set(
/* Destructor for call_data */
static void client_auth_destroy_call_elem(
grpc_call_element* elem, const grpc_call_final_info* final_info,
grpc_closure* ignored) {
grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->destroy();
}

@ -421,7 +421,7 @@ void SecurityHandshaker::Shutdown(grpc_error* why) {
GRPC_ERROR_UNREF(why);
}
void SecurityHandshaker::DoHandshake(grpc_tcp_server_acceptor* acceptor,
void SecurityHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) {
auto ref = Ref();
@ -446,9 +446,9 @@ class FailHandshaker : public Handshaker {
public:
const char* name() const override { return "security_fail"; }
void Shutdown(grpc_error* why) override { GRPC_ERROR_UNREF(why); }
void DoHandshake(grpc_tcp_server_acceptor* acceptor,
void DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_closure* on_handshake_done,
HandshakerArgs* args) override {
HandshakerArgs* /*args*/) override {
GRPC_CLOSURE_SCHED(on_handshake_done,
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to create security handshaker"));

@ -286,8 +286,8 @@ static grpc_error* server_auth_init_call_elem(
/* Destructor for call_data */
static void server_auth_destroy_call_elem(
grpc_call_element* elem, const grpc_call_final_info* final_info,
grpc_closure* ignored) {
grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->~call_data();
}

@ -59,14 +59,13 @@ static const char base64_url_safe_chars[] =
char* grpc_base64_encode(const void* vdata, size_t data_size, int url_safe,
int multiline) {
size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
grpc_base64_estimate_encoded_size(data_size, multiline);
char* result = static_cast<char*>(gpr_malloc(result_projected_size));
grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
return result;
}
size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
int multiline) {
size_t grpc_base64_estimate_encoded_size(size_t data_size, int multiline) {
size_t result_projected_size =
4 * ((data_size + 3) / 3) +
2 * (multiline ? (data_size / (3 * GRPC_BASE64_MULTILINE_NUM_BLOCKS))
@ -81,7 +80,7 @@ void grpc_base64_encode_core(char* result, const void* vdata, size_t data_size,
const char* base64_chars =
url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
const size_t result_projected_size =
grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
grpc_base64_estimate_encoded_size(data_size, multiline);
char* current = result;
size_t num_blocks = 0;

@ -31,8 +31,7 @@ char* grpc_base64_encode(const void* data, size_t data_size, int url_safe,
/* estimate the upper bound on size of base64 encoded data. The actual size
* is guaranteed to be less than or equal to the size returned here. */
size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,
int multiline);
size_t grpc_base64_estimate_encoded_size(size_t data_size, int multiline);
/* Encodes data using base64 and write it to memory pointed to by result. It is
* the caller's responsibility to allocate enough memory in |result| to fit the

@ -511,7 +511,7 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
GRPC_CALL_STACK_UNREF(CALL_STACK_FROM_CALL(c), REF_REASON);
}
static void release_call(void* call, grpc_error* error) {
static void release_call(void* call, grpc_error* /*error*/) {
grpc_call* c = static_cast<grpc_call*>(call);
grpc_channel* channel = c->channel;
grpc_core::Arena* arena = c->arena;
@ -520,7 +520,7 @@ static void release_call(void* call, grpc_error* error) {
GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
}
static void destroy_call(void* call, grpc_error* error) {
static void destroy_call(void* call, grpc_error* /*error*/) {
GPR_TIMER_SCOPE("destroy_call", 0);
size_t i;
int ii;
@ -617,7 +617,7 @@ grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
// This is called via the call combiner to start sending a batch down
// the filter stack.
static void execute_batch_in_call_combiner(void* arg, grpc_error* ignored) {
static void execute_batch_in_call_combiner(void* arg, grpc_error* /*ignored*/) {
GPR_TIMER_SCOPE("execute_batch_in_call_combiner", 0);
grpc_transport_stream_op_batch* batch =
static_cast<grpc_transport_stream_op_batch*>(arg);
@ -678,7 +678,7 @@ typedef struct {
// The on_complete callback used when sending a cancel_stream batch down
// the filter stack. Yields the call combiner when the batch is done.
static void done_termination(void* arg, grpc_error* error) {
static void done_termination(void* arg, grpc_error* /*error*/) {
cancel_state* state = static_cast<cancel_state*>(arg);
GRPC_CALL_COMBINER_STOP(&state->call->call_combiner,
"on_complete for cancel_stream op");
@ -805,9 +805,10 @@ uint32_t grpc_call_test_only_get_message_flags(grpc_call* call) {
return flags;
}
static void destroy_encodings_accepted_by_peer(void* p) { return; }
// No-op cleanup callback for the accepted-encodings user-data slot: returns
// immediately, so the stored value evidently needs no deallocation
// (presumably an integer bitmask packed into the pointer — confirm at the
// grpc_mdelem_set_user_data call site, which is outside this hunk).
static void destroy_encodings_accepted_by_peer(void* /*p*/) { return; }
static void set_encodings_accepted_by_peer(grpc_call* call, grpc_mdelem mdel,
static void set_encodings_accepted_by_peer(grpc_call* /*call*/,
grpc_mdelem mdel,
uint32_t* encodings_accepted_by_peer,
bool stream_encoding) {
size_t i;
@ -1152,7 +1153,7 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
}
static void finish_batch_completion(void* user_data,
grpc_cq_completion* storage) {
grpc_cq_completion* /*storage*/) {
batch_control* bctl = static_cast<batch_control*>(user_data);
grpc_call* call = bctl->call;
bctl->call = nullptr;
@ -1545,7 +1546,7 @@ static void finish_batch(void* bctlp, grpc_error* error) {
finish_batch_step(bctl);
}
static void free_no_op_completion(void* p, grpc_cq_completion* completion) {
static void free_no_op_completion(void* /*p*/, grpc_cq_completion* completion) {
gpr_free(completion);
}
@ -1563,7 +1564,7 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
grpc_transport_stream_op_batch* stream_op;
grpc_transport_stream_op_batch_payload* stream_op_payload;
GRPC_CALL_LOG_BATCH(GPR_INFO, call, ops, nops, notify_tag);
GRPC_CALL_LOG_BATCH(GPR_INFO, ops, nops);
if (nops == 0) {
if (!is_notify_tag_closure) {
@ -1748,8 +1749,8 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
call->sent_final_op = true;
GPR_ASSERT(call->send_extra_metadata_count == 0);
call->send_extra_metadata_count = 1;
call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
call->channel, op->data.send_status_from_server.status);
call->send_extra_metadata[0].md = grpc_get_reffed_status_elem(
op->data.send_status_from_server.status);
grpc_error* status_error =
op->data.send_status_from_server.status == GRPC_STATUS_OK
? GRPC_ERROR_NONE

@ -90,8 +90,7 @@ void grpc_call_cancel_internal(grpc_call* call);
grpc_call* grpc_call_from_top_element(grpc_call_element* surface_element);
void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity,
grpc_call* call, const grpc_op* ops, size_t nops,
void* tag);
const grpc_op* ops, size_t nops);
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
@ -101,11 +100,11 @@ void grpc_call_context_set(grpc_call* call, grpc_context_index elem,
/* Get a context pointer. */
void* grpc_call_context_get(grpc_call* call, grpc_context_index elem);
#define GRPC_CALL_LOG_BATCH(sev, call, ops, nops, tag) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace)) { \
grpc_call_log_batch(sev, call, ops, nops, tag); \
} \
#define GRPC_CALL_LOG_BATCH(sev, ops, nops) \
do { \
if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace)) { \
grpc_call_log_batch(sev, ops, nops); \
} \
} while (0)
uint8_t grpc_call_is_client(grpc_call* call);

@ -108,8 +108,7 @@ char* grpc_op_string(const grpc_op* op) {
}
void grpc_call_log_batch(const char* file, int line, gpr_log_severity severity,
grpc_call* call, const grpc_op* ops, size_t nops,
void* tag) {
const grpc_op* ops, size_t nops) {
char* tmp;
size_t i;
for (i = 0; i < nops; i++) {

@ -463,7 +463,7 @@ grpc_call* grpc_channel_create_registered_call(
return call;
}
static void destroy_channel(void* arg, grpc_error* error) {
static void destroy_channel(void* arg, grpc_error* /*error*/) {
grpc_channel* channel = static_cast<grpc_channel*>(arg);
if (channel->channelz_node != nullptr) {
if (channel->channelz_node->parent_uuid() > 0) {
@ -512,11 +512,3 @@ void grpc_channel_destroy(grpc_channel* channel) {
GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
}
grpc_mdelem grpc_channel_get_reffed_status_elem_slowpath(grpc_channel* channel,
int i) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(i, tmp);
return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
grpc_core::UnmanagedMemorySlice(tmp));
}

@ -55,25 +55,6 @@ grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel);
grpc_core::channelz::ChannelNode* grpc_channel_get_channelz_node(
grpc_channel* channel);
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
grpc_mdelem grpc_channel_get_reffed_status_elem_slowpath(grpc_channel* channel,
int status_code);
inline grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel,
int status_code) {
switch (status_code) {
case 0:
return GRPC_MDELEM_GRPC_STATUS_0;
case 1:
return GRPC_MDELEM_GRPC_STATUS_1;
case 2:
return GRPC_MDELEM_GRPC_STATUS_2;
}
return grpc_channel_get_reffed_status_elem_slowpath(channel, status_code);
}
size_t grpc_channel_get_call_size_estimate(grpc_channel* channel);
void grpc_channel_update_call_size_estimate(grpc_channel* channel, size_t size);

@ -52,3 +52,10 @@ grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) {
md, destroy_status, (void*)static_cast<intptr_t>(status + STATUS_OFFSET));
return static_cast<grpc_status_code>(status);
}
/* Slow path for grpc_get_reffed_status_elem() (see status_metadata.h): builds
   a fresh "grpc-status: <status_code>" metadata element at runtime for codes
   that have no preallocated static mdelem. Per the header comment, the
   returned elem is owned by the caller. */
grpc_mdelem grpc_get_reffed_status_elem_slowpath(int status_code) {
/* Render the numeric code as decimal text into a stack buffer. */
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(status_code, tmp);
/* Pair the interned "grpc-status" key with the freshly built value slice. */
return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
grpc_core::UnmanagedMemorySlice(tmp));
}

@ -24,7 +24,25 @@
#include <grpc/status.h>
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md);
/** Get a grpc_mdelem of grpc-status: X where X is the numeric value of
status_code.
The returned elem is owned by the caller. */
grpc_mdelem grpc_get_reffed_status_elem_slowpath(int status_code);
/* Fast path: status codes 0, 1, and 2 map to preallocated static metadata
   elements, avoiding any allocation; every other code falls through to the
   slow path, which constructs the element at runtime. */
inline grpc_mdelem grpc_get_reffed_status_elem(int status_code) {
switch (status_code) {
case 0:
return GRPC_MDELEM_GRPC_STATUS_0;
case 1:
return GRPC_MDELEM_GRPC_STATUS_1;
case 2:
return GRPC_MDELEM_GRPC_STATUS_2;
}
/* Uncommon status code: build (and ref) the mdelem dynamically. */
return grpc_get_reffed_status_elem_slowpath(status_code);
}
#endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_METADATA_H */

Loading…
Cancel
Save