Use static_cast rather than reinterpret_cast whenever possible

Branch: pull/14597/head
Author: Vijay Pai, 7 years ago
Parent: 673439d5bd
Commit: 7fed69b7ad
Changed files (24), with the number of changed lines per file:
  1. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (27)
  2. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (4)
  3. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (4)
  4. src/core/lib/gprpp/manual_constructor.h (2)
  5. src/core/lib/iomgr/udp_server.cc (6)
  6. src/core/lib/surface/lame_client.cc (10)
  7. src/cpp/client/secure_credentials.cc (4)
  8. src/cpp/common/channel_filter.h (12)
  9. src/cpp/server/dynamic_thread_pool.cc (3)
  10. src/cpp/server/secure_server_credentials.cc (4)
  11. src/cpp/thread_manager/thread_manager.cc (4)
  12. src/cpp/util/slice_cc.cc (8)
  13. src/php/ext/grpc/call_credentials.h (0)
  14. src/php/ext/grpc/channel.h (0)
  15. src/php/ext/grpc/channel_credentials.h (0)
  16. src/php/ext/grpc/server.h (0)
  17. src/php/ext/grpc/server_credentials.h (0)
  18. src/php/ext/grpc/timeval.h (0)
  19. test/core/end2end/cq_verifier_uv.cc (4)
  20. test/core/surface/completion_queue_threading_test.cc (2)
  21. test/core/util/mock_endpoint.cc (19)
  22. test/cpp/qps/client_async.cc (4)
  23. test/cpp/qps/server_async.cc (6)
  24. test/cpp/util/slice_test.cc (4)
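Most of the hunks below follow a single pattern: a C-style callback receives its state as a void*, and the body recovers the concrete type. Because the pointer really was that type before being erased to void*, static_cast is sufficient to restore it; reinterpret_cast is only needed for conversions between unrelated pointer types. The following is a minimal self-contained sketch of that pattern, not gRPC code; the Callback, Counter, and OnTick names are invented for illustration.

#include <cassert>
#include <cstdio>

struct Callback {
  void (*fn)(void* arg);
  void* arg;
};

struct Counter {
  int value = 0;
};

// Callback body: restore the erased pointer with static_cast, the same move
// this commit makes for BalancerCallState, CallData, ChannelData, and friends.
void OnTick(void* arg) {
  Counter* counter = static_cast<Counter*>(arg);  // void* -> Counter*
  ++counter->value;
}

int main() {
  Counter c;
  Callback cb{OnTick, &c};  // the pointer is type-erased to void* here
  cb.fn(cb.arg);
  cb.fn(cb.arg);
  assert(c.value == 2);
  std::printf("ticks: %d\n", c.value);
  return 0;
}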

src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -191,7 +191,7 @@ class GrpcLb : public LoadBalancingPolicy {
~BalancerCallState();
GrpcLb* grpclb_policy() const {
- return reinterpret_cast<GrpcLb*>(grpclb_policy_.get());
+ return static_cast<GrpcLb*>(grpclb_policy_.get());
}
void ScheduleNextClientLoadReportLocked();
@@ -651,7 +651,7 @@ void GrpcLb::BalancerCallState::ScheduleNextClientLoadReportLocked() {
void GrpcLb::BalancerCallState::MaybeSendClientLoadReportLocked(
void* arg, grpc_error* error) {
- BalancerCallState* lb_calld = reinterpret_cast<BalancerCallState*>(arg);
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
lb_calld->client_load_report_timer_callback_pending_ = false;
if (error != GRPC_ERROR_NONE || lb_calld != grpclb_policy->lb_calld_.get()) {
@@ -721,7 +721,7 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() {
void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
grpc_error* error) {
- BalancerCallState* lb_calld = reinterpret_cast<BalancerCallState*>(arg);
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
lb_calld->send_message_payload_ = nullptr;
@@ -734,7 +734,7 @@ void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(void* arg,
void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg,
grpc_error* error) {
- BalancerCallState* lb_calld = reinterpret_cast<BalancerCallState*>(arg);
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
grpc_byte_buffer_destroy(lb_calld->send_message_payload_);
lb_calld->send_message_payload_ = nullptr;
// If we attempted to send a client load report before the initial request was
@@ -749,7 +749,7 @@ void GrpcLb::BalancerCallState::OnInitialRequestSentLocked(void* arg,
void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
void* arg, grpc_error* error) {
- BalancerCallState* lb_calld = reinterpret_cast<BalancerCallState*>(arg);
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
// Empty payload means the LB call was cancelled.
if (lb_calld != grpclb_policy->lb_calld_.get() ||
@@ -882,7 +882,7 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked(
void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
void* arg, grpc_error* error) {
- BalancerCallState* lb_calld = reinterpret_cast<BalancerCallState*>(arg);
+ BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
GrpcLb* grpclb_policy = lb_calld->grpclb_policy();
GPR_ASSERT(lb_calld->lb_call_ != nullptr);
if (grpc_lb_glb_trace.enabled()) {
@@ -1283,7 +1283,7 @@ void GrpcLb::ProcessChannelArgsLocked(const grpc_channel_args& args) {
return;
}
const grpc_lb_addresses* addresses =
- reinterpret_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
+ static_cast<const grpc_lb_addresses*>(arg->value.pointer.p);
// Update fallback address list.
if (fallback_backend_addresses_ != nullptr) {
grpc_lb_addresses_destroy(fallback_backend_addresses_);
@@ -1426,7 +1426,7 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
}
void GrpcLb::OnBalancerCallRetryTimerLocked(void* arg, grpc_error* error) {
- GrpcLb* grpclb_policy = reinterpret_cast<GrpcLb*>(arg);
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
grpclb_policy->retry_timer_callback_pending_ = false;
if (!grpclb_policy->shutting_down_ && error == GRPC_ERROR_NONE &&
grpclb_policy->lb_calld_ == nullptr) {
@@ -1503,8 +1503,7 @@ grpc_error* AddLbTokenToInitialMetadata(
// Destroy function used when embedding client stats in call context.
void DestroyClientStats(void* arg) {
- grpc_grpclb_client_stats_unref(
- reinterpret_cast<grpc_grpclb_client_stats*>(arg));
+ grpc_grpclb_client_stats_unref(static_cast<grpc_grpclb_client_stats*>(arg));
}
void GrpcLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
@@ -1540,7 +1539,7 @@ void GrpcLb::PendingPickSetMetadataAndContext(PendingPick* pp) {
* reference to its associated round robin instance. We wrap this closure in
* order to unref the round robin instance upon its invocation */
void GrpcLb::OnPendingPickComplete(void* arg, grpc_error* error) {
- PendingPick* pp = reinterpret_cast<PendingPick*>(arg);
+ PendingPick* pp = static_cast<PendingPick*>(arg);
PendingPickSetMetadataAndContext(pp);
GRPC_CLOSURE_SCHED(pp->original_on_complete, GRPC_ERROR_REF(error));
Delete(pp);
@@ -1739,7 +1738,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
grpc_error* error) {
- GrpcLb* grpclb_policy = reinterpret_cast<GrpcLb*>(arg);
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
if (grpclb_policy->shutting_down_ || error != GRPC_ERROR_NONE) {
grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_reresolution_requested");
return;
@@ -1820,7 +1819,7 @@ void GrpcLb::UpdateConnectivityStateFromRoundRobinPolicyLocked(
void GrpcLb::OnRoundRobinConnectivityChangedLocked(void* arg,
grpc_error* error) {
- GrpcLb* grpclb_policy = reinterpret_cast<GrpcLb*>(arg);
+ GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
if (grpclb_policy->shutting_down_) {
grpclb_policy->Unref(DEBUG_LOCATION, "on_rr_connectivity_changed");
return;
@@ -1848,7 +1847,7 @@ class GrpcLbFactory : public LoadBalancingPolicyFactory {
return nullptr;
}
grpc_lb_addresses* addresses =
- reinterpret_cast<grpc_lb_addresses*>(arg->value.pointer.p);
+ static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;

src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -393,8 +393,8 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
}
void PickFirst::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = reinterpret_cast<grpc_lb_subchannel_data*>(arg);
- PickFirst* p = reinterpret_cast<PickFirst*>(sd->subchannel_list->policy);
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
+ PickFirst* p = static_cast<PickFirst*>(sd->subchannel_list->policy);
if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR

src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -412,8 +412,8 @@ void RoundRobin::UpdateConnectivityStatusLocked(grpc_lb_subchannel_data* sd,
}
void RoundRobin::OnConnectivityChangedLocked(void* arg, grpc_error* error) {
- grpc_lb_subchannel_data* sd = reinterpret_cast<grpc_lb_subchannel_data*>(arg);
- RoundRobin* p = reinterpret_cast<RoundRobin*>(sd->subchannel_list->policy);
+ grpc_lb_subchannel_data* sd = static_cast<grpc_lb_subchannel_data*>(arg);
+ RoundRobin* p = static_cast<RoundRobin*>(sd->subchannel_list->policy);
if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(
GPR_DEBUG,

src/core/lib/gprpp/manual_constructor.h
@@ -156,7 +156,7 @@ class PolymorphicManualConstructor {
static_assert(
manual_ctor_impl::is_one_of<DerivedType, DerivedTypes...>::value,
"DerivedType must be one of the predeclared DerivedTypes");
- GPR_ASSERT(reinterpret_cast<BaseType*>(static_cast<DerivedType*>(p)) == p);
+ GPR_ASSERT(static_cast<BaseType*>(p) == p);
}
typename std::aligned_storage<
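The manual_constructor.h hunk above touches the one spot where the choice of cast carries real meaning: static_cast between a base and a derived class knows the inheritance relationship and adjusts the pointer value when the base subobject sits at a nonzero offset, while reinterpret_cast never changes the address. A small sketch follows; Base1, Base2, and Derived are invented types, and the offset behaviour shown reflects the usual ABI layout under multiple inheritance rather than a strict language guarantee.

#include <cstdio>

struct Base1 { int a; };
struct Base2 { int b; };
struct Derived : Base1, Base2 {};

int main() {
  Derived d;
  Derived* p = &d;

  // static_cast understands the inheritance relationship and, with multiple
  // inheritance, typically shifts the address to the Base2 subobject.
  Base2* via_static = static_cast<Base2*>(p);

  // reinterpret_cast reuses the address bits unchanged, so it still points at
  // the start of Derived, which is not where the Base2 subobject lives.
  Base2* via_reinterpret = reinterpret_cast<Base2*>(p);

  std::printf("static_cast moved the pointer:      %s\n",
              static_cast<void*>(via_static) == static_cast<void*>(p) ? "no" : "yes");
  std::printf("reinterpret_cast moved the pointer: %s\n",
              static_cast<void*>(via_reinterpret) == static_cast<void*>(p) ? "no" : "yes");
  return 0;
}

That adjustment is exactly what the GPR_ASSERT above checks for: the converted BaseType* must compare equal to the original pointer, i.e. the conversion must not shift the address.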

src/core/lib/iomgr/udp_server.cc
@@ -365,7 +365,7 @@ error:
}
static void do_read(void* arg, grpc_error* error) {
- grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+ grpc_udp_listener* sp = static_cast<grpc_udp_listener*>(arg);
GPR_ASSERT(sp->read_cb && error == GRPC_ERROR_NONE);
/* TODO: the reason we hold server->mu here is merely to prevent fd
* shutdown while we are reading. However, it blocks do_write(). Switch to
@@ -419,7 +419,7 @@ static void on_read(void* arg, grpc_error* error) {
// Wrapper of grpc_fd_notify_on_write() with a grpc_closure callback interface.
void fd_notify_on_write_wrapper(void* arg, grpc_error* error) {
- grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+ grpc_udp_listener* sp = static_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&sp->server->mu);
if (!sp->notify_on_write_armed) {
grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
@@ -429,7 +429,7 @@ void fd_notify_on_write_wrapper(void* arg, grpc_error* error) {
}
static void do_write(void* arg, grpc_error* error) {
- grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+ grpc_udp_listener* sp = static_cast<grpc_udp_listener*>(arg);
gpr_mu_lock(&sp->server->mu);
if (sp->already_shutdown) {
// If fd has been shutdown, don't write any more and re-arm notification.

src/core/lib/surface/lame_client.cc
@@ -52,14 +52,14 @@ struct ChannelData {
};
static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
- CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
+ CallData* calld = static_cast<CallData*>(elem->call_data);
bool expected = false;
if (!calld->filled_metadata.compare_exchange_strong(
expected, true, grpc_core::memory_order_relaxed,
grpc_core::memory_order_relaxed)) {
return;
}
- ChannelData* chand = reinterpret_cast<ChannelData*>(elem->channel_data);
+ ChannelData* chand = static_cast<ChannelData*>(elem->channel_data);
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
@@ -78,7 +78,7 @@ static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
static void lame_start_transport_stream_op_batch(
grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
- CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
+ CallData* calld = static_cast<CallData*>(elem->call_data);
if (op->recv_initial_metadata) {
fill_metadata(elem,
op->payload->recv_initial_metadata.recv_initial_metadata);
@@ -119,7 +119,7 @@ static void lame_start_transport_op(grpc_channel_element* elem,
static grpc_error* init_call_elem(grpc_call_element* elem,
const grpc_call_element_args* args) {
- CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
+ CallData* calld = static_cast<CallData*>(elem->call_data);
calld->call_combiner = args->call_combiner;
return GRPC_ERROR_NONE;
}
@@ -172,7 +172,7 @@ grpc_channel* grpc_lame_client_channel_create(const char* target,
"error_message=%s)",
3, (target, (int)error_code, error_message));
GPR_ASSERT(elem->filter == &grpc_lame_filter);
- auto chand = reinterpret_cast<grpc_core::ChannelData*>(elem->channel_data);
+ auto chand = static_cast<grpc_core::ChannelData*>(elem->channel_data);
chand->error_code = error_code;
chand->error_message = error_message;

src/cpp/client/secure_credentials.cc
@@ -168,7 +168,7 @@ std::shared_ptr<CallCredentials> CompositeCallCredentials(
void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) {
if (wrapper == nullptr) return;
MetadataCredentialsPluginWrapper* w =
- reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
+ static_cast<MetadataCredentialsPluginWrapper*>(wrapper);
delete w;
}
@@ -180,7 +180,7 @@ int MetadataCredentialsPluginWrapper::GetMetadata(
const char** error_details) {
GPR_ASSERT(wrapper);
MetadataCredentialsPluginWrapper* w =
- reinterpret_cast<MetadataCredentialsPluginWrapper*>(wrapper);
+ static_cast<MetadataCredentialsPluginWrapper*>(wrapper);
if (!w->plugin_) {
*num_creds_md = 0;
*status = GRPC_STATUS_OK;

src/cpp/common/channel_filter.h
@@ -283,7 +283,7 @@ class ChannelFilter final {
static void DestroyChannelElement(grpc_channel_element* elem) {
ChannelDataType* channel_data =
- reinterpret_cast<ChannelDataType*>(elem->channel_data);
+ static_cast<ChannelDataType*>(elem->channel_data);
channel_data->Destroy(elem);
channel_data->~ChannelDataType();
}
@@ -291,7 +291,7 @@ class ChannelFilter final {
static void StartTransportOp(grpc_channel_element* elem,
grpc_transport_op* op) {
ChannelDataType* channel_data =
- reinterpret_cast<ChannelDataType*>(elem->channel_data);
+ static_cast<ChannelDataType*>(elem->channel_data);
TransportOp op_wrapper(op);
channel_data->StartTransportOp(elem, &op_wrapper);
}
@@ -299,7 +299,7 @@ class ChannelFilter final {
static void GetChannelInfo(grpc_channel_element* elem,
const grpc_channel_info* channel_info) {
ChannelDataType* channel_data =
- reinterpret_cast<ChannelDataType*>(elem->channel_data);
+ static_cast<ChannelDataType*>(elem->channel_data);
channel_data->GetInfo(elem, channel_info);
}
@@ -315,21 +315,21 @@ class ChannelFilter final {
static void DestroyCallElement(grpc_call_element* elem,
const grpc_call_final_info* final_info,
grpc_closure* then_call_closure) {
- CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
call_data->Destroy(elem, final_info, then_call_closure);
call_data->~CallDataType();
}
static void StartTransportStreamOpBatch(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
- CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
TransportStreamOpBatch op_wrapper(op);
call_data->StartTransportStreamOpBatch(elem, &op_wrapper);
}
static void SetPollsetOrPollsetSet(grpc_call_element* elem,
grpc_polling_entity* pollent) {
- CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
+ CallDataType* call_data = static_cast<CallDataType*>(elem->call_data);
call_data->SetPollsetOrPollsetSet(elem, pollent);
}
};

src/cpp/server/dynamic_thread_pool.cc
@@ -30,8 +30,7 @@ DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
: pool_(pool),
thd_("dynamic thread pool thread",
[](void* th) {
- reinterpret_cast<DynamicThreadPool::DynamicThread*>(th)
- ->ThreadFunc();
+ static_cast<DynamicThreadPool::DynamicThread*>(th)->ThreadFunc();
},
this) {
thd_.Start();

src/cpp/server/secure_server_credentials.cc
@@ -29,14 +29,14 @@
namespace grpc {
void AuthMetadataProcessorAyncWrapper::Destroy(void* wrapper) {
- auto* w = reinterpret_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
+ auto* w = static_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
delete w;
}
void AuthMetadataProcessorAyncWrapper::Process(
void* wrapper, grpc_auth_context* context, const grpc_metadata* md,
size_t num_md, grpc_process_auth_metadata_done_cb cb, void* user_data) {
- auto* w = reinterpret_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
+ auto* w = static_cast<AuthMetadataProcessorAyncWrapper*>(wrapper);
if (!w->processor_) {
// Early exit.
cb(user_data, nullptr, 0, nullptr, 0, GRPC_STATUS_OK, nullptr);

src/cpp/thread_manager/thread_manager.cc
@@ -33,9 +33,7 @@ ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr)
// ~WorkerThread().
thd_ = grpc_core::Thread(
"sync server thread",
- [](void* th) {
- reinterpret_cast<ThreadManager::WorkerThread*>(th)->Run();
- },
+ [](void* th) { static_cast<ThreadManager::WorkerThread*>(th)->Run(); },
this);
thd_.Start();
}
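The two thread-pool hunks above use a related idiom: a captureless lambda converts to the plain void (*)(void*) entry point that the thread abstraction expects, the object's this pointer is smuggled through the void* argument, and the lambda body recovers it with static_cast. Here is a stand-alone sketch of that idiom; thread_fn, start_thread, and Runner are invented names, and a real implementation would spawn an actual thread rather than call the function inline.

#include <cassert>

using thread_fn = void (*)(void* arg);

// Stand-in for a C-style thread-start API: it only understands a function
// pointer plus an opaque void* argument.
void start_thread(thread_fn fn, void* arg) {
  fn(arg);  // a real implementation would run this on a new thread
}

class Runner {
 public:
  void Start() {
    // The lambda captures nothing, so it converts to thread_fn; `this` is
    // passed through the void* parameter and recovered with static_cast.
    start_thread([](void* self) { static_cast<Runner*>(self)->Run(); }, this);
  }
  int runs = 0;

 private:
  void Run() { ++runs; }
};

int main() {
  Runner r;
  r.Start();
  assert(r.runs == 1);
  return 0;
}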

src/cpp/util/slice_cc.cc
@@ -32,15 +32,15 @@ Slice::Slice(grpc_slice slice, StealRef) : slice_(slice) {}
Slice::Slice(size_t len) : slice_(grpc_slice_malloc(len)) {}
Slice::Slice(const void* buf, size_t len)
- : slice_(grpc_slice_from_copied_buffer(reinterpret_cast<const char*>(buf),
- len)) {}
+ : slice_(
+ grpc_slice_from_copied_buffer(static_cast<const char*>(buf), len)) {}
Slice::Slice(const grpc::string& str)
: slice_(grpc_slice_from_copied_buffer(str.c_str(), str.length())) {}
Slice::Slice(const void* buf, size_t len, StaticSlice)
- : slice_(grpc_slice_from_static_buffer(reinterpret_cast<const char*>(buf),
- len)) {}
+ : slice_(
+ grpc_slice_from_static_buffer(static_cast<const char*>(buf), len)) {}
Slice::Slice(const Slice& other) : slice_(grpc_slice_ref(other.slice_)) {}

test/core/end2end/cq_verifier_uv.cc
@@ -58,7 +58,7 @@ static void timer_close_cb(uv_handle_t* handle) {
void cq_verifier_destroy(cq_verifier* v) {
cq_verify(v);
uv_close((uv_handle_t*)&v->timer, timer_close_cb);
- while (reinterpret_cast<timer_state>(v->timer.data) != TIMER_CLOSED) {
+ while (static_cast<timer_state>(v->timer.data) != TIMER_CLOSED) {
uv_run(uv_default_loop(), UV_RUN_NOWAIT);
}
gpr_free(v);
@@ -85,7 +85,7 @@ grpc_event cq_verifier_next_event(cq_verifier* v, int timeout_seconds) {
ev = grpc_completion_queue_next(v->cq, gpr_inf_past(GPR_CLOCK_MONOTONIC),
NULL);
// Stop the loop if the timer goes off or we get a non-timeout event
- while ((reinterpret_cast<timer_state>(v->timer.data) != TIMER_TRIGGERED) &&
+ while ((static_cast<timer_state>(v->timer.data) != TIMER_TRIGGERED) &&
ev.type == GRPC_QUEUE_TIMEOUT) {
uv_run(uv_default_loop(), UV_RUN_ONCE);
ev = grpc_completion_queue_next(v->cq, gpr_inf_past(GPR_CLOCK_MONOTONIC),

test/core/surface/completion_queue_threading_test.cc
@@ -219,7 +219,7 @@ static void test_threading(size_t producers, size_t consumers) {
"test_threading", producers, consumers);
/* start all threads: they will wait for phase1 */
- grpc_core::Thread* threads = reinterpret_cast<grpc_core::Thread*>(
+ grpc_core::Thread* threads = static_cast<grpc_core::Thread*>(
gpr_malloc(sizeof(*threads) * (producers + consumers)));
for (i = 0; i < producers + consumers; i++) {
gpr_event_init(&options[i].on_started);

test/core/util/mock_endpoint.cc
@@ -30,7 +30,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/sockaddr.h"
- typedef struct grpc_mock_endpoint {
+ typedef struct mock_endpoint {
grpc_endpoint base;
gpr_mu mu;
void (*on_write)(grpc_slice slice);
@@ -38,11 +38,11 @@ typedef struct grpc_mock_endpoint {
grpc_slice_buffer* on_read_out;
grpc_closure* on_read;
grpc_resource_user* resource_user;
- } grpc_mock_endpoint;
+ } mock_endpoint;
static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->read_buffer.count > 0) {
grpc_slice_buffer_swap(&m->read_buffer, slices);
@@ -56,7 +56,7 @@ static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
for (size_t i = 0; i < slices->count; i++) {
m->on_write(slices->slices[i]);
}
@@ -72,7 +72,7 @@ static void me_delete_from_pollset_set(grpc_endpoint* ep,
grpc_pollset_set* pollset) {}
static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->on_read) {
GRPC_CLOSURE_SCHED(m->on_read,
@@ -86,7 +86,7 @@ static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
}
static void me_destroy(grpc_endpoint* ep) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
grpc_slice_buffer_destroy(&m->read_buffer);
grpc_resource_user_unref(m->resource_user);
gpr_free(m);
@@ -97,7 +97,7 @@ static char* me_get_peer(grpc_endpoint* ep) {
}
static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
return m->resource_user;
}
@@ -118,8 +118,7 @@ static const grpc_endpoint_vtable vtable = {
grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
grpc_resource_quota* resource_quota) {
- grpc_mock_endpoint* m =
- static_cast<grpc_mock_endpoint*>(gpr_malloc(sizeof(*m)));
+ mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
m->base.vtable = &vtable;
char* name;
gpr_asprintf(&name, "mock_endpoint_%" PRIxPTR, (intptr_t)m);
@@ -133,7 +132,7 @@ grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
}
void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice) {
- grpc_mock_endpoint* m = reinterpret_cast<grpc_mock_endpoint*>(ep);
+ mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
gpr_mu_lock(&m->mu);
if (m->on_read != nullptr) {
grpc_slice_buffer_add(m->on_read_out, slice);
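Note that the mock_endpoint.cc hunks above keep reinterpret_cast even while renaming the struct: the cast there goes from grpc_endpoint* to the wrapping struct, and those two types are unrelated as far as the C++ type system is concerned (the "base" is simply the struct's first member), so static_cast is not an option. A small sketch of that C-style layout trick, with base_iface, impl, and bump as invented names:

#include <cstdio>

// C-style "inheritance": the wrapping struct embeds the interface struct as
// its first member, and code downcasts base_iface* -> impl*.
struct base_iface {
  const char* name;
};

struct impl {
  base_iface base;  // must be the first member for the downcast to be valid
  int counter;
};

void bump(base_iface* b) {
  // static_cast<impl*>(b) would not compile: impl is not derived from
  // base_iface in the C++ sense, so reinterpret_cast (or a detour through
  // void*) is what remains.
  impl* self = reinterpret_cast<impl*>(b);
  ++self->counter;
}

int main() {
  impl i{{"demo"}, 0};
  bump(&i.base);
  bump(&i.base);
  std::printf("%s: %d\n", i.base.name, i.counter);
  return 0;
}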

test/cpp/qps/client_async.cc
@@ -50,9 +50,9 @@ class ClientRpcContext {
// next state, return false if done. Collect stats when appropriate
virtual bool RunNextState(bool, HistogramEntry* entry) = 0;
virtual void StartNewClone(CompletionQueue* cq) = 0;
- static void* tag(ClientRpcContext* c) { return reinterpret_cast<void*>(c); }
+ static void* tag(ClientRpcContext* c) { return static_cast<void*>(c); }
static ClientRpcContext* detag(void* t) {
- return reinterpret_cast<ClientRpcContext*>(t);
+ return static_cast<ClientRpcContext*>(t);
}
virtual void Start(CompletionQueue* cq, const ClientConfig& config) = 0;

test/cpp/qps/server_async.cc
@@ -240,11 +240,9 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
private:
std::mutex mu_;
};
- static void* tag(ServerRpcContext* func) {
- return reinterpret_cast<void*>(func);
- }
+ static void* tag(ServerRpcContext* func) { return static_cast<void*>(func); }
static ServerRpcContext* detag(void* tag) {
- return reinterpret_cast<ServerRpcContext*>(tag);
+ return static_cast<ServerRpcContext*>(tag);
}
class ServerRpcContextUnaryImpl final : public ServerRpcContext {

test/cpp/util/slice_test.cc
@@ -67,7 +67,7 @@ TEST_F(SliceTest, StaticBuf) {
TEST_F(SliceTest, SliceNew) {
char* x = new char[strlen(kContent) + 1];
strcpy(x, kContent);
- Slice spp(x, strlen(x), [](void* p) { delete[] reinterpret_cast<char*>(p); });
+ Slice spp(x, strlen(x), [](void* p) { delete[] static_cast<char*>(p); });
CheckSlice(spp, kContent);
}
@@ -86,7 +86,7 @@ TEST_F(SliceTest, SliceNewWithUserData) {
strcpy(t->x, kContent);
Slice spp(t->x, strlen(t->x),
[](void* p) {
- auto* t = reinterpret_cast<stest*>(p);
+ auto* t = static_cast<stest*>(p);
delete[] t->x;
delete t;
},
