C++ cast for others

reviewable/pr24841/r1
Esun Kim 4 years ago
parent d2bff16c25
commit e0d8c498a6
48 changed files (changed lines per file in parentheses):
  1. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (2)
  2. src/core/ext/transport/inproc/inproc_transport.cc (3)
  3. src/core/lib/gpr/alloc.cc (3)
  4. src/core/lib/gpr/log.cc (4)
  5. src/core/lib/gpr/string.cc (2)
  6. src/core/lib/gpr/sync.cc (2)
  7. src/core/lib/iomgr/call_combiner.cc (13)
  8. src/core/lib/iomgr/combiner.cc (3)
  9. src/core/lib/iomgr/error.cc (7)
  10. src/core/lib/iomgr/ev_epoll1_linux.cc (26)
  11. src/core/lib/iomgr/ev_epollex_linux.cc (22)
  12. src/core/lib/iomgr/ev_poll_posix.cc (10)
  13. src/core/lib/iomgr/executor.cc (3)
  14. src/core/lib/iomgr/lockfree_event.cc (15)
  15. src/core/lib/iomgr/pollset_set_custom.cc (2)
  16. src/core/lib/iomgr/resource_quota.cc (8)
  17. src/core/lib/iomgr/sockaddr_utils.cc (14)
  18. src/core/lib/iomgr/tcp_posix.cc (10)
  19. src/core/lib/iomgr/unix_sockets_posix.cc (9)
  20. src/core/lib/surface/call.cc (24)
  21. src/core/lib/surface/completion_queue.cc (31)
  22. src/core/lib/transport/status_metadata.cc (3)
  23. src/core/tsi/fake_transport_security.cc (8)
  24. src/core/tsi/ssl_transport_security.cc (9)
  25. test/core/bad_ssl/bad_ssl_test.cc (2)
  26. test/core/end2end/cq_verifier.cc (7)
  27. test/core/end2end/tests/cancel_with_status.cc (3)
  28. test/core/end2end/tests/max_concurrent_streams.cc (2)
  29. test/core/end2end/tests/no_logging.cc (5)
  30. test/core/end2end/tests/resource_quota_server.cc (2)
  31. test/core/end2end/tests/shutdown_finishes_tags.cc (2)
  32. test/core/fling/fling_stream_test.cc (4)
  33. test/core/fling/fling_test.cc (4)
  34. test/core/fling/server.cc (2)
  35. test/core/gprpp/fork_test.cc (2)
  36. test/core/http/httpcli_test.cc (3)
  37. test/core/http/httpscli_test.cc (2)
  38. test/core/iomgr/timer_list_test.cc (2)
  39. test/core/transport/chttp2/stream_map_test.cc (4)
  40. test/core/tsi/transport_security_test_lib.cc (11)
  41. test/core/util/mock_endpoint.cc (3)
  42. test/core/util/passthru_endpoint.cc (5)
  43. test/cpp/common/channel_arguments_test.cc (4)
  44. test/cpp/microbenchmarks/bm_fullstack_trickle.cc (4)
  45. test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h (24)
  46. test/cpp/microbenchmarks/fullstack_streaming_pump.h (10)
  47. test/cpp/server/server_builder_with_socket_mutator_test.cc (2)
  48. test/cpp/util/grpc_cli.cc (2)
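In the hunks below, the old form of each changed line appears directly above its replacement. The change is mechanical throughout: every C-style cast is replaced with the narrowest named C++ cast that expresses the same conversion: static_cast for value conversions, reinterpret_cast for pointer/integer reinterpretation, and const_cast where only constness is dropped. A minimal standalone sketch of the pattern (names here are illustrative, not from the gRPC tree):

#include <cstdint>

struct Closure {};

void casts_before_and_after(void* tag, const char* msg) {
  // Old C-style forms:   (Closure*)tag   (intptr_t)tag   (char*)msg
  Closure* c = reinterpret_cast<Closure*>(tag);   // pointer reinterpretation
  intptr_t id = reinterpret_cast<intptr_t>(tag);  // pointer to integer
  char* s = const_cast<char*>(msg);               // removes const only
  int small = static_cast<int>(id);               // value conversion
  (void)c; (void)s; (void)small;
}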

@ -616,7 +616,7 @@ grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
metadata_buffer{grpc_chttp2_incoming_metadata_buffer(arena),
grpc_chttp2_incoming_metadata_buffer(arena)} {
if (server_data) {
id = static_cast<uint32_t>((uintptr_t)server_data);
id = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(server_data));
*t->accepting_stream = this;
grpc_chttp2_stream_map_add(&t->stream_map, id, this);
post_destructive_reclaimer(t);

@ -156,7 +156,8 @@ struct inproc_stream {
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, this);
} else {
// This is the server-side and is being called through accept_stream_cb
inproc_stream* cs = (inproc_stream*)server_data;
inproc_stream* cs = const_cast<inproc_stream*>(
static_cast<const inproc_stream*>(server_data));
other_side = cs;
// Ref the server-side stream on behalf of the client now
ref("inproc_init_stream:srv");

@ -66,7 +66,8 @@ void* gpr_malloc_aligned(size_t size, size_t alignment) {
GPR_ASSERT(((alignment - 1) & alignment) == 0); // Must be power of 2.
size_t extra = alignment - 1 + sizeof(void*);
void* p = gpr_malloc(size + extra);
void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1));
void** ret = reinterpret_cast<void**>(
(reinterpret_cast<uintptr_t>(p) + extra) & ~(alignment - 1));
ret[-1] = p;
return ret;
}
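As context for the cast change above: gpr_malloc_aligned over-allocates by alignment - 1 + sizeof(void*), masks the address down to the requested power-of-two alignment, and stashes the raw pointer one slot before the returned address so the matching free can recover it. A self-contained sketch of that technique, assuming plain malloc/free rather than gpr_malloc/gpr_free:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

void* aligned_malloc(std::size_t size, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // must be a power of two
  std::size_t extra = alignment - 1 + sizeof(void*);
  void* p = std::malloc(size + extra);
  if (p == nullptr) return nullptr;
  // Skip past a hidden slot, then mask down to an aligned address.
  void** ret = reinterpret_cast<void**>(
      (reinterpret_cast<uintptr_t>(p) + extra) & ~(alignment - 1));
  ret[-1] = p;  // remembered so aligned_free can release the raw block
  return ret;
}

void aligned_free(void* ptr) {
  if (ptr != nullptr) std::free(static_cast<void**>(ptr)[-1]);
}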

@ -38,7 +38,7 @@ static constexpr gpr_atm GPR_LOG_SEVERITY_UNSET = GPR_LOG_SEVERITY_ERROR + 10;
static constexpr gpr_atm GPR_LOG_SEVERITY_NONE = GPR_LOG_SEVERITY_ERROR + 11;
void gpr_default_log(gpr_log_func_args* args);
static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
static gpr_atm g_log_func = reinterpret_cast<gpr_atm>(gpr_default_log);
static gpr_atm g_min_severity_to_print = GPR_LOG_SEVERITY_UNSET;
static gpr_atm g_min_severity_to_print_stacktrace = GPR_LOG_SEVERITY_UNSET;
@ -80,7 +80,7 @@ void gpr_log_message(const char* file, int line, gpr_log_severity severity,
lfargs.line = line;
lfargs.severity = severity;
lfargs.message = message;
((gpr_log_func)gpr_atm_no_barrier_load(&g_log_func))(&lfargs);
reinterpret_cast<gpr_log_func>(gpr_atm_no_barrier_load(&g_log_func))(&lfargs);
}
void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) {
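The casts here exist because gpr_log keeps the active log function in a gpr_atm, an integer-sized atomic word, so the function pointer is converted to an integer on store and back on load. A sketch of the same idea over std::atomic (illustrative names; function-pointer/integer conversion is conditionally-supported in the standard but relied on by this code on its target platforms):

#include <atomic>
#include <cstdint>
#include <cstdio>

using log_func = void (*)(const char* msg);

static void default_log(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

// The function pointer lives in an integer-sized atomic slot.
static std::atomic<intptr_t> g_log_func{reinterpret_cast<intptr_t>(default_log)};

void log_message(const char* msg) {
  reinterpret_cast<log_func>(g_log_func.load(std::memory_order_relaxed))(msg);
}

void set_log_func(log_func f) {
  g_log_func.store(reinterpret_cast<intptr_t>(f), std::memory_order_relaxed);
}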

@ -55,7 +55,7 @@ char* gpr_strdup(const char* src) {
std::string gpr_format_timespec(gpr_timespec tm) {
char time_buffer[35];
char ns_buffer[11]; // '.' + 9 digits of precision
struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
struct tm* tm_info = localtime(reinterpret_cast<time_t*>(&tm.tv_sec));
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
// This loop trims off trailing zeros by inserting a null character that the

@ -48,7 +48,7 @@ static void event_initialize(void) {
/* Hash ev into an element of sync_array[]. */
static struct sync_array_s* hash(gpr_event* ev) {
return &sync_array[((uintptr_t)ev) % event_sync_partitions];
return &sync_array[reinterpret_cast<uintptr_t>(ev) % event_sync_partitions];
}
void gpr_event_init(gpr_event* ev) {

@ -34,13 +34,14 @@ namespace {
grpc_error* DecodeCancelStateError(gpr_atm cancel_state) {
if (cancel_state & 1) {
return (grpc_error*)(cancel_state & ~static_cast<gpr_atm>(1));
return reinterpret_cast<grpc_error*>(cancel_state &
~static_cast<gpr_atm>(1));
}
return GRPC_ERROR_NONE;
}
gpr_atm EncodeCancelStateError(grpc_error* error) {
return static_cast<gpr_atm>(1) | (gpr_atm)error;
return static_cast<gpr_atm>(1) | reinterpret_cast<gpr_atm>(error);
}
} // namespace
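EncodeCancelStateError/DecodeCancelStateError pack a flag into bit 0 of a pointer-sized word: the cancel_state_ atom holds either a closure pointer (bit clear) or an error pointer with bit 0 set. A stripped-down sketch of that tagging scheme, with placeholder types standing in for grpc_closure and grpc_error:

#include <cstdint>

struct Closure {};
struct Error {};

// One pointer-sized word holds either a Closure* (bit 0 clear) or an
// Error* tagged with bit 0. Requires pointers aligned to at least 2 bytes.
using Word = intptr_t;

inline Word EncodeError(Error* e) { return reinterpret_cast<Word>(e) | Word{1}; }

inline Error* DecodeError(Word w) {
  return (w & 1) ? reinterpret_cast<Error*>(w & ~Word{1}) : nullptr;
}

inline Word EncodeClosure(Closure* c) { return reinterpret_cast<Word>(c); }

inline Closure* DecodeClosure(Word w) {
  return (w & 1) ? nullptr : reinterpret_cast<Closure*>(w);
}

The same low-bit trick shows up again below in lockfree_event.cc (shutdown bit), ev_epollex_linux.cc (wakeup-fd bit), and completion_queue.cc (success bit on the completion list), which is why those hunks mix reinterpret_cast with bit masking.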
@ -203,7 +204,8 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_REF(original_error));
break;
} else {
if (gpr_atm_full_cas(&cancel_state_, original_state, (gpr_atm)closure)) {
if (gpr_atm_full_cas(&cancel_state_, original_state,
reinterpret_cast<gpr_atm>(closure))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
this, closure);
@ -212,7 +214,7 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
// closure with GRPC_ERROR_NONE. This allows callers to clean
// up any resources they may be holding for the callback.
if (original_state != 0) {
closure = (grpc_closure*)original_state;
closure = reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p", this,
@ -239,7 +241,8 @@ void CallCombiner::Cancel(grpc_error* error) {
if (gpr_atm_full_cas(&cancel_state_, original_state,
EncodeCancelStateError(error))) {
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
grpc_closure* notify_on_cancel =
reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",

@ -146,7 +146,8 @@ static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
// offload for one or two actions, and that's fine
gpr_atm initiator =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
if (initiator != 0 &&
initiator != reinterpret_cast<gpr_atm>(grpc_core::ExecCtx::Get())) {
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
}
}

@ -427,8 +427,8 @@ static grpc_error* copy_error_and_unref(grpc_error* in) {
// bulk memcpy of the rest of the struct.
// NOLINTNEXTLINE(bugprone-sizeof-expression)
size_t skip = sizeof(&out->atomics);
memcpy(reinterpret_cast<void*>((uintptr_t)out + skip),
(void*)((uintptr_t)in + skip),
memcpy(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(out) + skip),
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(in) + skip),
sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip);
// manually set the atomics and the new capacity
gpr_atm_no_barrier_store(&out->atomics.error_string, 0);
@ -766,7 +766,8 @@ const char* grpc_error_string(grpc_error* err) {
char* out = finish_kvs(&kvs);
if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) {
if (!gpr_atm_rel_cas(&err->atomics.error_string, 0,
reinterpret_cast<gpr_atm>(out))) {
gpr_free(out);
out = reinterpret_cast<char*>(gpr_atm_acq_load(&err->atomics.error_string));
}

@ -803,7 +803,8 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
neighborhood->active_root = pollset->next = pollset->prev = pollset;
/* Make this the designated poller if there isn't one already */
if (worker->state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
gpr_atm_no_barrier_cas(&g_active_poller, 0,
reinterpret_cast<gpr_atm>(worker))) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
} else {
@ -885,8 +886,9 @@ static bool check_neighborhood_for_available_poller(
do {
switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
if (gpr_atm_no_barrier_cas(
&g_active_poller, 0,
reinterpret_cast<gpr_atm>(inspect_worker))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker);
@ -944,7 +946,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
SET_KICK_STATE(worker, KICKED);
grpc_closure_list_move(&worker->schedule_on_end_work,
grpc_core::ExecCtx::Get()->closure_list());
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (gpr_atm_no_barrier_load(&g_active_poller) ==
reinterpret_cast<gpr_atm>(worker)) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
@ -1089,7 +1092,8 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
if (specific_worker == nullptr) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (gpr_tls_get(&g_current_thread_pollset) !=
reinterpret_cast<intptr_t>(pollset)) {
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
@ -1116,8 +1120,9 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
goto done;
} else if (root_worker == next_worker && // only try and wake up a poller
// if there is no next worker
root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
&g_active_poller)) {
root_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", root_worker);
@ -1181,7 +1186,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
@ -1189,7 +1194,8 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick active poller");
@ -1224,7 +1230,7 @@ static void pollset_add_fd(grpc_pollset* /*pollset*/, grpc_fd* /*fd*/) {}
*/
static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
return reinterpret_cast<grpc_pollset_set*>(static_cast<intptr_t>(0xdeafbeef));
}
static void pollset_set_destroy(grpc_pollset_set* /*pss*/) {}

@ -577,7 +577,8 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
}
struct epoll_event ev;
ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = reinterpret_cast<void*>(1 | (intptr_t) & (*p)->wakeup);
ev.data.ptr =
reinterpret_cast<void*>(1 | reinterpret_cast<intptr_t>(&(*p)->wakeup));
if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
GRPC_FD_TRACE(
@ -692,7 +693,8 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (gpr_tls_get(&g_current_thread_worker) ==
reinterpret_cast<intptr_t>(specific_worker)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
}
@ -735,7 +737,8 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
}
if (specific_worker == nullptr) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (gpr_tls_get(&g_current_thread_pollset) !=
reinterpret_cast<intptr_t>(pollset)) {
if (pollset->root_worker == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
@ -881,15 +884,16 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
int n = pollable_obj->event_cursor++;
struct epoll_event* ev = &pollable_obj->events[n];
void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
if (1 & reinterpret_cast<intptr_t>(data_ptr)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
(grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) &
(intptr_t)data_ptr)),
err_desc);
append_error(
&error,
grpc_wakeup_fd_consume_wakeup(reinterpret_cast<grpc_wakeup_fd*>(
~static_cast<intptr_t>(1) &
reinterpret_cast<intptr_t>(data_ptr))),
err_desc);
} else {
grpc_fd* fd =
reinterpret_cast<grpc_fd*>(reinterpret_cast<intptr_t>(data_ptr) & ~2);

@ -775,7 +775,7 @@ static grpc_error* pollset_kick_ext(grpc_pollset* p,
}
p->kicked_without_pollers = true;
} else if (gpr_tls_get(&g_current_thread_worker) !=
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = true;
@ -792,18 +792,20 @@ static grpc_error* pollset_kick_ext(grpc_pollset* p,
kick_append_error(&error,
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
} else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
} else if (gpr_tls_get(&g_current_thread_poller) !=
reinterpret_cast<intptr_t>(p)) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != nullptr) {
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (gpr_tls_get(&g_current_thread_worker) ==
reinterpret_cast<intptr_t>(specific_worker)) {
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
push_back_worker(p, specific_worker);
specific_worker = nullptr;
}

@ -283,7 +283,8 @@ void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
return;
}
ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
ThreadState* ts =
reinterpret_cast<ThreadState*>(gpr_tls_get(&g_this_thread_state));
if (ts == nullptr) {
ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
cur_thread_count)];

@ -108,7 +108,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
The release itself pairs with the acquire half of a set_ready full
barrier. */
if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) {
if (gpr_atm_rel_cas(&state_, kClosureNotReady,
reinterpret_cast<gpr_atm>(closure))) {
return; /* Successful. Return */
}
@ -137,7 +138,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
if ((curr & kShutdownBit) > 0) {
grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
grpc_error* shutdown_err =
reinterpret_cast<grpc_error*>(curr & ~kShutdownBit);
ExecCtx::Run(DEBUG_LOCATION, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@ -157,14 +159,14 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
}
bool LockfreeEvent::SetShutdown(grpc_error* shutdown_error) {
gpr_atm new_state = (gpr_atm)shutdown_error | kShutdownBit;
gpr_atm new_state = reinterpret_cast<gpr_atm>(shutdown_error) | kShutdownBit;
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "LockfreeEvent::SetShutdown: %p curr=%p err=%s",
&state_, reinterpret_cast<void*>(curr),
grpc_error_string(shutdown_err));
grpc_error_string(shutdown_error));
}
switch (curr) {
case kClosureReady:
@ -191,7 +193,7 @@ bool LockfreeEvent::SetShutdown(grpc_error* shutdown_error) {
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
if (gpr_atm_full_cas(&state_, curr, new_state)) {
ExecCtx::Run(DEBUG_LOCATION, (grpc_closure*)curr,
ExecCtx::Run(DEBUG_LOCATION, reinterpret_cast<grpc_closure*>(curr),
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_error, 1));
return true;
@ -241,7 +243,8 @@ void LockfreeEvent::SetReady() {
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
ExecCtx::Run(DEBUG_LOCATION, (grpc_closure*)curr, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, reinterpret_cast<grpc_closure*>(curr),
GRPC_ERROR_NONE);
return;
}
/* else the state changed again (only possible by either a racing

@ -23,7 +23,7 @@
#include "src/core/lib/iomgr/pollset_set.h"
static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
return reinterpret_cast<grpc_pollset_set*>(static_cast<intptr_t>(0xdeafbeef));
}
static void pollset_set_destroy(grpc_pollset_set* /*pollset_set*/) {}

@ -659,8 +659,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
if (name != nullptr) {
resource_quota->name = name;
} else {
resource_quota->name =
absl::StrCat("anonymous_pool_", (intptr_t)resource_quota);
resource_quota->name = absl::StrCat(
"anonymous_pool_", reinterpret_cast<intptr_t>(resource_quota));
}
GRPC_CLOSURE_INIT(&resource_quota->rq_step_closure, rq_step, resource_quota,
nullptr);
@ -807,8 +807,8 @@ grpc_resource_user* grpc_resource_user_create(
if (name != nullptr) {
resource_user->name = name;
} else {
resource_user->name =
absl::StrCat("anonymous_resource_user_", (intptr_t)resource_user);
resource_user->name = absl::StrCat(
"anonymous_resource_user_", reinterpret_cast<intptr_t>(resource_user));
}
return resource_user;
}

@ -260,9 +260,11 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case GRPC_AF_INET:
return grpc_ntohs(((grpc_sockaddr_in*)addr)->sin_port);
return grpc_ntohs(
(reinterpret_cast<const grpc_sockaddr_in*>(addr))->sin_port);
case GRPC_AF_INET6:
return grpc_ntohs(((grpc_sockaddr_in6*)addr)->sin6_port);
return grpc_ntohs(
(reinterpret_cast<const grpc_sockaddr_in6*>(addr))->sin6_port);
default:
if (grpc_is_unix_socket(resolved_addr)) {
return 1;
@ -275,17 +277,17 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr,
int port) {
const grpc_sockaddr* addr =
reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
grpc_sockaddr* addr = const_cast<grpc_sockaddr*>(
reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr));
switch (addr->sa_family) {
case GRPC_AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
((grpc_sockaddr_in*)addr)->sin_port =
(reinterpret_cast<grpc_sockaddr_in*>(addr))->sin_port =
grpc_htons(static_cast<uint16_t>(port));
return 1;
case GRPC_AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
((grpc_sockaddr_in6*)addr)->sin6_port =
(reinterpret_cast<grpc_sockaddr_in6*>(addr))->sin6_port =
grpc_htons(static_cast<uint16_t>(port));
return 1;
default:

@ -465,7 +465,8 @@ static void run_poller(void* bp, grpc_error* /*error_ignored*/) {
if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
bool cas_ok =
gpr_atm_full_cas(&g_backup_poller, reinterpret_cast<gpr_atm>(p), 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
@ -487,7 +488,8 @@ static void run_poller(void* bp, grpc_error* /*error_ignored*/) {
}
static void drop_uncovered(grpc_tcp* /*tcp*/) {
backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
backup_poller* p =
reinterpret_cast<backup_poller*>(gpr_atm_acq_load(&g_backup_poller));
gpr_atm old_count =
gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
@ -526,8 +528,8 @@ static void cover_self(grpc_tcp* tcp) {
GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
grpc_core::ExecutorJobType::LONG);
} else {
while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) ==
nullptr) {
while ((p = reinterpret_cast<backup_poller*>(
gpr_atm_acq_load(&g_backup_poller))) == nullptr) {
// spin waiting for backup poller
}
}

@ -96,8 +96,10 @@ std::string grpc_sockaddr_to_uri_unix_if_possible(
if (addr->sa_family != AF_UNIX) {
return "";
}
if (((struct sockaddr_un*)addr)->sun_path[0] == '\0' &&
((struct sockaddr_un*)addr)->sun_path[1] != '\0') {
if ((reinterpret_cast<const struct sockaddr_un*>(addr))->sun_path[0] ==
'\0' &&
(reinterpret_cast<const struct sockaddr_un*>(addr))->sun_path[1] !=
'\0') {
const struct sockaddr_un* un =
reinterpret_cast<const struct sockaddr_un*>(resolved_addr->addr);
return absl::StrCat(
@ -105,7 +107,8 @@ std::string grpc_sockaddr_to_uri_unix_if_possible(
absl::string_view(un->sun_path + 1,
resolved_addr->len - sizeof(un->sun_family) - 1));
}
return absl::StrCat("unix:", ((struct sockaddr_un*)addr)->sun_path);
return absl::StrCat(
"unix:", (reinterpret_cast<const struct sockaddr_un*>(addr))->sun_path);
}
#endif

@ -310,20 +310,24 @@ void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
}
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
parent_call* p =
reinterpret_cast<parent_call*>(gpr_atm_acq_load(&call->parent_call_atm));
if (p == nullptr) {
p = call->arena->New<parent_call>();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
if (!gpr_atm_rel_cas(&call->parent_call_atm,
reinterpret_cast<gpr_atm>(nullptr),
reinterpret_cast<gpr_atm>(p))) {
p->~parent_call();
p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
p = reinterpret_cast<parent_call*>(
gpr_atm_acq_load(&call->parent_call_atm));
}
}
return p;
}
static parent_call* get_parent_call(grpc_call* call) {
return (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
return reinterpret_cast<parent_call*>(
gpr_atm_acq_load(&call->parent_call_atm));
}
size_t grpc_call_get_initial_size_estimate() {
@ -829,8 +833,8 @@ static void set_encodings_accepted_by_peer(grpc_call* /*call*/,
accepted_user_data =
grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
if (accepted_user_data != nullptr) {
*encodings_accepted_by_peer =
static_cast<uint32_t>(((uintptr_t)accepted_user_data) - 1);
*encodings_accepted_by_peer = static_cast<uint32_t>(
(reinterpret_cast<uintptr_t>(accepted_user_data)) - 1);
return;
}
@ -886,7 +890,8 @@ grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call) {
}
static grpc_linked_mdelem* linked_from_md(const grpc_metadata* md) {
return (grpc_linked_mdelem*)&md->internal_data;
return reinterpret_cast<grpc_linked_mdelem*>(
&const_cast<grpc_metadata*>(md)->internal_data);
}
static grpc_metadata* get_md_elem(grpc_metadata* metadata,
@ -1359,7 +1364,8 @@ static void receiving_stream_ready(void* bctlp, grpc_error* error) {
* object with rel_cas, and will not use it after the cas. Its corresponding
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == nullptr ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE,
reinterpret_cast<gpr_atm>(bctlp))) {
process_data_after_md(bctl);
}
}
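get_or_create_parent_call above is the lock-free lazy-initialization idiom: acquire-load the slot, construct on miss, publish with a release CAS, and if another thread won the race, destroy the local copy and adopt the published one. A sketch of the same pattern over std::atomic, with heap allocation standing in for gRPC's arena allocation:

#include <atomic>

struct ParentCall { /* payload omitted */ };

ParentCall* GetOrCreateParent(std::atomic<ParentCall*>& slot) {
  ParentCall* p = slot.load(std::memory_order_acquire);
  if (p == nullptr) {
    ParentCall* fresh = new ParentCall();
    ParentCall* expected = nullptr;
    if (slot.compare_exchange_strong(expected, fresh,
                                     std::memory_order_release,
                                     std::memory_order_acquire)) {
      p = fresh;
    } else {
      delete fresh;  // lost the race; 'expected' now holds the winner
      p = expected;
    }
  }
  return p;
}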

@ -447,7 +447,8 @@ void grpc_cq_global_init() {
}
void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == nullptr) {
if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)0);
gpr_tls_set(&g_cached_cq, (intptr_t)cq);
}
@ -456,10 +457,10 @@ void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
void** tag, int* ok) {
grpc_cq_completion* storage =
(grpc_cq_completion*)gpr_tls_get(&g_cached_event);
reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event));
int ret = 0;
if (storage != nullptr &&
(grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
if (storage != nullptr && reinterpret_cast<grpc_completion_queue*>(
gpr_tls_get(&g_cached_cq)) == cq) {
*tag = storage->tag;
grpc_core::ExecCtx exec_ctx;
*ok = (storage->next & static_cast<uintptr_t>(1)) == 1;
@ -717,8 +718,10 @@ static void cq_end_op_for_next(
cq_check_tag(cq, tag, true); /* Used in debug builds only */
if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq &&
(grpc_cq_completion*)gpr_tls_get(&g_cached_event) == nullptr) {
if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
cq &&
reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event)) ==
nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)storage);
} else {
/* Add the completion to the queue */
@ -793,8 +796,8 @@ static void cq_end_op_for_pluck(
storage->tag = tag;
storage->done = done;
storage->done_arg = done_arg;
storage->next =
((uintptr_t)&cqd->completed_head) | (static_cast<uintptr_t>(is_success));
storage->next = (reinterpret_cast<uintptr_t>(&cqd->completed_head)) |
(static_cast<uintptr_t>(is_success));
gpr_mu_lock(cq->mu);
cq_check_tag(cq, tag, false); /* Used in debug builds only */
@ -802,7 +805,7 @@ static void cq_end_op_for_pluck(
/* Add to the list of completions */
cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
cqd->completed_tail->next =
((uintptr_t)storage) | (1u & cqd->completed_tail->next);
(reinterpret_cast<uintptr_t>(storage)) | (1u & cqd->completed_tail->next);
cqd->completed_tail = storage;
if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
@ -1176,8 +1179,8 @@ class ExecCtxPluck : public grpc_core::ExecCtx {
cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
grpc_cq_completion* c;
grpc_cq_completion* prev = &cqd->completed_head;
while ((c = (grpc_cq_completion*)(prev->next &
~static_cast<uintptr_t>(1))) !=
while ((c = reinterpret_cast<grpc_cq_completion*>(
prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & static_cast<uintptr_t>(1)) |
@ -1248,9 +1251,9 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
break;
}
prev = &cqd->completed_head;
while (
(c = (grpc_cq_completion*)(prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
while ((c = reinterpret_cast<grpc_cq_completion*>(
prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
if (c->tag == tag) {
prev->next = (prev->next & static_cast<uintptr_t>(1)) |
(c->next & ~static_cast<uintptr_t>(1));

@ -42,7 +42,8 @@ grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) {
}
void* user_data = grpc_mdelem_get_user_data(md, destroy_status);
if (user_data != nullptr) {
return static_cast<grpc_status_code>((intptr_t)user_data - STATUS_OFFSET);
return static_cast<grpc_status_code>(reinterpret_cast<intptr_t>(user_data) -
STATUS_OFFSET);
}
uint32_t status;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {

@ -524,7 +524,8 @@ static tsi_result fake_handshaker_result_create_frame_protector(
static tsi_result fake_handshaker_result_get_unused_bytes(
const tsi_handshaker_result* self, const unsigned char** bytes,
size_t* bytes_size) {
fake_handshaker_result* result = (fake_handshaker_result*)self;
fake_handshaker_result* result = reinterpret_cast<fake_handshaker_result*>(
const_cast<tsi_handshaker_result*>(self));
*bytes_size = result->unused_bytes_size;
*bytes = result->unused_bytes;
return TSI_OK;
@ -581,8 +582,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
static_cast<tsi_fake_handshake_message>(impl->next_message_to_send + 2);
const char* msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
result = tsi_fake_frame_set_data((unsigned char*)msg_string,
strlen(msg_string), &impl->outgoing_frame);
result = tsi_fake_frame_set_data(
reinterpret_cast<unsigned char*>(const_cast<char*>(msg_string)),
strlen(msg_string), &impl->outgoing_frame);
if (result != TSI_OK) return result;
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;

@ -340,8 +340,7 @@ static tsi_result add_pem_certificate(X509* cert, tsi_peer_property* property) {
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
TSI_X509_PEM_CERT_PROPERTY, (const char*)contents,
static_cast<size_t>(len), property);
TSI_X509_PEM_CERT_PROPERTY, contents, static_cast<size_t>(len), property);
BIO_free(bio);
return result;
}
@ -1208,8 +1207,8 @@ tsi_result tsi_ssl_get_cert_chain_contents(STACK_OF(X509) * peer_chain,
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
TSI_X509_PEM_CERT_CHAIN_PROPERTY, (const char*)contents,
static_cast<size_t>(len), property);
TSI_X509_PEM_CERT_CHAIN_PROPERTY, contents, static_cast<size_t>(len),
property);
BIO_free(bio);
return result;
}
@ -1713,7 +1712,7 @@ static int client_handshaker_factory_npn_callback(
const unsigned char* in, unsigned int inlen, void* arg) {
tsi_ssl_client_handshaker_factory* factory =
static_cast<tsi_ssl_client_handshaker_factory*>(arg);
return select_protocol_list((const unsigned char**)out, outlen,
return select_protocol_list(const_cast<const unsigned char**>(out), outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}

@ -147,7 +147,7 @@ int main(int argc, char** argv) {
args[1] = const_cast<char*>("--bind");
std::string joined = grpc_core::JoinHostPort("::", port);
args[2] = const_cast<char*>(joined.c_str());
svr = gpr_subprocess_create(4, (const char**)args);
svr = gpr_subprocess_create(4, const_cast<const char**>(args));
gpr_free(args[0]);
for (i = 3; i <= 4; i++) {

@ -186,14 +186,17 @@ int byte_buffer_eq_string(grpc_byte_buffer* bb, const char* str) {
return byte_buffer_eq_slice(bb, grpc_slice_from_copied_string(str));
}
static bool is_probably_integer(void* p) { return ((uintptr_t)p) < 1000000; }
static bool is_probably_integer(void* p) {
return (reinterpret_cast<uintptr_t>(p)) < 1000000;
}
namespace {
std::string ExpectationString(const Expectation& e) {
std::string out;
if (is_probably_integer(e.tag)) {
out = absl::StrFormat("tag(%" PRIdPTR ") ", (intptr_t)e.tag);
out = absl::StrFormat("tag(%" PRIdPTR ") ",
reinterpret_cast<intptr_t>(e.tag));
} else {
out = absl::StrFormat("%p ", e.tag);
}
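is_probably_integer works because the completion-queue tags used by these tests are small integers smuggled through void*: writing a tag packs an intptr_t into the pointer, and reading it back is the reverse reinterpret_cast, optionally narrowed with static_cast<int> as the benchmarks later in this diff do. A sketch of that round trip with illustrative helper names:

#include <cstdint>

// Pack a small integer into an opaque completion-queue tag and back.
inline void* make_tag(intptr_t i) { return reinterpret_cast<void*>(i); }
inline intptr_t read_tag(void* tag) { return reinterpret_cast<intptr_t>(tag); }

// Typical use in the tests below: int i = static_cast<int>(read_tag(t));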

@ -141,7 +141,8 @@ static void simple_request_body(grpc_end2end_test_config /*config*/,
char* dynamic_string = gpr_strdup("xyz");
grpc_call_cancel_with_status(c, GRPC_STATUS_UNIMPLEMENTED,
(const char*)dynamic_string, nullptr);
reinterpret_cast<const char*>(dynamic_string),
nullptr);
// The API of \a description allows for it to be a dynamic/non-const
// string, test this guarantee.
gpr_free(dynamic_string);

@ -343,7 +343,7 @@ static void test_max_concurrent_streams(grpc_end2end_test_config config) {
* both);
* check this here */
/* We'll get tag 303 or 403, we want 300, 400 */
live_call = (static_cast<int>((intptr_t)ev.tag)) - 1;
live_call = (static_cast<int>(reinterpret_cast<intptr_t>(ev.tag))) - 1;
got_client_start = 1;
}
}

@ -53,10 +53,11 @@ static void test_no_error_log(gpr_log_func_args* args) {
}
}
static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
static gpr_atm g_log_func = reinterpret_cast<gpr_atm>(gpr_default_log);
static void log_dispatcher_func(gpr_log_func_args* args) {
gpr_log_func log_func = (gpr_log_func)gpr_atm_no_barrier_load(&g_log_func);
gpr_log_func log_func =
reinterpret_cast<gpr_log_func>(gpr_atm_no_barrier_load(&g_log_func));
log_func(args);
}

@ -238,7 +238,7 @@ void resource_quota_server(grpc_end2end_test_config config) {
grpc_completion_queue_next(f.cq, n_seconds_from_now(60), nullptr);
GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
int ev_tag = static_cast<int>((intptr_t)ev.tag);
int ev_tag = static_cast<int>(reinterpret_cast<intptr_t>(ev.tag));
if (ev_tag < CLIENT_BASE_TAG) {
abort(); /* illegal tag */
} else if (ev_tag < SERVER_START_BASE_TAG) {

@ -77,7 +77,7 @@ static void test_early_server_shutdown_finishes_tags(
grpc_end2end_test_fixture f = begin_test(
config, "test_early_server_shutdown_finishes_tags", nullptr, nullptr);
cq_verifier* cqv = cq_verifier_create(f.cq);
grpc_call* s = (grpc_call*)static_cast<uintptr_t>(1);
grpc_call* s = reinterpret_cast<grpc_call*>(1);
grpc_call_details call_details;
grpc_metadata_array request_metadata_recv;

@ -50,7 +50,7 @@ int main(int /*argc*/, char** argv) {
std::string joined = grpc_core::JoinHostPort("::", port);
args[2] = const_cast<char*>(joined.c_str());
args[3] = const_cast<char*>("--no-secure");
svr = gpr_subprocess_create(4, (const char**)args);
svr = gpr_subprocess_create(4, const_cast<const char**>(args));
/* start the client */
command =
@ -62,7 +62,7 @@ int main(int /*argc*/, char** argv) {
args[3] = const_cast<char*>("--scenario=ping-pong-stream");
args[4] = const_cast<char*>("--no-secure");
args[5] = nullptr;
cli = gpr_subprocess_create(6, (const char**)args);
cli = gpr_subprocess_create(6, const_cast<const char**>(args));
/* wait for completion */
printf("waiting for client\n");

@ -52,7 +52,7 @@ int main(int /*argc*/, const char** argv) {
std::string joined = grpc_core::JoinHostPort("::", port);
args[2] = const_cast<char*>(joined.c_str());
args[3] = const_cast<char*>("--no-secure");
svr = gpr_subprocess_create(4, (const char**)args);
svr = gpr_subprocess_create(4, const_cast<const char**>(args));
/* start the client */
command =
@ -64,7 +64,7 @@ int main(int /*argc*/, const char** argv) {
args[3] = const_cast<char*>("--scenario=ping-pong-request");
args[4] = const_cast<char*>("--no-secure");
args[5] = nullptr;
cli = gpr_subprocess_create(6, (const char**)args);
cli = gpr_subprocess_create(6, const_cast<const char**>(args));
/* wait for completion */
printf("waiting for client\n");

@ -253,7 +253,7 @@ int main(int argc, char** argv) {
s = static_cast<call_state*>(ev.tag);
switch (ev.type) {
case GRPC_OP_COMPLETE:
switch ((intptr_t)s) {
switch (reinterpret_cast<intptr_t>(s)) {
case FLING_SERVER_NEW_REQUEST:
if (call != nullptr) {
if (0 == grpc_slice_str_cmp(call_details.method,

@ -51,7 +51,7 @@ static void test_init() {
#define CONCURRENT_TEST_THREADS 100
static void sleeping_thd(void* arg) {
int64_t sleep_ms = (int64_t)arg;
int64_t sleep_ms = reinterpret_cast<int64_t>(arg);
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
gpr_time_from_millis(sleep_ms, GPR_TIMESPAN)));
}

@ -184,7 +184,8 @@ int main(int argc, char** argv) {
/* start the server */
args[1 + arg_shift] = const_cast<char*>("--port");
gpr_asprintf(&args[2 + arg_shift], "%d", port);
server = gpr_subprocess_create(3 + arg_shift, (const char**)args);
server =
gpr_subprocess_create(3 + arg_shift, const_cast<const char**>(args));
GPR_ASSERT(server);
gpr_free(args[0]);
if (arg_shift) gpr_free(args[1]);

@ -192,7 +192,7 @@ int main(int argc, char** argv) {
args[1 + arg_shift] = const_cast<char*>("--port");
gpr_asprintf(&args[2 + arg_shift], "%d", port);
args[3 + arg_shift] = const_cast<char*>("--ssl");
server = gpr_subprocess_create(4 + arg_shift, (const char**)args);
server = gpr_subprocess_create(4 + arg_shift, const_cast<const char**>(args));
GPR_ASSERT(server);
gpr_free(args[0]);
if (arg_shift) gpr_free(args[1]);

@ -42,7 +42,7 @@ static const int64_t kMillisIn25Days = 2160000000;
static const int64_t kHoursIn25Days = 600;
static void cb(void* arg, grpc_error* error) {
cb_called[(intptr_t)arg][error == GRPC_ERROR_NONE]++;
cb_called[reinterpret_cast<intptr_t>(arg)][error == GRPC_ERROR_NONE]++;
}
static void add_test(void) {

@ -61,7 +61,7 @@ static void test_basic_add_find(uint32_t n) {
GPR_ASSERT(nullptr == grpc_chttp2_stream_map_find(&map, 0));
GPR_ASSERT(nullptr == grpc_chttp2_stream_map_find(&map, n + 1));
for (i = 1; i <= n; i++) {
got = (uintptr_t)grpc_chttp2_stream_map_find(&map, i);
got = reinterpret_cast<uintptr_t>(grpc_chttp2_stream_map_find(&map, i));
GPR_ASSERT(i == got);
}
grpc_chttp2_stream_map_destroy(&map);
@ -84,7 +84,7 @@ static void check_delete_evens(grpc_chttp2_stream_map* map, uint32_t n) {
GPR_ASSERT(nullptr == grpc_chttp2_stream_map_find(map, n + 1));
for (i = 1; i <= n; i++) {
if (i & 1) {
got = (uintptr_t)grpc_chttp2_stream_map_find(map, i);
got = reinterpret_cast<uintptr_t>(grpc_chttp2_stream_map_find(map, i));
GPR_ASSERT(i == got);
} else {
GPR_ASSERT(nullptr == grpc_chttp2_stream_map_find(map, i));

@ -197,7 +197,8 @@ void tsi_test_frame_protector_send_message_to_peer(
uint8_t* message =
is_client ? config->client_message : config->server_message;
GPR_ASSERT(message != nullptr);
const unsigned char* message_bytes = (const unsigned char*)message;
const unsigned char* message_bytes =
reinterpret_cast<unsigned char*>(message);
tsi_result result = TSI_OK;
/* Do protect and send protected data to peer. */
while (message_size > 0 && result == TSI_OK) {
@ -370,10 +371,10 @@ static void do_handshaker_next(handshaker_args* args) {
args->transferred_data = true;
}
/* Peform handshaker next. */
result = tsi_handshaker_next(handshaker, args->handshake_buffer, buf_size,
(const unsigned char**)&bytes_to_send,
&bytes_to_send_size, &handshaker_result,
&on_handshake_next_done_wrapper, args);
result = tsi_handshaker_next(
handshaker, args->handshake_buffer, buf_size,
const_cast<const unsigned char**>(&bytes_to_send), &bytes_to_send_size,
&handshaker_result, &on_handshake_next_done_wrapper, args);
if (result != TSI_ASYNC) {
args->error = on_handshake_next_done(
result, args, bytes_to_send, bytes_to_send_size, handshaker_result);

@ -132,7 +132,8 @@ grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
grpc_resource_quota* resource_quota) {
mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
m->base.vtable = &vtable;
std::string name = absl::StrFormat("mock_endpoint_%" PRIxPTR, (intptr_t)m);
std::string name =
absl::StrFormat("mock_endpoint_%" PRIxPTR, reinterpret_cast<intptr_t>(m));
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
grpc_slice_buffer_init(&m->read_buffer);
gpr_mu_init(&m->mu);

@ -197,8 +197,9 @@ static void half_init(half* m, passthru_endpoint* parent,
m->parent = parent;
grpc_slice_buffer_init(&m->read_buffer);
m->on_read = nullptr;
std::string name = absl::StrFormat("passthru_endpoint_%s_%" PRIxPTR,
half_name, (intptr_t)parent);
std::string name =
absl::StrFormat("passthru_endpoint_%s_%" PRIxPTR, half_name,
reinterpret_cast<intptr_t>(parent));
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
}

@ -48,7 +48,7 @@ class TestSocketMutator : public grpc_socket_mutator {
//
bool test_mutator_mutate_fd(int fd, grpc_socket_mutator* mutator) {
TestSocketMutator* tsm = (TestSocketMutator*)mutator;
TestSocketMutator* tsm = reinterpret_cast<TestSocketMutator*>(mutator);
return tsm->MutateFd(fd);
}
@ -57,7 +57,7 @@ int test_mutator_compare(grpc_socket_mutator* a, grpc_socket_mutator* b) {
}
void test_mutator_destroy(grpc_socket_mutator* mutator) {
TestSocketMutator* tsm = (TestSocketMutator*)mutator;
TestSocketMutator* tsm = reinterpret_cast<TestSocketMutator*>(mutator);
delete tsm;
}

@ -284,7 +284,7 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
while (need_tags) {
TrickleCQNext(fixture.get(), &t, &ok, -1);
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -330,7 +330,7 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
request_rw->Read(&recv_response, tag(0));
continue;
}
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}

@ -83,7 +83,7 @@ static void BM_StreamingPingPong(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -99,7 +99,7 @@ static void BM_StreamingPingPong(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
// If server recv is complete, start the server send operation
if (i == 1) {
@ -122,7 +122,7 @@ static void BM_StreamingPingPong(benchmark::State& state) {
need_tags = (1 << 0) | (1 << 1) | (1 << 2);
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -175,7 +175,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -190,7 +190,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
// If server recv is complete, start the server send operation
if (i == 1) {
@ -210,7 +210,7 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
need_tags = (1 << 0) | (1 << 1) | (1 << 2);
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -297,10 +297,10 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
// established). It is necessary when client init metadata is
// coalesced
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
while (static_cast<int>((intptr_t)t) != 0) {
while (static_cast<int>(reinterpret_cast<intptr_t>(t)) != 0) {
// In some cases tag:2 comes before tag:0 (write tag comes out
// first), this while loop is to make sure get tag:0.
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(await_tags & (1 << i));
await_tags &= ~(1 << i);
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@ -317,7 +317,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
while (await_tags != 0) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
// If server recv is complete, start the server send operation
if (i == 3) {
@ -367,8 +367,8 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
// wait for server call data structure(call_hook, etc.) to be
// initialized, since initial metadata is corked.
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
while (static_cast<int>((intptr_t)t) != 0) {
int i = static_cast<int>((intptr_t)t);
while (static_cast<int>(reinterpret_cast<intptr_t>(t)) != 0) {
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(expect_tags & (1 << i));
expect_tags &= ~(1 << i);
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@ -385,7 +385,7 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
while (expect_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(expect_tags & (1 << i));
expect_tags &= ~(1 << i);
}

@ -62,7 +62,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -85,7 +85,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
need_tags = (1 << 0) | (1 << 1);
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -95,7 +95,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
need_tags = (1 << 0) | (1 << 1);
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -131,7 +131,7 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
GPR_ASSERT(ok);
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}
@ -154,7 +154,7 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
need_tags = (1 << 0) | (1 << 1);
while (need_tags) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
int i = static_cast<int>((intptr_t)t);
int i = static_cast<int>(reinterpret_cast<intptr_t>(t));
GPR_ASSERT(need_tags & (1 << i));
need_tags &= ~(1 << i);
}

@ -64,7 +64,7 @@ bool mock_socket_mutator_mutate_fd(int /*fd*/, grpc_socket_mutator* m) {
int mock_socket_mutator_compare(grpc_socket_mutator* a,
grpc_socket_mutator* b) {
return (uintptr_t)a - (uintptr_t)b;
return reinterpret_cast<uintptr_t>(a) - reinterpret_cast<uintptr_t>(b);
}
void mock_socket_mutator_destroy(grpc_socket_mutator* m) {

@ -88,7 +88,7 @@ int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
return grpc::testing::GrpcToolMainLib(
argc, (const char**)argv, grpc::testing::CliCredentials(),
argc, const_cast<const char**>(argv), grpc::testing::CliCredentials(),
std::bind(SimplePrint, absl::GetFlag(FLAGS_outfile),
std::placeholders::_1));
}
