Merge pull request #24841 from veblush/tidy-google-readability-casting

[Clang-Tidy] google-readability-casting
reviewable/pr24701/r19^2
Esun Kim, 4 years ago, committed by GitHub
commit 6c440e820b
.clang-tidy | 1
src/core/ext/filters/client_channel/client_channel.cc | 12
src/core/ext/filters/client_channel/client_channel_plugin.cc | 2
src/core/ext/filters/client_channel/http_connect_handshaker.cc | 2
src/core/ext/filters/client_channel/http_proxy.cc | 2
src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc | 42
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc | 4
src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc | 4
src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc | 2
src/core/ext/filters/client_channel/subchannel.cc | 7
src/core/ext/filters/deadline/deadline_filter.cc | 6
src/core/ext/filters/http/client_authority_filter.cc | 12
src/core/ext/filters/http/http_filters_plugin.cc | 9
src/core/ext/filters/message_size/message_size_filter.cc | 2
src/core/ext/filters/workarounds/workaround_utils.cc | 2
src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc | 3
src/core/ext/transport/chttp2/transport/chttp2_transport.cc | 4
src/core/ext/transport/cronet/transport/cronet_transport.cc | 4
src/core/ext/transport/inproc/inproc_transport.cc | 9
src/core/ext/xds/xds_client.cc | 24
src/core/lib/compression/compression_args.cc | 5
src/core/lib/gpr/alloc.cc | 5
src/core/lib/gpr/log.cc | 4
src/core/lib/gpr/string.cc | 7
src/core/lib/gpr/sync.cc | 8
src/core/lib/gprpp/thd_posix.cc | 7
src/core/lib/gprpp/thd_windows.cc | 4
src/core/lib/http/httpcli.cc | 2
src/core/lib/http/parser.cc | 3
src/core/lib/iomgr/call_combiner.cc | 13
src/core/lib/iomgr/combiner.cc | 3
src/core/lib/iomgr/error.cc | 26
src/core/lib/iomgr/ev_epoll1_linux.cc | 31
src/core/lib/iomgr/ev_epollex_linux.cc | 30
src/core/lib/iomgr/ev_poll_posix.cc | 12
src/core/lib/iomgr/executor.cc | 3
src/core/lib/iomgr/iomgr.cc | 2
src/core/lib/iomgr/lockfree_event.cc | 27
src/core/lib/iomgr/pollset_set_custom.cc | 2
src/core/lib/iomgr/python_util.h | 2
src/core/lib/iomgr/resource_quota.cc | 8
src/core/lib/iomgr/sockaddr_utils.cc | 20
src/core/lib/iomgr/sockaddr_utils.h | 2
src/core/lib/iomgr/socket_factory_posix.cc | 5
src/core/lib/iomgr/socket_mutator.cc | 5
src/core/lib/iomgr/tcp_client_custom.cc | 13
src/core/lib/iomgr/tcp_custom.cc | 39
src/core/lib/iomgr/tcp_posix.cc | 10
src/core/lib/iomgr/tcp_server_custom.cc | 40
src/core/lib/iomgr/timer_custom.cc | 6
src/core/lib/iomgr/udp_server.cc | 3
src/core/lib/iomgr/udp_server.h | 3
src/core/lib/iomgr/unix_sockets_posix.cc | 13
src/core/lib/security/context/security_context.cc | 5
src/core/lib/security/credentials/credentials.cc | 10
src/core/lib/security/credentials/external/aws_request_signer.cc | 3
src/core/lib/security/credentials/fake/fake_credentials.cc | 3
src/core/lib/security/credentials/google_default/google_default_credentials.cc | 4
src/core/lib/security/credentials/jwt/json_token.cc | 2
src/core/lib/security/credentials/jwt/jwt_verifier.cc | 6
src/core/lib/security/credentials/oauth2/oauth2_credentials.cc | 13
src/core/lib/security/credentials/plugin/plugin_credentials.cc | 2
src/core/lib/security/credentials/ssl/ssl_credentials.cc | 10
src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc | 2
src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc | 2
src/core/lib/security/credentials/tls/tls_credentials.cc | 2
src/core/lib/security/security_connector/security_connector.cc | 5
src/core/lib/security/security_connector/ssl/ssl_security_connector.cc | 6
src/core/lib/security/security_connector/ssl_utils.cc | 4
src/core/lib/security/security_connector/tls/tls_security_connector.cc | 6
src/core/lib/security/transport/security_handshaker.cc | 4
src/core/lib/surface/call.cc | 47
src/core/lib/surface/channel_init.cc | 2
src/core/lib/surface/completion_queue.cc | 31
src/core/lib/surface/init.cc | 11
src/core/lib/transport/static_metadata.cc | 2
src/core/lib/transport/status_metadata.cc | 7
src/core/tsi/alts/crypt/gsec.cc | 9
src/core/tsi/alts/frame_protector/frame_handler.cc | 14
src/core/tsi/alts/handshaker/alts_handshaker_client.cc | 2
src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc | 14
src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc | 8
src/core/tsi/fake_transport_security.cc | 8
src/core/tsi/ssl_transport_security.cc | 30
src/cpp/common/channel_filter.cc | 2
src/cpp/server/server_builder.cc | 4
test/core/bad_client/bad_client.cc | 8
test/core/bad_client/tests/duplicate_header.cc | 2
test/core/bad_client/tests/head_of_line_blocking.cc | 2
test/core/bad_client/tests/server_registered_method.cc | 2
test/core/bad_client/tests/simple_request.cc | 2
test/core/bad_connection/close_fd_test.cc | 2
test/core/bad_ssl/bad_ssl_test.cc | 4
test/core/bad_ssl/server_common.cc | 3
test/core/channel/channel_stack_builder_test.cc | 12
test/core/channel/channel_trace_test.cc | 4
test/core/channel/channelz_test.cc | 4
test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc | 4
test/core/client_channel/resolvers/dns_resolver_cooldown_test.cc | 2
test/core/client_channel/resolvers/fake_resolver_test.cc | 2
Some files were not shown because too many files have changed in this diff.

@ -11,7 +11,6 @@ Checks: '-*,
-performance-unnecessary-value-param,
google-*,
-google-explicit-constructor,
-google-readability-casting,
-google-runtime-int,
-google-runtime-references,
misc-definitions-in-headers,

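With -google-readability-casting no longer disabled, clang-tidy flags C-style casts across the tree, and the hunks below replace them with named C++ casts. A minimal sketch (not taken from this PR) of the three substitutions that cover nearly every change here:

#include <cstdint>

struct Elem { int n; };

void cast_examples(void* from_allocator, const char* literal, std::uintptr_t raw) {
  // void* coming back from an allocator: static_cast instead of (Elem*).
  Elem* e = static_cast<Elem*>(from_allocator);
  // String literal handed to a legacy char* field: const_cast instead of (char*).
  char* key = const_cast<char*>(literal);
  // Integer <-> pointer conversions: reinterpret_cast instead of (Elem*).
  Elem* p = reinterpret_cast<Elem*>(raw);
  (void)e; (void)key; (void)p;
}
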
@ -2516,8 +2516,9 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_initial_metadata_storage_ == nullptr);
grpc_metadata_batch* send_initial_metadata =
batch->payload->send_initial_metadata.send_initial_metadata;
send_initial_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
send_initial_metadata_storage_ =
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count));
grpc_metadata_batch_copy(send_initial_metadata, &send_initial_metadata_,
send_initial_metadata_storage_);
send_initial_metadata_flags_ =
@ -2536,8 +2537,9 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_trailing_metadata_storage_ == nullptr);
grpc_metadata_batch* send_trailing_metadata =
batch->payload->send_trailing_metadata.send_trailing_metadata;
send_trailing_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
send_trailing_metadata_storage_ =
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count));
grpc_metadata_batch_copy(send_trailing_metadata, &send_trailing_metadata_,
send_trailing_metadata_storage_);
}
@ -3039,7 +3041,7 @@ bool CallData::MaybeRetry(grpc_call_element* elem,
gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
chand, this, ms);
}
server_pushback_ms = (grpc_millis)ms;
server_pushback_ms = static_cast<grpc_millis>(ms);
}
}
DoRetry(elem, retry_state, server_pushback_ms);

@ -54,7 +54,7 @@ void grpc_client_channel_init(void) {
grpc_core::GlobalSubchannelPool::Init();
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY, append_filter,
(void*)&grpc_client_channel_filter);
const_cast<grpc_channel_filter*>(&grpc_client_channel_filter));
grpc_http_connect_register_handshaker_factory();
grpc_client_channel_global_init_backup_polling();
}

@ -332,7 +332,7 @@ void HttpConnectHandshaker::DoHandshake(grpc_tcp_server_acceptor* /*acceptor*/,
grpc_httpcli_request request;
request.host = server_name;
request.ssl_host_override = nullptr;
request.http.method = (char*)"CONNECT";
request.http.method = const_cast<char*>("CONNECT");
request.http.path = server_name;
request.http.version = GRPC_HTTP_HTTP10; // Set by OnReadDone
request.http.hdrs = headers;

@ -172,7 +172,7 @@ class HttpProxyMapper : public ProxyMapperInterface {
}
grpc_arg args_to_add[2];
args_to_add[0] = grpc_channel_arg_string_create(
(char*)GRPC_ARG_HTTP_CONNECT_SERVER,
const_cast<char*>(GRPC_ARG_HTTP_CONNECT_SERVER),
uri->path[0] == '/' ? uri->path + 1 : uri->path);
if (user_cred != nullptr) {
/* Use base64 encoding for user credentials as stated in RFC 7617 */

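Several hunks in this diff build channel args whose key field is a plain char* in the C API, so string-literal keys now go through const_cast rather than a (char*) cast. A minimal sketch modeled on those assignments (the key name here is hypothetical):

grpc_arg MakeExampleStringArg(const char* value) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  // grpc_arg::key and grpc_arg::value.string are non-const char*, so the
  // literal key and the incoming const char* both need const_cast.
  arg.key = const_cast<char*>("grpc.example.key");  // hypothetical key
  arg.value.string = const_cast<char*>(value);
  return arg;
}
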
@ -491,7 +491,7 @@ bool GrpcLb::Serverlist::operator==(const Serverlist& other) const {
void ParseServer(const GrpcLbServer& server, grpc_resolved_address* addr) {
memset(addr, 0, sizeof(*addr));
if (server.drop) return;
const uint16_t netorder_port = grpc_htons((uint16_t)server.port);
const uint16_t netorder_port = grpc_htons(static_cast<uint16_t>(server.port));
/* the addresses are given in binary format (a in(6)_addr struct) in
* server->ip_address.bytes. */
if (server.ip_size == 4) {
@ -502,7 +502,8 @@ void ParseServer(const GrpcLbServer& server, grpc_resolved_address* addr) {
addr4->sin_port = netorder_port;
} else if (server.ip_size == 16) {
addr->len = static_cast<socklen_t>(sizeof(grpc_sockaddr_in6));
grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)&addr->addr;
grpc_sockaddr_in6* addr6 =
reinterpret_cast<grpc_sockaddr_in6*>(&addr->addr);
addr6->sin6_family = GRPC_AF_INET6;
memcpy(&addr6->sin6_addr, server.ip_addr, server.ip_size);
addr6->sin6_port = netorder_port;
@ -532,17 +533,18 @@ bool IsServerValid(const GrpcLbServer& server, size_t idx, bool log) {
if (GPR_UNLIKELY(server.port >> 16 != 0)) {
if (log) {
gpr_log(GPR_ERROR,
"Invalid port '%d' at index %lu of serverlist. Ignoring.",
server.port, (unsigned long)idx);
"Invalid port '%d' at index %" PRIuPTR
" of serverlist. Ignoring.",
server.port, idx);
}
return false;
}
if (GPR_UNLIKELY(server.ip_size != 4 && server.ip_size != 16)) {
if (log) {
gpr_log(GPR_ERROR,
"Expected IP to be 4 or 16 bytes, got %d at index %lu of "
"serverlist. Ignoring",
server.ip_size, (unsigned long)idx);
"Expected IP to be 4 or 16 bytes, got %d at index %" PRIuPTR
" of serverlist. Ignoring",
server.ip_size, idx);
}
return false;
}
@ -844,8 +846,9 @@ void GrpcLb::BalancerCallState::StartQuery() {
// with the callback.
auto self = Ref(DEBUG_LOCATION, "on_initial_request_sent");
self.release();
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_initial_request_sent_);
call_error = grpc_call_start_batch_and_execute(lb_call_, ops,
static_cast<size_t>(op - ops),
&lb_on_initial_request_sent_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv initial metadata.
op = ops;
@ -867,7 +870,8 @@ void GrpcLb::BalancerCallState::StartQuery() {
self = Ref(DEBUG_LOCATION, "on_message_received");
self.release();
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_message_received_);
lb_call_, ops, static_cast<size_t>(op - ops),
&lb_on_balancer_message_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
op = ops;
@ -883,7 +887,8 @@ void GrpcLb::BalancerCallState::StartQuery() {
// ref instead of a new ref. When it's invoked, it's the initial ref that is
// unreffed.
call_error = grpc_call_start_batch_and_execute(
lb_call_, ops, (size_t)(op - ops), &lb_on_balancer_status_received_);
lb_call_, ops, static_cast<size_t>(op - ops),
&lb_on_balancer_status_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
@ -1354,7 +1359,7 @@ GrpcLb::GrpcLb(Args args)
}
GrpcLb::~GrpcLb() {
gpr_free((void*)server_name_);
gpr_free(const_cast<char*>(server_name_));
grpc_channel_args_destroy(args_);
}
@ -1457,7 +1462,7 @@ void GrpcLb::ProcessAddressesAndChannelArgsLocked(
// since we use this to trigger the client_load_reporting filter.
static const char* args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME};
grpc_arg new_arg = grpc_channel_arg_string_create(
(char*)GRPC_ARG_LB_POLICY_NAME, (char*)"grpclb");
const_cast<char*>(GRPC_ARG_LB_POLICY_NAME), const_cast<char*>("grpclb"));
grpc_channel_args_destroy(args_);
args_ = grpc_channel_args_copy_and_add_and_remove(
&args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1);
@ -1766,7 +1771,8 @@ bool maybe_add_client_load_reporting_filter(grpc_channel_stack_builder* builder,
// will minimize the number of metadata elements that the filter
// needs to iterate through to find the ClientStats object.
return grpc_channel_stack_builder_prepend_filter(
builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
builder, static_cast<const grpc_channel_filter*>(arg), nullptr,
nullptr);
}
return true;
}
@ -1777,10 +1783,10 @@ void grpc_lb_policy_grpclb_init() {
grpc_core::LoadBalancingPolicyRegistry::Builder::
RegisterLoadBalancingPolicyFactory(
absl::make_unique<grpc_core::GrpcLbFactory>());
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
(void*)&grpc_client_load_reporting_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_client_load_reporting_filter,
const_cast<grpc_channel_filter*>(&grpc_client_load_reporting_filter));
}
void grpc_lb_policy_grpclb_shutdown() {}

@ -43,8 +43,8 @@ namespace grpc_core {
class GrpcPolledFdPosix : public GrpcPolledFd {
public:
GrpcPolledFdPosix(ares_socket_t as, grpc_pollset_set* driver_pollset_set)
: name_(absl::StrCat("c-ares fd: ", (int)as)), as_(as) {
fd_ = grpc_fd_create((int)as, name_.c_str(), false);
: name_(absl::StrCat("c-ares fd: ", static_cast<int>(as))), as_(as) {
fd_ = grpc_fd_create(static_cast<int>(as), name_.c_str(), false);
driver_pollset_set_ = driver_pollset_set;
grpc_pollset_set_add_fd(driver_pollset_set_, fd_);
}

@ -139,8 +139,8 @@ void grpc_cares_wrapper_address_sorting_sort(const grpc_ares_request* r,
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_cares_address_sorting)) {
log_address_sorting_list(r, *addresses, "input");
}
address_sorting_sortable* sortables = (address_sorting_sortable*)gpr_zalloc(
sizeof(address_sorting_sortable) * addresses->size());
address_sorting_sortable* sortables = static_cast<address_sorting_sortable*>(
gpr_zalloc(sizeof(address_sorting_sortable) * addresses->size()));
for (size_t i = 0; i < addresses->size(); ++i) {
sortables[i].user_data = &(*addresses)[i];
memcpy(&sortables[i].dest_addr.addr, &(*addresses)[i].address().addr,

@ -339,7 +339,7 @@ grpc_arg FakeResolverResponseGenerator::MakeChannelArg(
FakeResolverResponseGenerator* generator) {
grpc_arg arg;
arg.type = GRPC_ARG_POINTER;
arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR;
arg.key = const_cast<char*>(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR);
arg.value.pointer.p = generator;
arg.value.pointer.vtable = &response_generator_arg_vtable;
return arg;

@ -189,7 +189,8 @@ void SubchannelCall::StartTransportStreamOpBatch(
void* SubchannelCall::GetParentData() {
grpc_channel_stack* chanstk = connected_subchannel_->channel_stack();
return (char*)this + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
return reinterpret_cast<char*>(this) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(chanstk->call_stack_size);
}
@ -702,7 +703,7 @@ Subchannel::Subchannel(SubchannelKey* key,
const grpc_integer_options options = {
GRPC_MAX_CHANNEL_TRACE_EVENT_MEMORY_PER_NODE_DEFAULT, 0, INT_MAX};
size_t channel_tracer_max_memory =
(size_t)grpc_channel_arg_get_integer(arg, options);
static_cast<size_t>(grpc_channel_arg_get_integer(arg, options));
if (channelz_enabled) {
channelz_node_ = MakeRefCounted<channelz::SubchannelNode>(
GetTargetAddress(), channel_tracer_max_memory);
@ -912,7 +913,7 @@ void Subchannel::ResetBackoff() {
grpc_arg Subchannel::CreateSubchannelAddressArg(
const grpc_resolved_address* addr) {
return grpc_channel_arg_string_create(
(char*)GRPC_ARG_SUBCHANNEL_ADDRESS,
const_cast<char*>(GRPC_ARG_SUBCHANNEL_ADDRESS),
gpr_strdup(addr->len > 0 ? grpc_sockaddr_to_uri(addr).c_str() : ""));
}

@ -383,10 +383,12 @@ static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
void grpc_deadline_filter_init(void) {
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
maybe_add_deadline_filter,
const_cast<grpc_channel_filter*>(&grpc_client_deadline_filter));
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
maybe_add_deadline_filter,
const_cast<grpc_channel_filter*>(&grpc_server_deadline_filter));
}
void grpc_deadline_filter_shutdown(void) {}

@ -148,12 +148,12 @@ static bool add_client_authority_filter(grpc_channel_stack_builder* builder,
}
void grpc_client_authority_filter_init(void) {
grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL, INT_MAX,
add_client_authority_filter,
(void*)&grpc_client_authority_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
add_client_authority_filter,
(void*)&grpc_client_authority_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, INT_MAX, add_client_authority_filter,
const_cast<grpc_channel_filter*>(&grpc_client_authority_filter));
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX, add_client_authority_filter,
const_cast<grpc_channel_filter*>(&grpc_client_authority_filter));
}
void grpc_client_authority_filter_shutdown(void) {}

@ -93,13 +93,16 @@ void grpc_http_filters_init(void) {
maybe_add_optional_filter<true>, &decompress_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_SUBCHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_required_filter, (void*)&grpc_http_client_filter);
maybe_add_required_filter,
const_cast<grpc_channel_filter*>(&grpc_http_client_filter));
grpc_channel_init_register_stage(
GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_required_filter, (void*)&grpc_http_client_filter);
maybe_add_required_filter,
const_cast<grpc_channel_filter*>(&grpc_http_client_filter));
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
maybe_add_required_filter, (void*)&grpc_http_server_filter);
maybe_add_required_filter,
const_cast<grpc_channel_filter*>(&grpc_http_server_filter));
}
void grpc_http_filters_shutdown(void) {}

@ -302,7 +302,7 @@ static grpc_error* message_size_init_call_elem(
static void message_size_destroy_call_elem(
grpc_call_element* elem, const grpc_call_final_info* /*final_info*/,
grpc_closure* /*ignored*/) {
call_data* calld = (call_data*)elem->call_data;
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->~call_data();
}

@ -42,7 +42,7 @@ grpc_workaround_user_agent_md* grpc_parse_user_agent(grpc_mdelem md) {
user_agent_md->workaround_active[i] = ua_parser[i](md);
}
}
grpc_mdelem_set_user_data(md, destroy_user_agent_md, (void*)user_agent_md);
grpc_mdelem_set_user_data(md, destroy_user_agent_md, user_agent_md);
return user_agent_md;
}

@ -42,7 +42,8 @@ grpc_channel* grpc_insecure_channel_create_from_fd(
(target, fd, args));
grpc_arg default_authority_arg = grpc_channel_arg_string_create(
(char*)GRPC_ARG_DEFAULT_AUTHORITY, (char*)"test.authority");
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY),
const_cast<char*>("test.authority"));
grpc_channel_args* final_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

@ -616,7 +616,7 @@ grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
metadata_buffer{grpc_chttp2_incoming_metadata_buffer(arena),
grpc_chttp2_incoming_metadata_buffer(arena)} {
if (server_data) {
id = static_cast<uint32_t>((uintptr_t)server_data);
id = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(server_data));
*t->accepting_stream = this;
grpc_chttp2_stream_map_add(&t->stream_map, id, this);
post_destructive_reclaimer(t);
@ -749,7 +749,7 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
GPR_ASSERT(t->accepting_stream == nullptr);
t->accepting_stream = &accepting;
t->accept_stream_cb(t->accept_stream_cb_user_data, &t->base,
(void*)static_cast<uintptr_t>(id));
reinterpret_cast<void*>(id));
t->accepting_stream = nullptr;
return accepting;
}

@ -1059,8 +1059,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
unsigned int header_index;
for (header_index = 0; header_index < s->header_array.count;
header_index++) {
gpr_free((void*)s->header_array.headers[header_index].key);
gpr_free((void*)s->header_array.headers[header_index].value);
gpr_free(const_cast<char*>(s->header_array.headers[header_index].key));
gpr_free(const_cast<char*>(s->header_array.headers[header_index].value));
}
stream_state->state_op_done[OP_SEND_INITIAL_METADATA] = true;
if (t->use_packet_coalescing) {

@ -153,10 +153,11 @@ struct inproc_stream {
// side to avoid destruction
INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p",
st->accept_stream_cb, st->accept_stream_data);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)this);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, this);
} else {
// This is the server-side and is being called through accept_stream_cb
inproc_stream* cs = (inproc_stream*)server_data;
inproc_stream* cs = const_cast<inproc_stream*>(
static_cast<const inproc_stream*>(server_data));
other_side = cs;
// Ref the server-side stream on behalf of the client now
ref("inproc_init_stream:srv");
@ -1281,8 +1282,8 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
// Add a default authority channel argument for the client
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
default_authority_arg.key = (char*)GRPC_ARG_DEFAULT_AUTHORITY;
default_authority_arg.value.string = (char*)"inproc.authority";
default_authority_arg.key = const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY);
default_authority_arg.value.string = const_cast<char*>("inproc.authority");
grpc_channel_args* client_args =
grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

@ -707,8 +707,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
op->reserved = nullptr;
op++;
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
nullptr);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), nullptr);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: send request message.
GRPC_CLOSURE_INIT(&on_request_sent_, OnRequestSent, this,
@ -742,8 +742,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
Ref(DEBUG_LOCATION, "ADS+OnResponseReceivedLocked").release();
GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
&on_response_received_);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), &on_response_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
op = ops;
@ -759,8 +759,8 @@ XdsClient::ChannelState::AdsCallState::AdsCallState(
// unreffed.
GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
&on_status_received_);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), &on_status_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
@ -1479,8 +1479,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
Ref(DEBUG_LOCATION, "LRS+OnInitialRequestSentLocked").release();
GRPC_CLOSURE_INIT(&on_initial_request_sent_, OnInitialRequestSent, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
&on_initial_request_sent_);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), &on_initial_request_sent_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv initial metadata.
op = ops;
@ -1499,8 +1499,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
Ref(DEBUG_LOCATION, "LRS+OnResponseReceivedLocked").release();
GRPC_CLOSURE_INIT(&on_response_received_, OnResponseReceived, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
&on_response_received_);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), &on_response_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
// Op: recv server status.
op = ops;
@ -1516,8 +1516,8 @@ XdsClient::ChannelState::LrsCallState::LrsCallState(
// unreffed.
GRPC_CLOSURE_INIT(&on_status_received_, OnStatusReceived, this,
grpc_schedule_on_exec_ctx);
call_error = grpc_call_start_batch_and_execute(call_, ops, (size_t)(op - ops),
&on_status_received_);
call_error = grpc_call_start_batch_and_execute(
call_, ops, static_cast<size_t>(op - ops), &on_status_received_);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}

@ -55,7 +55,7 @@ grpc_channel_args* grpc_channel_args_set_channel_default_compression_algorithm(
GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT);
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM;
tmp.key = const_cast<char*>(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM);
tmp.value.integer = algorithm;
return grpc_channel_args_copy_and_add(a, &tmp, 1);
}
@ -108,7 +108,8 @@ grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
/* create a new arg */
grpc_arg tmp;
tmp.type = GRPC_ARG_INTEGER;
tmp.key = (char*)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET;
tmp.key =
const_cast<char*>(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET);
/* all enabled by default */
tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1;
if (state != 0) {

@ -66,9 +66,10 @@ void* gpr_malloc_aligned(size_t size, size_t alignment) {
GPR_ASSERT(((alignment - 1) & alignment) == 0); // Must be power of 2.
size_t extra = alignment - 1 + sizeof(void*);
void* p = gpr_malloc(size + extra);
void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1));
void** ret = reinterpret_cast<void**>(
(reinterpret_cast<uintptr_t>(p) + extra) & ~(alignment - 1));
ret[-1] = p;
return (void*)ret;
return ret;
}
void gpr_free_aligned(void* ptr) { gpr_free((static_cast<void**>(ptr))[-1]); }

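The gpr_malloc_aligned/gpr_free_aligned hunk above spells out its pointer arithmetic with reinterpret_cast. A self-contained sketch of the same over-allocate-and-stash scheme, using plain malloc/free instead of the gpr allocators:

#include <cstdint>
#include <cstdlib>

void* malloc_aligned(std::size_t size, std::size_t alignment) {
  // alignment must be a power of two. Over-allocate by alignment - 1 bytes
  // to guarantee an aligned address exists, plus one pointer slot to
  // remember what malloc actually returned.
  std::size_t extra = alignment - 1 + sizeof(void*);
  void* p = std::malloc(size + extra);
  if (p == nullptr) return nullptr;
  void** ret = reinterpret_cast<void**>(
      (reinterpret_cast<std::uintptr_t>(p) + extra) &
      ~(static_cast<std::uintptr_t>(alignment) - 1));
  ret[-1] = p;  // original pointer lives just below the aligned block
  return ret;
}

void free_aligned(void* ptr) { std::free(static_cast<void**>(ptr)[-1]); }
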
@ -38,7 +38,7 @@ static constexpr gpr_atm GPR_LOG_SEVERITY_UNSET = GPR_LOG_SEVERITY_ERROR + 10;
static constexpr gpr_atm GPR_LOG_SEVERITY_NONE = GPR_LOG_SEVERITY_ERROR + 11;
void gpr_default_log(gpr_log_func_args* args);
static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
static gpr_atm g_log_func = reinterpret_cast<gpr_atm>(gpr_default_log);
static gpr_atm g_min_severity_to_print = GPR_LOG_SEVERITY_UNSET;
static gpr_atm g_min_severity_to_print_stacktrace = GPR_LOG_SEVERITY_UNSET;
@ -80,7 +80,7 @@ void gpr_log_message(const char* file, int line, gpr_log_severity severity,
lfargs.line = line;
lfargs.severity = severity;
lfargs.message = message;
((gpr_log_func)gpr_atm_no_barrier_load(&g_log_func))(&lfargs);
reinterpret_cast<gpr_log_func>(gpr_atm_no_barrier_load(&g_log_func))(&lfargs);
}
void gpr_set_log_verbosity(gpr_log_severity min_severity_to_print) {

@ -55,7 +55,7 @@ char* gpr_strdup(const char* src) {
std::string gpr_format_timespec(gpr_timespec tm) {
char time_buffer[35];
char ns_buffer[11]; // '.' + 9 digits of precision
struct tm* tm_info = localtime((const time_t*)&tm.tv_sec);
struct tm* tm_info = localtime(reinterpret_cast<time_t*>(&tm.tv_sec));
strftime(time_buffer, sizeof(time_buffer), "%Y-%m-%dT%H:%M:%S", tm_info);
snprintf(ns_buffer, 11, ".%09d", tm.tv_nsec);
// This loop trims off trailing zeros by inserting a null character that the
@ -119,7 +119,8 @@ static void asciidump(dump_out* out, const char* buf, size_t len) {
dump_out_append(out, '\'');
}
for (cur = beg; cur != end; ++cur) {
dump_out_append(out, (isprint(*cur) ? *(char*)cur : '.'));
dump_out_append(
out, (isprint(*cur) ? *reinterpret_cast<const char*>(cur) : '.'));
}
if (!out_was_empty) {
dump_out_append(out, '\'');
@ -311,7 +312,7 @@ void gpr_string_split(const char* input, const char* sep, char*** strs,
void* gpr_memrchr(const void* s, int c, size_t n) {
if (s == nullptr) return nullptr;
char* b = (char*)s;
char* b = const_cast<char*>(reinterpret_cast<const char*>(s));
size_t i;
for (i = 0; i < n; i++) {
if (b[n - i - 1] == c) {

@ -48,7 +48,7 @@ static void event_initialize(void) {
/* Hash ev into an element of sync_array[]. */
static struct sync_array_s* hash(gpr_event* ev) {
return &sync_array[((uintptr_t)ev) % event_sync_partitions];
return &sync_array[reinterpret_cast<uintptr_t>(ev) % event_sync_partitions];
}
void gpr_event_init(gpr_event* ev) {
@ -67,16 +67,16 @@ void gpr_event_set(gpr_event* ev, void* value) {
}
void* gpr_event_get(gpr_event* ev) {
return (void*)gpr_atm_acq_load(&ev->state);
return reinterpret_cast<void*>(gpr_atm_acq_load(&ev->state));
}
void* gpr_event_wait(gpr_event* ev, gpr_timespec abs_deadline) {
void* result = (void*)gpr_atm_acq_load(&ev->state);
void* result = reinterpret_cast<void*>(gpr_atm_acq_load(&ev->state));
if (result == nullptr) {
struct sync_array_s* s = hash(ev);
gpr_mu_lock(&s->mu);
do {
result = (void*)gpr_atm_acq_load(&ev->state);
result = reinterpret_cast<void*>(gpr_atm_acq_load(&ev->state));
} while (result == nullptr && !gpr_cv_wait(&s->cv, &s->mu, abs_deadline));
gpr_mu_unlock(&s->mu);
}

@ -199,6 +199,11 @@ Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
} // namespace grpc_core
// The following is in the external namespace as it is exposed as C89 API
gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)pthread_self(); }
gpr_thd_id gpr_thd_currentid(void) {
// Use C-style casting because Linux and OSX have different definitions
// of pthread_t so that a single C++ cast doesn't handle it.
// NOLINTNEXTLINE(google-readability-casting)
return (gpr_thd_id)pthread_self();
}
#endif /* GPR_POSIX_SYNC */

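One case in this diff keeps its C-style cast on purpose: pthread_t is an integer on Linux but a pointer type on macOS, so no single named cast works on both, and the line is annotated so clang-tidy skips just that occurrence. A minimal sketch of the same suppression pattern (the function name here is hypothetical):

#include <pthread.h>
#include <cstdint>

std::uintptr_t current_thread_id() {
  // Deliberate C-style cast: it acts as static_cast where pthread_t is an
  // integer and as reinterpret_cast where it is a pointer type.
  // NOLINTNEXTLINE(google-readability-casting)
  return (std::uintptr_t)pthread_self();
}
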
@ -171,6 +171,8 @@ Thread::Thread(const char* thd_name, void (*thd_body)(void* arg), void* arg,
} // namespace grpc_core
gpr_thd_id gpr_thd_currentid(void) { return (gpr_thd_id)g_thd_info; }
gpr_thd_id gpr_thd_currentid(void) {
return reinterpret_cast<gpr_thd_id>(g_thd_info);
}
#endif /* GPR_WINDOWS */

@ -209,7 +209,7 @@ static void next_address(internal_request* req, grpc_error* error) {
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
(char*)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA), req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(&req->connected, &req->ep, req->context->pollset_set,

@ -281,8 +281,7 @@ static grpc_error* addbyte_body(grpc_http_parser* parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
*body =
static_cast<char*>(gpr_realloc((void*)*body, parser->body_capacity));
*body = static_cast<char*>(gpr_realloc(*body, parser->body_capacity));
}
(*body)[*body_length] = static_cast<char>(byte);
(*body_length)++;

@ -34,13 +34,14 @@ namespace {
grpc_error* DecodeCancelStateError(gpr_atm cancel_state) {
if (cancel_state & 1) {
return (grpc_error*)(cancel_state & ~static_cast<gpr_atm>(1));
return reinterpret_cast<grpc_error*>(cancel_state &
~static_cast<gpr_atm>(1));
}
return GRPC_ERROR_NONE;
}
gpr_atm EncodeCancelStateError(grpc_error* error) {
return static_cast<gpr_atm>(1) | (gpr_atm)error;
return static_cast<gpr_atm>(1) | reinterpret_cast<gpr_atm>(error);
}
} // namespace
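
EncodeCancelStateError/DecodeCancelStateError above pack a flag into the low bit of a pointer stored in a gpr_atm, which is why they need reinterpret_cast in both directions. A standalone sketch of that tagging trick with a plain uintptr_t word (assuming the pointee is at least 2-byte aligned so bit 0 is always free):

#include <cstdint>

struct Closure { int arg; };

std::uintptr_t EncodeTagged(Closure* c) {
  // Set bit 0 to mark the word as carrying extra state alongside the pointer.
  return reinterpret_cast<std::uintptr_t>(c) | std::uintptr_t{1};
}

Closure* DecodeTagged(std::uintptr_t word) {
  // Clear the tag bit before turning the integer back into a pointer.
  return reinterpret_cast<Closure*>(word & ~std::uintptr_t{1});
}
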
@ -203,7 +204,8 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_REF(original_error));
break;
} else {
if (gpr_atm_full_cas(&cancel_state_, original_state, (gpr_atm)closure)) {
if (gpr_atm_full_cas(&cancel_state_, original_state,
reinterpret_cast<gpr_atm>(closure))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
this, closure);
@ -212,7 +214,7 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
// closure with GRPC_ERROR_NONE. This allows callers to clean
// up any resources they may be holding for the callback.
if (original_state != 0) {
closure = (grpc_closure*)original_state;
closure = reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p", this,
@ -239,7 +241,8 @@ void CallCombiner::Cancel(grpc_error* error) {
if (gpr_atm_full_cas(&cancel_state_, original_state,
EncodeCancelStateError(error))) {
if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
grpc_closure* notify_on_cancel =
reinterpret_cast<grpc_closure*>(original_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p",

@ -146,7 +146,8 @@ static void combiner_exec(grpc_core::Combiner* lock, grpc_closure* cl,
// offload for one or two actions, and that's fine
gpr_atm initiator =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
if (initiator != 0 &&
initiator != reinterpret_cast<gpr_atm>(grpc_core::ExecCtx::Get())) {
gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
}
}

@ -166,7 +166,8 @@ static void error_destroy(grpc_error* err) {
GPR_ASSERT(!grpc_error_is_special(err));
unref_errs(err);
unref_strs(err);
gpr_free((void*)gpr_atm_acq_load(&err->atomics.error_string));
gpr_free(
reinterpret_cast<void*>(gpr_atm_acq_load(&err->atomics.error_string)));
gpr_free(err);
}
@ -237,10 +238,10 @@ static void internal_set_str(grpc_error** err, grpc_error_strs which,
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
if (slot == UINT8_MAX) {
const char* str = grpc_slice_to_c_string(value);
char* str = grpc_slice_to_c_string(value);
gpr_log(GPR_ERROR, "Error %p is full, dropping string {\"%s\":\"%s\"}",
*err, error_str_name(which), str);
gpr_free((void*)str);
gpr_free(str);
return;
}
} else {
@ -258,10 +259,10 @@ static void internal_set_time(grpc_error** err, grpc_error_times which,
if (slot == UINT8_MAX) {
slot = get_placement(err, sizeof(value));
if (slot == UINT8_MAX) {
const char* time_str = fmt_time(value);
char* time_str = fmt_time(value);
gpr_log(GPR_ERROR, "Error %p is full, dropping \"%s\":\"%s\"}", *err,
error_time_name(which), time_str);
gpr_free((void*)time_str);
gpr_free(time_str);
return;
}
}
@ -426,7 +427,8 @@ static grpc_error* copy_error_and_unref(grpc_error* in) {
// bulk memcpy of the rest of the struct.
// NOLINTNEXTLINE(bugprone-sizeof-expression)
size_t skip = sizeof(&out->atomics);
memcpy((void*)((uintptr_t)out + skip), (void*)((uintptr_t)in + skip),
memcpy(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(out) + skip),
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(in) + skip),
sizeof(*in) + (in->arena_size * sizeof(intptr_t)) - skip);
// manually set the atomics and the new capacity
gpr_atm_no_barrier_store(&out->atomics.error_string, 0);
@ -632,8 +634,8 @@ static char* fmt_str(const grpc_slice& slice) {
char* s = nullptr;
size_t sz = 0;
size_t cap = 0;
append_esc_str((const uint8_t*)GRPC_SLICE_START_PTR(slice),
GRPC_SLICE_LENGTH(slice), &s, &sz, &cap);
append_esc_str(GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice), &s, &sz,
&cap);
append_chr(0, &s, &sz, &cap);
return s;
}
@ -744,7 +746,8 @@ const char* grpc_error_string(grpc_error* err) {
if (err == GRPC_ERROR_OOM) return oom_error_string;
if (err == GRPC_ERROR_CANCELLED) return cancelled_error_string;
void* p = (void*)gpr_atm_acq_load(&err->atomics.error_string);
void* p =
reinterpret_cast<void*>(gpr_atm_acq_load(&err->atomics.error_string));
if (p != nullptr) {
return static_cast<const char*>(p);
}
@ -763,9 +766,10 @@ const char* grpc_error_string(grpc_error* err) {
char* out = finish_kvs(&kvs);
if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) {
if (!gpr_atm_rel_cas(&err->atomics.error_string, 0,
reinterpret_cast<gpr_atm>(out))) {
gpr_free(out);
out = (char*)gpr_atm_acq_load(&err->atomics.error_string);
out = reinterpret_cast<char*>(gpr_atm_acq_load(&err->atomics.error_string));
}
return out;

@ -803,7 +803,8 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
neighborhood->active_root = pollset->next = pollset->prev = pollset;
/* Make this the designated poller if there isn't one already */
if (worker->state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
gpr_atm_no_barrier_cas(&g_active_poller, 0,
reinterpret_cast<gpr_atm>(worker))) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
} else {
@ -885,8 +886,9 @@ static bool check_neighborhood_for_available_poller(
do {
switch (inspect_worker->state) {
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
if (gpr_atm_no_barrier_cas(
&g_active_poller, 0,
reinterpret_cast<gpr_atm>(inspect_worker))) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker);
@ -944,7 +946,8 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
SET_KICK_STATE(worker, KICKED);
grpc_closure_list_move(&worker->schedule_on_end_work,
grpc_core::ExecCtx::Get()->closure_list());
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (gpr_atm_no_barrier_load(&g_active_poller) ==
reinterpret_cast<gpr_atm>(worker)) {
if (worker->next != worker && worker->next->state == UNKICKED) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
@ -1071,8 +1074,9 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
std::vector<std::string> log;
log.push_back(absl::StrFormat(
"PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
(void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker));
reinterpret_cast<void*>(gpr_tls_get(&g_current_thread_pollset)),
reinterpret_cast<void*>(gpr_tls_get(&g_current_thread_worker)),
pollset->root_worker));
if (pollset->root_worker != nullptr) {
log.push_back(absl::StrFormat(
" {kick_state=%s next=%p {kick_state=%s}}",
@ -1088,7 +1092,8 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
if (specific_worker == nullptr) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (gpr_tls_get(&g_current_thread_pollset) !=
reinterpret_cast<intptr_t>(pollset)) {
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
@ -1115,8 +1120,9 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
goto done;
} else if (root_worker == next_worker && // only try and wake up a poller
// if there is no next worker
root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
&g_active_poller)) {
root_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", root_worker);
@ -1180,7 +1186,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
}
goto done;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
@ -1188,7 +1194,8 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
SET_KICK_STATE(specific_worker, KICKED);
goto done;
} else if (specific_worker ==
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick active poller");
@ -1223,7 +1230,7 @@ static void pollset_add_fd(grpc_pollset* /*pollset*/, grpc_fd* /*fd*/) {}
*/
static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
return reinterpret_cast<grpc_pollset_set*>(static_cast<intptr_t>(0xdeafbeef));
}
static void pollset_set_destroy(grpc_pollset_set* /*pss*/) {}

@ -577,7 +577,8 @@ static grpc_error* pollable_create(pollable_type type, pollable** p) {
}
struct epoll_event ev;
ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = (void*)(1 | (intptr_t) & (*p)->wakeup);
ev.data.ptr =
reinterpret_cast<void*>(1 | reinterpret_cast<intptr_t>(&(*p)->wakeup));
if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
err = GRPC_OS_ERROR(errno, "epoll_ctl");
GRPC_FD_TRACE(
@ -692,7 +693,8 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE;
}
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (gpr_tls_get(&g_current_thread_worker) ==
reinterpret_cast<intptr_t>(specific_worker)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
}
@ -729,13 +731,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GRPC_STATS_INC_POLLSET_KICK();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
(void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
"PS:%p kick %p tls_pollset=%" PRIxPTR " tls_worker=%" PRIxPTR
" pollset.root_worker=%p",
pollset, specific_worker, gpr_tls_get(&g_current_thread_pollset),
gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
}
if (specific_worker == nullptr) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (gpr_tls_get(&g_current_thread_pollset) !=
reinterpret_cast<intptr_t>(pollset)) {
if (pollset->root_worker == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
@ -881,15 +884,16 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
int n = pollable_obj->event_cursor++;
struct epoll_event* ev = &pollable_obj->events[n];
void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) {
if (1 & reinterpret_cast<intptr_t>(data_ptr)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
}
append_error(&error,
grpc_wakeup_fd_consume_wakeup(
(grpc_wakeup_fd*)((~static_cast<intptr_t>(1)) &
(intptr_t)data_ptr)),
err_desc);
append_error(
&error,
grpc_wakeup_fd_consume_wakeup(reinterpret_cast<grpc_wakeup_fd*>(
~static_cast<intptr_t>(1) &
reinterpret_cast<intptr_t>(data_ptr))),
err_desc);
} else {
grpc_fd* fd =
reinterpret_cast<grpc_fd*>(reinterpret_cast<intptr_t>(data_ptr) & ~2);

@ -775,7 +775,7 @@ static grpc_error* pollset_kick_ext(grpc_pollset* p,
}
p->kicked_without_pollers = true;
} else if (gpr_tls_get(&g_current_thread_worker) !=
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
GPR_TIMER_MARK("different_thread_worker", 0);
if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
specific_worker->reevaluate_polling_on_wakeup = true;
@ -792,18 +792,20 @@ static grpc_error* pollset_kick_ext(grpc_pollset* p,
kick_append_error(&error,
grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd));
}
} else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
} else if (gpr_tls_get(&g_current_thread_poller) !=
reinterpret_cast<intptr_t>(p)) {
GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
GPR_TIMER_MARK("kick_anonymous", 0);
specific_worker = pop_front_worker(p);
if (specific_worker != nullptr) {
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (gpr_tls_get(&g_current_thread_worker) ==
reinterpret_cast<intptr_t>(specific_worker)) {
GPR_TIMER_MARK("kick_anonymous_not_self", 0);
push_back_worker(p, specific_worker);
specific_worker = pop_front_worker(p);
if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
reinterpret_cast<intptr_t>(specific_worker)) {
push_back_worker(p, specific_worker);
specific_worker = nullptr;
}
@ -987,7 +989,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
void* buf = gpr_malloc(pfd_size + watch_size);
pfds = static_cast<struct pollfd*>(buf);
watchers = static_cast<grpc_fd_watcher*>(
(void*)(static_cast<char*>(buf) + pfd_size));
static_cast<void*>((static_cast<char*>(buf) + pfd_size)));
}
fd_count = 0;

@ -283,7 +283,8 @@ void Executor::Enqueue(grpc_closure* closure, grpc_error* error,
return;
}
ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
ThreadState* ts =
reinterpret_cast<ThreadState*>(gpr_tls_get(&g_this_thread_state));
if (ts == nullptr) {
ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
cur_thread_count)];

@ -59,7 +59,7 @@ void grpc_iomgr_init() {
gpr_cv_init(&g_rcv);
grpc_core::Executor::InitAll();
g_root_object.next = g_root_object.prev = &g_root_object;
g_root_object.name = (char*)"root";
g_root_object.name = const_cast<char*>("root");
grpc_iomgr_platform_init();
grpc_timer_list_init();
grpc_core::grpc_errqueue_init();

@ -96,8 +96,9 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
* referencing it. */
gpr_atm curr = gpr_atm_acq_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "LockfreeEvent::NotifyOn: %p curr=%p closure=%p", this,
(void*)curr, closure);
gpr_log(GPR_DEBUG,
"LockfreeEvent::NotifyOn: %p curr=%" PRIxPTR " closure=%p", this,
curr, closure);
}
switch (curr) {
case kClosureNotReady: {
@ -108,7 +109,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
The release itself pairs with the acquire half of a set_ready full
barrier. */
if (gpr_atm_rel_cas(&state_, kClosureNotReady, (gpr_atm)closure)) {
if (gpr_atm_rel_cas(&state_, kClosureNotReady,
reinterpret_cast<gpr_atm>(closure))) {
return; /* Successful. Return */
}
@ -137,7 +139,8 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
contains a pointer to the shutdown-error). If the fd is shutdown,
schedule the closure with the shutdown error */
if ((curr & kShutdownBit) > 0) {
grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
grpc_error* shutdown_err =
reinterpret_cast<grpc_error*>(curr & ~kShutdownBit);
ExecCtx::Run(DEBUG_LOCATION, closure,
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_err, 1));
@ -157,13 +160,14 @@ void LockfreeEvent::NotifyOn(grpc_closure* closure) {
}
bool LockfreeEvent::SetShutdown(grpc_error* shutdown_error) {
gpr_atm new_state = (gpr_atm)shutdown_error | kShutdownBit;
gpr_atm new_state = reinterpret_cast<gpr_atm>(shutdown_error) | kShutdownBit;
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "LockfreeEvent::SetShutdown: %p curr=%p err=%s",
&state_, (void*)curr, grpc_error_string(shutdown_error));
gpr_log(GPR_DEBUG,
"LockfreeEvent::SetShutdown: %p curr=%" PRIxPTR " err=%s",
&state_, curr, grpc_error_string(shutdown_error));
}
switch (curr) {
case kClosureReady:
@ -190,7 +194,7 @@ bool LockfreeEvent::SetShutdown(grpc_error* shutdown_error) {
happens-after on that edge), and a release to pair with anything
loading the shutdown state. */
if (gpr_atm_full_cas(&state_, curr, new_state)) {
ExecCtx::Run(DEBUG_LOCATION, (grpc_closure*)curr,
ExecCtx::Run(DEBUG_LOCATION, reinterpret_cast<grpc_closure*>(curr),
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"FD Shutdown", &shutdown_error, 1));
return true;
@ -211,8 +215,8 @@ void LockfreeEvent::SetReady() {
gpr_atm curr = gpr_atm_no_barrier_load(&state_);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "LockfreeEvent::SetReady: %p curr=%p", &state_,
(void*)curr);
gpr_log(GPR_DEBUG, "LockfreeEvent::SetReady: %p curr=%" PRIxPTR, &state_,
curr);
}
switch (curr) {
@ -240,7 +244,8 @@ void LockfreeEvent::SetReady() {
spurious set_ready; release pairs with this or the acquire in
notify_on (or set_shutdown) */
else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
ExecCtx::Run(DEBUG_LOCATION, (grpc_closure*)curr, GRPC_ERROR_NONE);
ExecCtx::Run(DEBUG_LOCATION, reinterpret_cast<grpc_closure*>(curr),
GRPC_ERROR_NONE);
return;
}
/* else the state changed again (only possible by either a racing

@ -23,7 +23,7 @@
#include "src/core/lib/iomgr/pollset_set.h"
static grpc_pollset_set* pollset_set_create(void) {
return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
return reinterpret_cast<grpc_pollset_set*>(static_cast<intptr_t>(0xdeafbeef));
}
static void pollset_set_destroy(grpc_pollset_set* /*pollset_set*/) {}

@ -36,7 +36,7 @@ inline grpc_error* grpc_socket_error(char* error) {
}
inline char* grpc_slice_buffer_start(grpc_slice_buffer* buffer, int i) {
return (char*)GRPC_SLICE_START_PTR(buffer->slices[i]);
return reinterpret_cast<char*>(GRPC_SLICE_START_PTR(buffer->slices[i]));
}
inline int grpc_slice_buffer_length(grpc_slice_buffer* buffer, int i) {

@ -659,8 +659,8 @@ grpc_resource_quota* grpc_resource_quota_create(const char* name) {
if (name != nullptr) {
resource_quota->name = name;
} else {
resource_quota->name =
absl::StrCat("anonymous_pool_", (intptr_t)resource_quota);
resource_quota->name = absl::StrCat(
"anonymous_pool_", reinterpret_cast<intptr_t>(resource_quota));
}
GRPC_CLOSURE_INIT(&resource_quota->rq_step_closure, rq_step, resource_quota,
nullptr);
@ -807,8 +807,8 @@ grpc_resource_user* grpc_resource_user_create(
if (name != nullptr) {
resource_user->name = name;
} else {
resource_user->name =
absl::StrCat("anonymous_resource_user_", (intptr_t)resource_user);
resource_user->name = absl::StrCat(
"anonymous_resource_user_", reinterpret_cast<intptr_t>(resource_user));
}
return resource_user;
}

@ -201,8 +201,8 @@ std::string grpc_sockaddr_to_string(const grpc_resolved_address* resolved_addr,
void grpc_string_to_sockaddr(grpc_resolved_address* out, const char* addr,
int port) {
memset(out, 0, sizeof(grpc_resolved_address));
grpc_sockaddr_in6* addr6 = (grpc_sockaddr_in6*)out->addr;
grpc_sockaddr_in* addr4 = (grpc_sockaddr_in*)out->addr;
grpc_sockaddr_in6* addr6 = reinterpret_cast<grpc_sockaddr_in6*>(out->addr);
grpc_sockaddr_in* addr4 = reinterpret_cast<grpc_sockaddr_in*>(out->addr);
if (grpc_inet_pton(GRPC_AF_INET6, addr, &addr6->sin6_addr) == 1) {
addr6->sin6_family = GRPC_AF_INET6;
out->len = sizeof(grpc_sockaddr_in6);
@ -260,9 +260,11 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case GRPC_AF_INET:
return grpc_ntohs(((grpc_sockaddr_in*)addr)->sin_port);
return grpc_ntohs(
(reinterpret_cast<const grpc_sockaddr_in*>(addr))->sin_port);
case GRPC_AF_INET6:
return grpc_ntohs(((grpc_sockaddr_in6*)addr)->sin6_port);
return grpc_ntohs(
(reinterpret_cast<const grpc_sockaddr_in6*>(addr))->sin6_port);
default:
if (grpc_is_unix_socket(resolved_addr)) {
return 1;
@ -273,19 +275,17 @@ int grpc_sockaddr_get_port(const grpc_resolved_address* resolved_addr) {
}
}
int grpc_sockaddr_set_port(const grpc_resolved_address* resolved_addr,
int port) {
const grpc_sockaddr* addr =
reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr);
int grpc_sockaddr_set_port(grpc_resolved_address* resolved_addr, int port) {
grpc_sockaddr* addr = reinterpret_cast<grpc_sockaddr*>(resolved_addr->addr);
switch (addr->sa_family) {
case GRPC_AF_INET:
GPR_ASSERT(port >= 0 && port < 65536);
((grpc_sockaddr_in*)addr)->sin_port =
(reinterpret_cast<grpc_sockaddr_in*>(addr))->sin_port =
grpc_htons(static_cast<uint16_t>(port));
return 1;
case GRPC_AF_INET6:
GPR_ASSERT(port >= 0 && port < 65536);
((grpc_sockaddr_in6*)addr)->sin6_port =
(reinterpret_cast<grpc_sockaddr_in6*>(addr))->sin6_port =
grpc_htons(static_cast<uint16_t>(port));
return 1;
default:

@ -56,7 +56,7 @@ void grpc_sockaddr_make_wildcard6(int port, grpc_resolved_address* wild_out);
int grpc_sockaddr_get_port(const grpc_resolved_address* addr);
/* Set IP port number of a sockaddr */
int grpc_sockaddr_set_port(const grpc_resolved_address* addr, int port);
int grpc_sockaddr_set_port(grpc_resolved_address* addr, int port);
// Converts a sockaddr into a newly-allocated human-readable string.
//

@ -87,8 +87,9 @@ static const grpc_arg_pointer_vtable socket_factory_arg_vtable = {
socket_factory_arg_copy, socket_factory_arg_destroy, socket_factory_cmp};
grpc_arg grpc_socket_factory_to_arg(grpc_socket_factory* factory) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_FACTORY,
factory, &socket_factory_arg_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_SOCKET_FACTORY), factory,
&socket_factory_arg_vtable);
}
#endif

@ -78,6 +78,7 @@ static const grpc_arg_pointer_vtable socket_mutator_arg_vtable = {
socket_mutator_arg_copy, socket_mutator_arg_destroy, socket_mutator_cmp};
grpc_arg grpc_socket_mutator_to_arg(grpc_socket_mutator* mutator) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SOCKET_MUTATOR,
mutator, &socket_mutator_arg_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_SOCKET_MUTATOR), mutator,
&socket_mutator_arg_vtable);
}

@ -61,7 +61,7 @@ static void custom_close_callback(grpc_custom_socket* /*socket*/) {}
static void on_alarm(void* acp, grpc_error* error) {
int done;
grpc_custom_socket* socket = (grpc_custom_socket*)acp;
grpc_custom_socket* socket = static_cast<grpc_custom_socket*>(acp);
grpc_custom_tcp_connect* connect = socket->connector;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
const char* str = grpc_error_string(error);
@ -124,13 +124,14 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
resource_quota =
grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
channel_args->args[i].value.pointer.p));
}
}
}
grpc_custom_socket* socket =
(grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
static_cast<grpc_custom_socket*>(gpr_malloc(sizeof(grpc_custom_socket)));
socket->refs = 2;
grpc_custom_socket_vtable->init(socket, GRPC_AF_UNSPEC);
grpc_custom_tcp_connect* connect = new grpc_custom_tcp_connect();
@ -153,8 +154,8 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
grpc_schedule_on_exec_ctx);
grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
grpc_custom_socket_vtable->connect(
socket, (const grpc_sockaddr*)resolved_addr->addr, resolved_addr->len,
custom_connect_callback);
socket, reinterpret_cast<const grpc_sockaddr*>(resolved_addr->addr),
resolved_addr->len, custom_connect_callback);
}
grpc_tcp_client_vtable custom_tcp_client_vtable = {tcp_connect};

@ -73,7 +73,8 @@ struct custom_tcp_endpoint {
std::string local_address;
};
static void tcp_free(grpc_custom_socket* s) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)s->endpoint;
custom_tcp_endpoint* tcp =
reinterpret_cast<custom_tcp_endpoint*>(s->endpoint);
grpc_resource_user_unref(tcp->resource_user);
delete tcp;
s->refs--;
@ -149,18 +150,19 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
grpc_slice_buffer garbage;
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
custom_tcp_endpoint* tcp =
reinterpret_cast<custom_tcp_endpoint*>(socket->endpoint);
if (error == GRPC_ERROR_NONE && nread == 0) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF");
}
if (error == GRPC_ERROR_NONE) {
// Successful read
if ((size_t)nread < tcp->read_slices->length) {
if (nread < tcp->read_slices->length) {
/* TODO(murgatroid99): Instead of discarding the unused part of the read
* buffer, reuse it as the next read buffer. */
grpc_slice_buffer_init(&garbage);
grpc_slice_buffer_trim_end(
tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
grpc_slice_buffer_trim_end(tcp->read_slices,
tcp->read_slices->length - nread, &garbage);
grpc_slice_buffer_reset_and_unref_internal(&garbage);
}
} else {
@ -170,7 +172,7 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
}
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
custom_tcp_endpoint* tcp = static_cast<custom_tcp_endpoint*>(tcpp);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_string(error));
@ -179,7 +181,8 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
/* Before calling read, we allocate a buffer with exactly one slice
* to tcp->read_slices and wait for the callback indicating that the
* allocation was successful. So slices[0] should always exist here */
char* buffer = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
char* buffer = reinterpret_cast<char*>(
GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]));
size_t len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
grpc_custom_socket_vtable->read(tcp->socket, buffer, len,
custom_read_callback);
@ -195,7 +198,7 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
grpc_closure* cb, bool /*urgent*/) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
GPR_ASSERT(tcp->read_cb == nullptr);
tcp->read_cb = cb;
@ -213,7 +216,8 @@ static void custom_write_callback(grpc_custom_socket* socket,
grpc_error* error) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
custom_tcp_endpoint* tcp =
reinterpret_cast<custom_tcp_endpoint*>(socket->endpoint);
grpc_closure* cb = tcp->write_cb;
tcp->write_cb = nullptr;
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
@ -226,7 +230,7 @@ static void custom_write_callback(grpc_custom_socket* socket,
static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
grpc_closure* cb, void* /*arg*/) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
@ -284,7 +288,7 @@ static void endpoint_delete_from_pollset_set(grpc_endpoint* ep,
}
static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
if (!tcp->shutting_down) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
const char* str = grpc_error_string(why);
@ -309,28 +313,29 @@ static void custom_close_callback(grpc_custom_socket* socket) {
} else if (socket->endpoint) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)socket->endpoint;
custom_tcp_endpoint* tcp =
reinterpret_cast<custom_tcp_endpoint*>(socket->endpoint);
TCP_UNREF(tcp, "destroy");
}
}
static void endpoint_destroy(grpc_endpoint* ep) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
grpc_custom_socket_vtable->close(tcp->socket, custom_close_callback);
}
static absl::string_view endpoint_get_peer(grpc_endpoint* ep) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
return tcp->peer_string;
}
static absl::string_view endpoint_get_local_address(grpc_endpoint* ep) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
return tcp->local_address;
}
static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)ep;
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
return tcp->resource_user;
}
@ -362,7 +367,7 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
}
socket->refs++;
socket->endpoint = (grpc_endpoint*)tcp;
socket->endpoint = reinterpret_cast<grpc_endpoint*>(tcp);
tcp->socket = socket;
tcp->base.vtable = &vtable;
gpr_ref_init(&tcp->refcount, 1);

@ -465,7 +465,8 @@ static void run_poller(void* bp, grpc_error* /*error_ignored*/) {
if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
bool cas_ok =
gpr_atm_full_cas(&g_backup_poller, reinterpret_cast<gpr_atm>(p), 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
@ -487,7 +488,8 @@ static void run_poller(void* bp, grpc_error* /*error_ignored*/) {
}
static void drop_uncovered(grpc_tcp* /*tcp*/) {
backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
backup_poller* p =
reinterpret_cast<backup_poller*>(gpr_atm_acq_load(&g_backup_poller));
gpr_atm old_count =
gpr_atm_full_fetch_add(&g_uncovered_notifications_pending, -1);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
@ -526,8 +528,8 @@ static void cover_self(grpc_tcp* tcp) {
GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
grpc_core::ExecutorJobType::LONG);
} else {
while ((p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller)) ==
nullptr) {
while ((p = reinterpret_cast<backup_poller*>(
gpr_atm_acq_load(&g_backup_poller))) == nullptr) {
// spin waiting for backup poller
}
}
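
The backup-poller hunks above store a pointer in a gpr_atm (an integer-sized atomic word), so every load and compare-and-swap needs a cast between the pointer and the word; the change spells those casts out with reinterpret_cast. A standalone sketch of the same pattern, assuming std::atomic<intptr_t> in place of gpr_atm and a placeholder Poller type:

#include <atomic>
#include <cassert>
#include <cstdint>

struct Poller { int id; };

// Stand-in for the gpr_atm global holding the current backup poller.
std::atomic<intptr_t> g_poller{0};

int main() {
  Poller p{42};
  intptr_t expected = 0;
  // Publish the poller only if none is registered yet.
  g_poller.compare_exchange_strong(expected, reinterpret_cast<intptr_t>(&p),
                                   std::memory_order_acq_rel);
  // Acquire-load the word and cast it back to the pointer type.
  Poller* current =
      reinterpret_cast<Poller*>(g_poller.load(std::memory_order_acquire));
  assert(current == &p && current->id == 42);
  return 0;
}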

@ -83,7 +83,8 @@ struct grpc_tcp_server {
static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
grpc_tcp_server* s =
static_cast<grpc_tcp_server*>(gpr_malloc(sizeof(grpc_tcp_server)));
// Let the implementation decide if so_reuseport can be enabled or not.
s->so_reuseport = true;
s->resource_quota = grpc_resource_quota_create(nullptr);
@ -95,7 +96,7 @@ static grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
if (args->args[i].type == GRPC_ARG_POINTER) {
grpc_resource_quota_unref_internal(s->resource_quota);
s->resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)args->args[i].value.pointer.p);
static_cast<grpc_resource_quota*>(args->args[i].value.pointer.p));
} else {
grpc_resource_quota_unref_internal(s->resource_quota);
gpr_free(s);
@ -213,7 +214,7 @@ static void tcp_server_unref(grpc_tcp_server* s) {
static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
grpc_tcp_server_acceptor* acceptor =
(grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
static_cast<grpc_tcp_server_acceptor*>(gpr_malloc(sizeof(*acceptor)));
grpc_endpoint* ep = nullptr;
grpc_resolved_address peer_name;
std::string peer_name_string;
@ -222,7 +223,8 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
memset(&peer_name, 0, sizeof(grpc_resolved_address));
peer_name.len = GRPC_MAX_SOCKADDR_SIZE;
err = grpc_custom_socket_vtable->getpeername(
socket, (grpc_sockaddr*)&peer_name.addr, (int*)&peer_name.len);
socket, reinterpret_cast<grpc_sockaddr*>(&peer_name.addr),
reinterpret_cast<int*>(&peer_name.len));
if (err == GRPC_ERROR_NONE) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
@ -262,8 +264,8 @@ static void custom_accept_callback(grpc_custom_socket* socket,
}
finish_accept(sp, client);
if (!sp->closed) {
grpc_custom_socket* new_socket =
(grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
grpc_custom_socket* new_socket = static_cast<grpc_custom_socket*>(
gpr_malloc(sizeof(grpc_custom_socket)));
new_socket->endpoint = nullptr;
new_socket->listener = nullptr;
new_socket->connector = nullptr;
@ -290,8 +292,9 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s,
flags |= GRPC_CUSTOM_SOCKET_OPT_SO_REUSEPORT;
}
error = grpc_custom_socket_vtable->bind(socket, (grpc_sockaddr*)addr->addr,
addr->len, flags);
error = grpc_custom_socket_vtable->bind(
socket, reinterpret_cast<grpc_sockaddr*>(const_cast<char*>(addr->addr)),
addr->len, flags);
if (error != GRPC_ERROR_NONE) {
return error;
}
@ -303,7 +306,8 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s,
sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE;
error = grpc_custom_socket_vtable->getsockname(
socket, (grpc_sockaddr*)&sockname_temp.addr, (int*)&sockname_temp.len);
socket, reinterpret_cast<grpc_sockaddr*>(&sockname_temp.addr),
reinterpret_cast<int*>(&sockname_temp.len));
if (error != GRPC_ERROR_NONE) {
return error;
}
@ -312,7 +316,7 @@ static grpc_error* add_socket_to_server(grpc_tcp_server* s,
GPR_ASSERT(port >= 0);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
sp = (grpc_tcp_listener*)gpr_zalloc(sizeof(grpc_tcp_listener));
sp = static_cast<grpc_tcp_listener*>(gpr_zalloc(sizeof(grpc_tcp_listener)));
sp->next = nullptr;
if (s->head == nullptr) {
s->head = sp;
@ -358,12 +362,13 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
socket = sp->socket;
sockname_temp.len = GRPC_MAX_SOCKADDR_SIZE;
if (nullptr == grpc_custom_socket_vtable->getsockname(
socket, (grpc_sockaddr*)&sockname_temp.addr,
(int*)&sockname_temp.len)) {
socket,
reinterpret_cast<grpc_sockaddr*>(&sockname_temp.addr),
reinterpret_cast<int*>(&sockname_temp.len))) {
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
allocated_addr =
(grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
allocated_addr = static_cast<grpc_resolved_address*>(
gpr_malloc(sizeof(grpc_resolved_address)));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;
@ -391,7 +396,8 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
}
family = grpc_sockaddr_get_family(addr);
socket = (grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
socket =
static_cast<grpc_custom_socket*>(gpr_malloc(sizeof(grpc_custom_socket)));
socket->refs = 1;
socket->endpoint = nullptr;
socket->listener = nullptr;
@ -430,8 +436,8 @@ static void tcp_server_start(grpc_tcp_server* server,
server->on_accept_cb = on_accept_cb;
server->on_accept_cb_arg = cb_arg;
for (sp = server->head; sp; sp = sp->next) {
grpc_custom_socket* new_socket =
(grpc_custom_socket*)gpr_malloc(sizeof(grpc_custom_socket));
grpc_custom_socket* new_socket = static_cast<grpc_custom_socket*>(
gpr_malloc(sizeof(grpc_custom_socket)));
new_socket->endpoint = nullptr;
new_socket->listener = nullptr;
new_socket->connector = nullptr;

@ -57,16 +57,16 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
timer->pending = true;
timer->closure = closure;
grpc_custom_timer* timer_wrapper =
(grpc_custom_timer*)gpr_malloc(sizeof(grpc_custom_timer));
static_cast<grpc_custom_timer*>(gpr_malloc(sizeof(grpc_custom_timer)));
timer_wrapper->timeout_ms = timeout;
timer->custom_timer = (void*)timer_wrapper;
timer->custom_timer = timer_wrapper;
timer_wrapper->original = timer;
custom_timer_impl->start(timer_wrapper);
}
static void timer_cancel(grpc_timer* timer) {
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
grpc_custom_timer* tw = (grpc_custom_timer*)timer->custom_timer;
grpc_custom_timer* tw = static_cast<grpc_custom_timer*>(timer->custom_timer);
if (timer->pending) {
timer->pending = false;
grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure,

@ -570,8 +570,7 @@ static int add_socket_to_server(grpc_udp_server* s, int fd,
return port;
}
int grpc_udp_server_add_port(grpc_udp_server* s,
const grpc_resolved_address* addr,
int grpc_udp_server_add_port(grpc_udp_server* s, grpc_resolved_address* addr,
int rcv_buf_size, int snd_buf_size,
GrpcUdpHandlerFactory* handler_factory,
size_t num_listeners) {

@ -93,8 +93,7 @@ int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index);
/* TODO(ctiller): deprecate this, and make grpc_udp_server_add_ports to handle
all of the multiple socket port matching logic in one place */
int grpc_udp_server_add_port(grpc_udp_server* s,
const grpc_resolved_address* addr,
int grpc_udp_server_add_port(grpc_udp_server* s, grpc_resolved_address* addr,
int rcv_buf_size, int snd_buf_size,
GrpcUdpHandlerFactory* handler_factory,
size_t num_listeners);

@ -96,16 +96,15 @@ std::string grpc_sockaddr_to_uri_unix_if_possible(
if (addr->sa_family != AF_UNIX) {
return "";
}
if (((struct sockaddr_un*)addr)->sun_path[0] == '\0' &&
((struct sockaddr_un*)addr)->sun_path[1] != '\0') {
const struct sockaddr_un* un =
reinterpret_cast<const struct sockaddr_un*>(resolved_addr->addr);
const auto* unix_addr = reinterpret_cast<const struct sockaddr_un*>(addr);
if (unix_addr->sun_path[0] == '\0' && unix_addr->sun_path[1] != '\0') {
return absl::StrCat(
"unix-abstract:",
absl::string_view(un->sun_path + 1,
resolved_addr->len - sizeof(un->sun_family) - 1));
absl::string_view(
unix_addr->sun_path + 1,
resolved_addr->len - sizeof(unix_addr->sun_family) - 1));
}
return absl::StrCat("unix:", ((struct sockaddr_un*)addr)->sun_path);
return absl::StrCat("unix:", unix_addr->sun_path);
}
#endif

@ -295,8 +295,9 @@ static const grpc_arg_pointer_vtable auth_context_pointer_vtable = {
auth_context_pointer_cmp};
grpc_arg grpc_auth_context_to_arg(grpc_auth_context* c) {
return grpc_channel_arg_pointer_create((char*)GRPC_AUTH_CONTEXT_ARG, c,
&auth_context_pointer_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_AUTH_CONTEXT_ARG), c,
&auth_context_pointer_vtable);
}
grpc_auth_context* grpc_auth_context_from_arg(const grpc_arg* arg) {

@ -67,9 +67,9 @@ static const grpc_arg_pointer_vtable credentials_pointer_vtable = {
grpc_arg grpc_channel_credentials_to_arg(
grpc_channel_credentials* credentials) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CHANNEL_CREDENTIALS,
credentials,
&credentials_pointer_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_CHANNEL_CREDENTIALS), credentials,
&credentials_pointer_vtable);
}
grpc_channel_credentials* grpc_channel_credentials_from_arg(
@ -135,8 +135,8 @@ static const grpc_arg_pointer_vtable cred_ptr_vtable = {
server_credentials_pointer_cmp};
grpc_arg grpc_server_credentials_to_arg(grpc_server_credentials* c) {
return grpc_channel_arg_pointer_create((char*)GRPC_SERVER_CREDENTIALS_ARG, c,
&cred_ptr_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_SERVER_CREDENTIALS_ARG), c, &cred_ptr_vtable);
}
grpc_server_credentials* grpc_server_credentials_from_arg(const grpc_arg* arg) {

@ -55,7 +55,8 @@ std::string HMAC(const std::string& key, const std::string& msg) {
unsigned int len;
unsigned char digest[EVP_MAX_MD_SIZE];
HMAC(EVP_sha256(), key.c_str(), key.length(),
(const unsigned char*)msg.c_str(), msg.length(), digest, &len);
reinterpret_cast<const unsigned char*>(msg.c_str()), msg.length(),
digest, &len);
return std::string(digest, digest + len);
}

@ -76,7 +76,8 @@ grpc_fake_transport_security_server_credentials_create() {
grpc_arg grpc_fake_transport_expected_targets_arg(char* expected_targets) {
return grpc_channel_arg_string_create(
(char*)GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
const_cast<char*>(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS),
expected_targets);
}
const char* grpc_fake_transport_get_expected_targets(

@ -175,8 +175,8 @@ static int is_metadata_server_reachable() {
detector.is_done = 0;
detector.success = 0;
memset(&request, 0, sizeof(grpc_httpcli_request));
request.host = (char*)GRPC_COMPUTE_ENGINE_DETECTION_HOST;
request.http.path = (char*)"/";
request.host = const_cast<char*>(GRPC_COMPUTE_ENGINE_DETECTION_HOST);
request.http.path = const_cast<char*>("/");
grpc_httpcli_context_init(&context);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("google_default_credentials");

@ -112,7 +112,7 @@ grpc_auth_json_key grpc_auth_json_key_create_from_json(const Json& json) {
goto end;
}
result.private_key =
PEM_read_bio_RSAPrivateKey(bio, nullptr, nullptr, (void*)"");
PEM_read_bio_RSAPrivateKey(bio, nullptr, nullptr, const_cast<char*>(""));
if (result.private_key == nullptr) {
gpr_log(GPR_ERROR, "Could not deserialize private key.");
goto end;

@ -696,7 +696,7 @@ static void on_openid_config_retrieved(void* user_data, grpc_error* /*error*/) {
req.host = gpr_strdup(jwks_uri);
req.http.path = const_cast<char*>(strchr(jwks_uri, '/'));
if (req.http.path == nullptr) {
req.http.path = (char*)"";
req.http.path = const_cast<char*>("");
} else {
*(req.host + (req.http.path - jwks_uri)) = '\0';
}
@ -757,8 +757,8 @@ const char* grpc_jwt_issuer_email_domain(const char* issuer) {
if (dot == nullptr || dot == email_domain) return email_domain;
GPR_ASSERT(dot > email_domain);
/* There may be a subdomain, we just want the domain. */
dot = static_cast<const char*>(gpr_memrchr(
(void*)email_domain, '.', static_cast<size_t>(dot - email_domain)));
dot = static_cast<const char*>(
gpr_memrchr(email_domain, '.', static_cast<size_t>(dot - email_domain)));
if (dot == nullptr) return email_domain;
return dot + 1;
}

@ -386,8 +386,9 @@ class grpc_compute_engine_token_fetcher_credentials
const_cast<char*>("Google")};
grpc_httpcli_request request;
memset(&request, 0, sizeof(grpc_httpcli_request));
request.host = (char*)GRPC_COMPUTE_ENGINE_METADATA_HOST;
request.http.path = (char*)GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH;
request.host = const_cast<char*>(GRPC_COMPUTE_ENGINE_METADATA_HOST);
request.http.path =
const_cast<char*>(GRPC_COMPUTE_ENGINE_METADATA_TOKEN_PATH);
request.http.hdr_count = 1;
request.http.hdrs = &header;
/* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
@ -445,8 +446,8 @@ void grpc_google_refresh_token_credentials::fetch_oauth2(
GRPC_REFRESH_TOKEN_POST_BODY_FORMAT_STRING, refresh_token_.client_id,
refresh_token_.client_secret, refresh_token_.refresh_token);
memset(&request, 0, sizeof(grpc_httpcli_request));
request.host = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_HOST;
request.http.path = (char*)GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH;
request.host = const_cast<char*>(GRPC_GOOGLE_OAUTH2_SERVICE_HOST);
request.http.path = const_cast<char*>(GRPC_GOOGLE_OAUTH2_SERVICE_TOKEN_PATH);
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = &grpc_httpcli_ssl;
@ -577,8 +578,8 @@ class StsTokenFetcherCredentials
const_cast<char*>("application/x-www-form-urlencoded")};
grpc_httpcli_request request;
memset(&request, 0, sizeof(grpc_httpcli_request));
request.host = (char*)sts_url_->authority;
request.http.path = (char*)sts_url_->path;
request.host = sts_url_->authority;
request.http.path = sts_url_->path;
request.http.hdr_count = 1;
request.http.hdrs = &header;
request.handshaker = (strcmp(sts_url_->scheme, "https") == 0)

@ -224,7 +224,7 @@ bool grpc_plugin_credentials::get_request_metadata(
grpc_slice_unref_internal(creds_md[i].key);
grpc_slice_unref_internal(creds_md[i].value);
}
gpr_free((void*)error_details);
gpr_free(const_cast<char*>(error_details));
gpr_free(request);
}
return retval;

@ -38,8 +38,8 @@ void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp,
size_t num_key_cert_pairs) {
if (kp == nullptr) return;
for (size_t i = 0; i < num_key_cert_pairs; i++) {
gpr_free((void*)kp[i].private_key);
gpr_free((void*)kp[i].cert_chain);
gpr_free(const_cast<char*>(kp[i].private_key));
gpr_free(const_cast<char*>(kp[i].cert_chain));
}
gpr_free(kp);
}
@ -87,7 +87,7 @@ grpc_ssl_credentials::create_security_connector(
return sc;
}
grpc_arg new_arg = grpc_channel_arg_string_create(
(char*)GRPC_ARG_HTTP2_SCHEME, (char*)"https");
const_cast<char*>(GRPC_ARG_HTTP2_SCHEME), const_cast<char*>("https"));
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
return sc;
}
@ -262,8 +262,8 @@ void grpc_ssl_server_certificate_config_destroy(
grpc_ssl_server_certificate_config* config) {
if (config == nullptr) return;
for (size_t i = 0; i < config->num_key_cert_pairs; i++) {
gpr_free((void*)config->pem_key_cert_pairs[i].private_key);
gpr_free((void*)config->pem_key_cert_pairs[i].cert_chain);
gpr_free(const_cast<char*>(config->pem_key_cert_pairs[i].private_key));
gpr_free(const_cast<char*>(config->pem_key_cert_pairs[i].cert_chain));
}
gpr_free(config->pem_key_cert_pairs);
gpr_free(config->pem_root_certs);

@ -172,7 +172,7 @@ FileWatcherCertificateProvider::~FileWatcherCertificateProvider() {
// Reset distributor's callback to make sure the callback won't be invoked
// again after this object(provider) is destroyed.
distributor_->SetWatchStatusCallback(nullptr);
gpr_event_set(&shutdown_event_, (void*)(1));
gpr_event_set(&shutdown_event_, reinterpret_cast<void*>(1));
refresh_thread_.Join();
}

@ -46,7 +46,7 @@ grpc_tls_server_authorization_check_config::
grpc_tls_server_authorization_check_config::
~grpc_tls_server_authorization_check_config() {
if (destruct_ != nullptr) {
destruct_((void*)config_user_data_);
destruct_(config_user_data_);
}
}

@ -92,7 +92,7 @@ TlsCredentials::create_security_connector(
}
if (args != nullptr) {
grpc_arg new_arg = grpc_channel_arg_string_create(
(char*)GRPC_ARG_HTTP2_SCHEME, (char*)"https");
const_cast<char*>(GRPC_ARG_HTTP2_SCHEME), const_cast<char*>("https"));
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
}
return sc;

@ -104,8 +104,9 @@ static const grpc_arg_pointer_vtable connector_arg_vtable = {
connector_arg_copy, connector_arg_destroy, connector_cmp};
grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc) {
return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SECURITY_CONNECTOR, sc,
&connector_arg_vtable);
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_SECURITY_CONNECTOR), sc,
&connector_arg_vtable);
}
grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg) {

@ -111,7 +111,7 @@ class grpc_ssl_channel_security_connector final
const tsi_result result =
tsi_create_ssl_client_handshaker_factory_with_options(
&options, &client_handshaker_factory_);
gpr_free((void*)options.alpn_protocols);
gpr_free(options.alpn_protocols);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@ -258,7 +258,7 @@ class grpc_ssl_server_security_connector
const tsi_result result =
tsi_create_ssl_server_handshaker_factory_with_options(
&options, &server_handshaker_factory_);
gpr_free((void*)alpn_protocol_strings);
gpr_free(alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@ -368,7 +368,7 @@ class grpc_ssl_server_security_connector
grpc_tsi_ssl_pem_key_cert_pairs_destroy(
const_cast<tsi_ssl_pem_key_cert_pair*>(options.pem_key_cert_pairs),
options.num_key_cert_pairs);
gpr_free((void*)alpn_protocol_strings);
gpr_free(alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",

@ -427,7 +427,7 @@ grpc_security_status grpc_ssl_tsi_client_handshaker_factory_init(
const tsi_result result =
tsi_create_ssl_client_handshaker_factory_with_options(&options,
handshaker_factory);
gpr_free((void*)options.alpn_protocols);
gpr_free(options.alpn_protocols);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));
@ -459,7 +459,7 @@ grpc_security_status grpc_ssl_tsi_server_handshaker_factory_init(
const tsi_result result =
tsi_create_ssl_server_handshaker_factory_with_options(&options,
handshaker_factory);
gpr_free((void*)alpn_protocol_strings);
gpr_free(alpn_protocol_strings);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
tsi_result_to_string(result));

@ -419,9 +419,9 @@ void TlsChannelSecurityConnector::ServerAuthorizationCheckArgDestroy(
if (arg == nullptr) {
return;
}
gpr_free((void*)arg->target_name);
gpr_free((void*)arg->peer_cert);
if (arg->peer_cert_full_chain) gpr_free((void*)arg->peer_cert_full_chain);
gpr_free(const_cast<char*>(arg->target_name));
gpr_free(const_cast<char*>(arg->peer_cert));
gpr_free(const_cast<char*>(arg->peer_cert_full_chain));
delete arg->error_details;
if (arg->destroy_context != nullptr) {
arg->destroy_context(arg->context);

@ -241,8 +241,8 @@ void SecurityHandshaker::OnPeerCheckedInner(grpc_error* error) {
handshaker_result_, &unused_bytes, &unused_bytes_size);
// Create secure endpoint.
if (unused_bytes_size > 0) {
grpc_slice slice =
grpc_slice_from_copied_buffer((char*)unused_bytes, unused_bytes_size);
grpc_slice slice = grpc_slice_from_copied_buffer(
reinterpret_cast<const char*>(unused_bytes), unused_bytes_size);
args_->endpoint = grpc_secure_endpoint_create(
protector, zero_copy_protector, args_->endpoint, &slice, 1);
grpc_slice_unref_internal(slice);

@ -310,20 +310,24 @@ void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
}
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
parent_call* p =
reinterpret_cast<parent_call*>(gpr_atm_acq_load(&call->parent_call_atm));
if (p == nullptr) {
p = call->arena->New<parent_call>();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
if (!gpr_atm_rel_cas(&call->parent_call_atm,
reinterpret_cast<gpr_atm>(nullptr),
reinterpret_cast<gpr_atm>(p))) {
p->~parent_call();
p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
p = reinterpret_cast<parent_call*>(
gpr_atm_acq_load(&call->parent_call_atm));
}
}
return p;
}
static parent_call* get_parent_call(grpc_call* call) {
return (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
return reinterpret_cast<parent_call*>(
gpr_atm_acq_load(&call->parent_call_atm));
}
size_t grpc_call_get_initial_size_estimate() {
@ -649,7 +653,8 @@ static void execute_batch(grpc_call* call,
}
char* grpc_call_get_peer(grpc_call* call) {
char* peer_string = (char*)gpr_atm_acq_load(&call->peer_string);
char* peer_string =
reinterpret_cast<char*>(gpr_atm_acq_load(&call->peer_string));
if (peer_string != nullptr) return gpr_strdup(peer_string);
peer_string = grpc_channel_get_target(call->channel);
if (peer_string != nullptr) return peer_string;
@ -828,8 +833,8 @@ static void set_encodings_accepted_by_peer(grpc_call* /*call*/,
accepted_user_data =
grpc_mdelem_get_user_data(mdel, destroy_encodings_accepted_by_peer);
if (accepted_user_data != nullptr) {
*encodings_accepted_by_peer =
static_cast<uint32_t>(((uintptr_t)accepted_user_data) - 1);
*encodings_accepted_by_peer = static_cast<uint32_t>(
reinterpret_cast<uintptr_t>(accepted_user_data) - 1);
return;
}
@ -869,7 +874,8 @@ static void set_encodings_accepted_by_peer(grpc_call* /*call*/,
grpc_mdelem_set_user_data(
mdel, destroy_encodings_accepted_by_peer,
(void*)((static_cast<uintptr_t>(*encodings_accepted_by_peer)) + 1));
reinterpret_cast<void*>(
static_cast<uintptr_t>(*encodings_accepted_by_peer) + 1));
}
uint32_t grpc_call_test_only_get_encodings_accepted_by_peer(grpc_call* call) {
@ -883,8 +889,8 @@ grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call) {
return call->incoming_stream_compression_algorithm;
}
static grpc_linked_mdelem* linked_from_md(const grpc_metadata* md) {
return (grpc_linked_mdelem*)&md->internal_data;
static grpc_linked_mdelem* linked_from_md(grpc_metadata* md) {
return reinterpret_cast<grpc_linked_mdelem*>(&md->internal_data);
}
static grpc_metadata* get_md_elem(grpc_metadata* metadata,
@ -907,8 +913,7 @@ static int prepare_application_metadata(grpc_call* call, int count,
grpc_metadata_batch* batch =
&call->metadata_batch[0 /* is_receiving */][is_trailing];
for (i = 0; i < total_count; i++) {
const grpc_metadata* md =
get_md_elem(metadata, additional_metadata, i, count);
grpc_metadata* md = get_md_elem(metadata, additional_metadata, i, count);
grpc_linked_mdelem* l = linked_from_md(md);
GPR_ASSERT(sizeof(grpc_linked_mdelem) == sizeof(md->internal_data));
if (!GRPC_LOG_IF_ERROR("validate_metadata",
@ -927,8 +932,7 @@ static int prepare_application_metadata(grpc_call* call, int count,
}
if (i != total_count) {
for (int j = 0; j < i; j++) {
const grpc_metadata* md =
get_md_elem(metadata, additional_metadata, j, count);
grpc_metadata* md = get_md_elem(metadata, additional_metadata, j, count);
grpc_linked_mdelem* l = linked_from_md(md);
GRPC_MDELEM_UNREF(l->md);
}
@ -1230,9 +1234,10 @@ static void post_batch_completion(batch_control* bctl) {
if (bctl->completion_data.notify_tag.is_closure) {
/* unrefs error */
bctl->call = nullptr;
grpc_core::Closure::Run(DEBUG_LOCATION,
(grpc_closure*)bctl->completion_data.notify_tag.tag,
error);
grpc_core::Closure::Run(
DEBUG_LOCATION,
static_cast<grpc_closure*>(bctl->completion_data.notify_tag.tag),
error);
GRPC_CALL_INTERNAL_UNREF(call, "completion");
} else {
/* unrefs error */
@ -1356,7 +1361,8 @@ static void receiving_stream_ready(void* bctlp, grpc_error* error) {
* object with rel_cas, and will not use it after the cas. Its corresponding
* acq_load is in receiving_initial_metadata_ready() */
if (error != GRPC_ERROR_NONE || call->receiving_stream == nullptr ||
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
!gpr_atm_rel_cas(&call->recv_state, RECV_NONE,
reinterpret_cast<gpr_atm>(bctlp))) {
process_data_after_md(bctl);
}
}
@ -1570,7 +1576,8 @@ static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
static_cast<grpc_cq_completion*>(
gpr_malloc(sizeof(grpc_cq_completion))));
} else {
grpc_core::Closure::Run(DEBUG_LOCATION, (grpc_closure*)notify_tag,
grpc_core::Closure::Run(DEBUG_LOCATION,
static_cast<grpc_closure*>(notify_tag),
GRPC_ERROR_NONE);
}
error = GRPC_CALL_OK;

@ -87,7 +87,7 @@ void grpc_channel_init_shutdown(void) {
for (int i = 0; i < GRPC_NUM_CHANNEL_STACK_TYPES; i++) {
gpr_free(g_slots[i].slots);
g_slots[i].slots =
static_cast<stage_slot*>((void*)static_cast<uintptr_t>(0xdeadbeef));
static_cast<stage_slot*>(reinterpret_cast<void*>(0xdeadbeef));
}
}

@ -447,7 +447,8 @@ void grpc_cq_global_init() {
}
void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == nullptr) {
if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)0);
gpr_tls_set(&g_cached_cq, (intptr_t)cq);
}
@ -456,10 +457,10 @@ void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
int grpc_completion_queue_thread_local_cache_flush(grpc_completion_queue* cq,
void** tag, int* ok) {
grpc_cq_completion* storage =
(grpc_cq_completion*)gpr_tls_get(&g_cached_event);
reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event));
int ret = 0;
if (storage != nullptr &&
(grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
if (storage != nullptr && reinterpret_cast<grpc_completion_queue*>(
gpr_tls_get(&g_cached_cq)) == cq) {
*tag = storage->tag;
grpc_core::ExecCtx exec_ctx;
*ok = (storage->next & static_cast<uintptr_t>(1)) == 1;
@ -717,8 +718,10 @@ static void cq_end_op_for_next(
cq_check_tag(cq, tag, true); /* Used in debug builds only */
if ((grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq &&
(grpc_cq_completion*)gpr_tls_get(&g_cached_event) == nullptr) {
if (reinterpret_cast<grpc_completion_queue*>(gpr_tls_get(&g_cached_cq)) ==
cq &&
reinterpret_cast<grpc_cq_completion*>(gpr_tls_get(&g_cached_event)) ==
nullptr) {
gpr_tls_set(&g_cached_event, (intptr_t)storage);
} else {
/* Add the completion to the queue */
@ -793,8 +796,8 @@ static void cq_end_op_for_pluck(
storage->tag = tag;
storage->done = done;
storage->done_arg = done_arg;
storage->next =
((uintptr_t)&cqd->completed_head) | (static_cast<uintptr_t>(is_success));
storage->next = reinterpret_cast<uintptr_t>(&cqd->completed_head) |
static_cast<uintptr_t>(is_success);
gpr_mu_lock(cq->mu);
cq_check_tag(cq, tag, false); /* Used in debug builds only */
@ -802,7 +805,7 @@ static void cq_end_op_for_pluck(
/* Add to the list of completions */
cqd->things_queued_ever.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
cqd->completed_tail->next =
((uintptr_t)storage) | (1u & cqd->completed_tail->next);
reinterpret_cast<uintptr_t>(storage) | (1u & cqd->completed_tail->next);
cqd->completed_tail = storage;
if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
@ -1176,8 +1179,8 @@ class ExecCtxPluck : public grpc_core::ExecCtx {
cqd->things_queued_ever.Load(grpc_core::MemoryOrder::RELAXED);
grpc_cq_completion* c;
grpc_cq_completion* prev = &cqd->completed_head;
while ((c = (grpc_cq_completion*)(prev->next &
~static_cast<uintptr_t>(1))) !=
while ((c = reinterpret_cast<grpc_cq_completion*>(
prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
if (c->tag == a->tag) {
prev->next = (prev->next & static_cast<uintptr_t>(1)) |
@ -1248,9 +1251,9 @@ static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
break;
}
prev = &cqd->completed_head;
while (
(c = (grpc_cq_completion*)(prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
while ((c = reinterpret_cast<grpc_cq_completion*>(
prev->next & ~static_cast<uintptr_t>(1))) !=
&cqd->completed_head) {
if (c->tag == tag) {
prev->next = (prev->next & static_cast<uintptr_t>(1)) |
(c->next & ~static_cast<uintptr_t>(1));

@ -101,11 +101,12 @@ static void register_builtin_channel_init() {
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
grpc_add_connected_filter, nullptr);
grpc_channel_init_register_stage(GRPC_CLIENT_LAME_CHANNEL,
GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
append_filter, (void*)&grpc_lame_filter);
grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
(void*)&grpc_core::Server::kServerTopFilter);
grpc_channel_init_register_stage(
GRPC_CLIENT_LAME_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
append_filter, const_cast<grpc_channel_filter*>(&grpc_lame_filter));
grpc_channel_init_register_stage(
GRPC_SERVER_CHANNEL, INT_MAX, prepend_filter,
const_cast<grpc_channel_filter*>(&grpc_core::Server::kServerTopFilter));
}
typedef struct grpc_plugin {

@ -1207,7 +1207,7 @@ static uint32_t elems_phash(uint32_t i) {
uint32_t y = i / 108;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(elems_r)) {
uint32_t delta = (uint32_t)elems_r[y];
uint32_t delta = static_cast<uint32_t>(elems_r[y]);
h += delta;
}
return h;

@ -42,14 +42,15 @@ grpc_status_code grpc_get_status_code_from_metadata(grpc_mdelem md) {
}
void* user_data = grpc_mdelem_get_user_data(md, destroy_status);
if (user_data != nullptr) {
return static_cast<grpc_status_code>((intptr_t)user_data - STATUS_OFFSET);
return static_cast<grpc_status_code>(reinterpret_cast<intptr_t>(user_data) -
STATUS_OFFSET);
}
uint32_t status;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(md), &status)) {
status = GRPC_STATUS_UNKNOWN; /* could not parse status code */
}
grpc_mdelem_set_user_data(
md, destroy_status, (void*)static_cast<intptr_t>(status + STATUS_OFFSET));
grpc_mdelem_set_user_data(md, destroy_status,
reinterpret_cast<void*>(status + STATUS_OFFSET));
return static_cast<grpc_status_code>(status);
}

@ -43,8 +43,9 @@ grpc_status_code gsec_aead_crypter_encrypt(
char** error_details) {
if (crypter != nullptr && crypter->vtable != nullptr &&
crypter->vtable->encrypt_iovec != nullptr) {
struct iovec aad_vec = {(void*)aad, aad_length};
struct iovec plaintext_vec = {(void*)plaintext, plaintext_length};
struct iovec aad_vec = {const_cast<uint8_t*>(aad), aad_length};
struct iovec plaintext_vec = {const_cast<uint8_t*>(plaintext),
plaintext_length};
struct iovec ciphertext_vec = {ciphertext_and_tag,
ciphertext_and_tag_length};
return crypter->vtable->encrypt_iovec(
@ -81,8 +82,8 @@ grpc_status_code gsec_aead_crypter_decrypt(
size_t plaintext_length, size_t* bytes_written, char** error_details) {
if (crypter != nullptr && crypter->vtable != nullptr &&
crypter->vtable->decrypt_iovec != nullptr) {
struct iovec aad_vec = {(void*)aad, aad_length};
struct iovec ciphertext_vec = {(void*)ciphertext_and_tag,
struct iovec aad_vec = {const_cast<uint8_t*>(aad), aad_length};
struct iovec ciphertext_vec = {const_cast<uint8_t*>(ciphertext_and_tag),
ciphertext_and_tag_length};
struct iovec plaintext_vec = {plaintext, plaintext_length};
return crypter->vtable->decrypt_iovec(

@ -31,16 +31,18 @@
/* Use little endian to interpret a string of bytes as uint32_t. */
static uint32_t load_32_le(const unsigned char* buffer) {
return (((uint32_t)buffer[3]) << 24) | (((uint32_t)buffer[2]) << 16) |
(((uint32_t)buffer[1]) << 8) | ((uint32_t)buffer[0]);
return (static_cast<uint32_t>(buffer[3]) << 24) |
(static_cast<uint32_t>(buffer[2]) << 16) |
(static_cast<uint32_t>(buffer[1]) << 8) |
static_cast<uint32_t>(buffer[0]);
}
/* Store uint32_t as a string of little endian bytes. */
static void store_32_le(uint32_t value, unsigned char* buffer) {
buffer[3] = (unsigned char)(value >> 24) & 0xFF;
buffer[2] = (unsigned char)(value >> 16) & 0xFF;
buffer[1] = (unsigned char)(value >> 8) & 0xFF;
buffer[0] = (unsigned char)(value)&0xFF;
buffer[3] = static_cast<unsigned char>(value >> 24) & 0xFF;
buffer[2] = static_cast<unsigned char>(value >> 16) & 0xFF;
buffer[1] = static_cast<unsigned char>(value >> 8) & 0xFF;
buffer[0] = static_cast<unsigned char>(value) & 0xFF;
}
/* Frame writer implementation. */
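
A standalone round-trip check of the two helpers rewritten above (same bodies as the new code, plus a hypothetical main): store_32_le writes the least significant byte first and load_32_le reassembles it, so the pair is endianness-independent.

#include <cassert>
#include <cstdint>

static uint32_t load_32_le(const unsigned char* buffer) {
  return (static_cast<uint32_t>(buffer[3]) << 24) |
         (static_cast<uint32_t>(buffer[2]) << 16) |
         (static_cast<uint32_t>(buffer[1]) << 8) |
         static_cast<uint32_t>(buffer[0]);
}

static void store_32_le(uint32_t value, unsigned char* buffer) {
  buffer[3] = static_cast<unsigned char>(value >> 24) & 0xFF;
  buffer[2] = static_cast<unsigned char>(value >> 16) & 0xFF;
  buffer[1] = static_cast<unsigned char>(value >> 8) & 0xFF;
  buffer[0] = static_cast<unsigned char>(value) & 0xFF;
}

int main() {
  unsigned char buf[4];
  store_32_le(0x12345678u, buf);
  assert(buf[0] == 0x78);  // low byte stored first
  assert(load_32_le(buf) == 0x12345678u);
  return 0;
}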

@ -279,7 +279,7 @@ void alts_handshaker_client_handle_response(alts_handshaker_client* c,
if (code != GRPC_STATUS_OK) {
upb_strview details = grpc_gcp_HandshakerStatus_details(resp_status);
if (details.size > 0) {
char* error_details = (char*)gpr_zalloc(details.size + 1);
char* error_details = static_cast<char*>(gpr_zalloc(details.size + 1));
memcpy(error_details, details.data, details.size);
gpr_log(GPR_ERROR, "Error from handshaker service:%s", error_details);
gpr_free(error_details);

@ -56,16 +56,18 @@ static void maybe_append_error_msg(const char* appendix, char** dst) {
/* Use little endian to interpret a string of bytes as uint32_t. */
static uint32_t load_32_le(const unsigned char* buffer) {
return (((uint32_t)buffer[3]) << 24) | (((uint32_t)buffer[2]) << 16) |
(((uint32_t)buffer[1]) << 8) | ((uint32_t)buffer[0]);
return (static_cast<uint32_t>(buffer[3]) << 24) |
(static_cast<uint32_t>(buffer[2]) << 16) |
(static_cast<uint32_t>(buffer[1]) << 8) |
static_cast<uint32_t>(buffer[0]);
}
/* Store uint32_t as a string of little endian bytes. */
static void store_32_le(uint32_t value, unsigned char* buffer) {
buffer[3] = (unsigned char)(value >> 24) & 0xFF;
buffer[2] = (unsigned char)(value >> 16) & 0xFF;
buffer[1] = (unsigned char)(value >> 8) & 0xFF;
buffer[0] = (unsigned char)(value)&0xFF;
buffer[3] = static_cast<unsigned char>(value >> 24) & 0xFF;
buffer[2] = static_cast<unsigned char>(value >> 16) & 0xFF;
buffer[1] = static_cast<unsigned char>(value >> 8) & 0xFF;
buffer[0] = static_cast<unsigned char>(value) & 0xFF;
}
/* Ensures header and tag iovec have sufficient length. */

@ -87,10 +87,10 @@ static bool read_frame_size(const grpc_slice_buffer* sb,
}
GPR_ASSERT(remaining == 0);
/* Gets little-endian frame size. */
uint32_t frame_size = (((uint32_t)frame_size_buffer[3]) << 24) |
(((uint32_t)frame_size_buffer[2]) << 16) |
(((uint32_t)frame_size_buffer[1]) << 8) |
((uint32_t)frame_size_buffer[0]);
uint32_t frame_size = (static_cast<uint32_t>(frame_size_buffer[3]) << 24) |
(static_cast<uint32_t>(frame_size_buffer[2]) << 16) |
(static_cast<uint32_t>(frame_size_buffer[1]) << 8) |
static_cast<uint32_t>(frame_size_buffer[0]);
if (frame_size > kMaxFrameLength) {
gpr_log(GPR_ERROR, "Frame size is larger than maximum frame size");
return false;

@ -524,7 +524,8 @@ static tsi_result fake_handshaker_result_create_frame_protector(
static tsi_result fake_handshaker_result_get_unused_bytes(
const tsi_handshaker_result* self, const unsigned char** bytes,
size_t* bytes_size) {
fake_handshaker_result* result = (fake_handshaker_result*)self;
fake_handshaker_result* result = reinterpret_cast<fake_handshaker_result*>(
const_cast<tsi_handshaker_result*>(self));
*bytes_size = result->unused_bytes_size;
*bytes = result->unused_bytes;
return TSI_OK;
@ -581,8 +582,9 @@ static tsi_result fake_handshaker_get_bytes_to_send_to_peer(
static_cast<tsi_fake_handshake_message>(impl->next_message_to_send + 2);
const char* msg_string =
tsi_fake_handshake_message_to_string(impl->next_message_to_send);
result = tsi_fake_frame_set_data((unsigned char*)msg_string,
strlen(msg_string), &impl->outgoing_frame);
result = tsi_fake_frame_set_data(
reinterpret_cast<unsigned char*>(const_cast<char*>(msg_string)),
strlen(msg_string), &impl->outgoing_frame);
if (result != TSI_OK) return result;
if (next_message_to_send > TSI_FAKE_HANDSHAKE_MESSAGE_MAX) {
next_message_to_send = TSI_FAKE_HANDSHAKE_MESSAGE_MAX;

@ -340,8 +340,7 @@ static tsi_result add_pem_certificate(X509* cert, tsi_peer_property* property) {
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
TSI_X509_PEM_CERT_PROPERTY, (const char*)contents,
static_cast<size_t>(len), property);
TSI_X509_PEM_CERT_PROPERTY, contents, static_cast<size_t>(len), property);
BIO_free(bio);
return result;
}
@ -554,12 +553,12 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX* context,
X509* certificate = nullptr;
BIO* pem;
GPR_ASSERT(pem_cert_chain_size <= INT_MAX);
pem = BIO_new_mem_buf((void*)pem_cert_chain,
static_cast<int>(pem_cert_chain_size));
pem = BIO_new_mem_buf(pem_cert_chain, static_cast<int>(pem_cert_chain_size));
if (pem == nullptr) return TSI_OUT_OF_RESOURCES;
do {
certificate = PEM_read_bio_X509_AUX(pem, nullptr, nullptr, (void*)"");
certificate =
PEM_read_bio_X509_AUX(pem, nullptr, nullptr, const_cast<char*>(""));
if (certificate == nullptr) {
result = TSI_INVALID_ARGUMENT;
break;
@ -570,7 +569,7 @@ static tsi_result ssl_ctx_use_certificate_chain(SSL_CTX* context,
}
while (true) {
X509* certificate_authority =
PEM_read_bio_X509(pem, nullptr, nullptr, (void*)"");
PEM_read_bio_X509(pem, nullptr, nullptr, const_cast<char*>(""));
if (certificate_authority == nullptr) {
ERR_clear_error();
break; /* Done reading. */
@ -674,10 +673,11 @@ static tsi_result ssl_ctx_use_pem_private_key(SSL_CTX* context,
EVP_PKEY* private_key = nullptr;
BIO* pem;
GPR_ASSERT(pem_key_size <= INT_MAX);
pem = BIO_new_mem_buf((void*)pem_key, static_cast<int>(pem_key_size));
pem = BIO_new_mem_buf(pem_key, static_cast<int>(pem_key_size));
if (pem == nullptr) return TSI_OUT_OF_RESOURCES;
do {
private_key = PEM_read_bio_PrivateKey(pem, nullptr, nullptr, (void*)"");
private_key =
PEM_read_bio_PrivateKey(pem, nullptr, nullptr, const_cast<char*>(""));
if (private_key == nullptr) {
result = TSI_INVALID_ARGUMENT;
break;
@ -718,7 +718,7 @@ static tsi_result x509_store_load_certs(X509_STORE* cert_store,
X509_NAME* root_name = nullptr;
BIO* pem;
GPR_ASSERT(pem_roots_size <= INT_MAX);
pem = BIO_new_mem_buf((void*)pem_roots, static_cast<int>(pem_roots_size));
pem = BIO_new_mem_buf(pem_roots, static_cast<int>(pem_roots_size));
if (cert_store == nullptr) return TSI_INVALID_ARGUMENT;
if (pem == nullptr) return TSI_OUT_OF_RESOURCES;
if (root_names != nullptr) {
@ -727,7 +727,7 @@ static tsi_result x509_store_load_certs(X509_STORE* cert_store,
}
while (true) {
root = PEM_read_bio_X509_AUX(pem, nullptr, nullptr, (void*)"");
root = PEM_read_bio_X509_AUX(pem, nullptr, nullptr, const_cast<char*>(""));
if (root == nullptr) {
ERR_clear_error();
break; /* We're at the end of stream. */
@ -837,10 +837,10 @@ tsi_result tsi_ssl_extract_x509_subject_names_from_pem_cert(
tsi_result result = TSI_OK;
X509* cert = nullptr;
BIO* pem;
pem = BIO_new_mem_buf((void*)pem_cert, static_cast<int>(strlen(pem_cert)));
pem = BIO_new_mem_buf(pem_cert, static_cast<int>(strlen(pem_cert)));
if (pem == nullptr) return TSI_OUT_OF_RESOURCES;
cert = PEM_read_bio_X509(pem, nullptr, nullptr, (void*)"");
cert = PEM_read_bio_X509(pem, nullptr, nullptr, const_cast<char*>(""));
if (cert == nullptr) {
gpr_log(GPR_ERROR, "Invalid certificate");
result = TSI_INVALID_ARGUMENT;
@ -1207,8 +1207,8 @@ tsi_result tsi_ssl_get_cert_chain_contents(STACK_OF(X509) * peer_chain,
return TSI_INTERNAL_ERROR;
}
tsi_result result = tsi_construct_string_peer_property(
TSI_X509_PEM_CERT_CHAIN_PROPERTY, (const char*)contents,
static_cast<size_t>(len), property);
TSI_X509_PEM_CERT_CHAIN_PROPERTY, contents, static_cast<size_t>(len),
property);
BIO_free(bio);
return result;
}
@ -1712,7 +1712,7 @@ static int client_handshaker_factory_npn_callback(
const unsigned char* in, unsigned int inlen, void* arg) {
tsi_ssl_client_handshaker_factory* factory =
static_cast<tsi_ssl_client_handshaker_factory*>(arg);
return select_protocol_list((const unsigned char**)out, outlen,
return select_protocol_list(const_cast<const unsigned char**>(out), outlen,
factory->alpn_protocol_list,
factory->alpn_protocol_list_length, in, inlen);
}

@ -87,7 +87,7 @@ void ChannelFilterPluginInit() {
for (size_t i = 0; i < channel_filters->size(); ++i) {
FilterRecord& filter = (*channel_filters)[i];
grpc_channel_init_register_stage(filter.stack_type, filter.priority,
MaybeAddFilter, (void*)&filter);
MaybeAddFilter, &filter);
}
}

@ -95,7 +95,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService(
gpr_log(GPR_ERROR,
"Adding multiple generic services is unsupported for now. "
"Dropping the service %p",
(void*)service);
service);
} else {
generic_service_ = service;
}
@ -122,7 +122,7 @@ ServerBuilder& ServerBuilder::experimental_type::RegisterCallbackGenericService(
gpr_log(GPR_ERROR,
"Adding multiple generic services is unsupported for now. "
"Dropping the service %p",
(void*)service);
service);
} else {
builder_->callback_generic_service_ = service;
}

@ -53,13 +53,13 @@ static void thd_func(void* arg) {
if (a->validator != nullptr) {
a->validator(a->server, a->cq, a->registered_method);
}
gpr_event_set(&a->done_thd, (void*)1);
gpr_event_set(&a->done_thd, reinterpret_cast<void*>(1));
}
/* Sets the done_write event */
static void set_done_write(void* arg, grpc_error* /*error*/) {
gpr_event* done_write = static_cast<gpr_event*>(arg);
gpr_event_set(done_write, (void*)1);
gpr_event_set(done_write, reinterpret_cast<void*>(1));
}
static void server_setup_transport(void* ts, grpc_transport* transport) {
@ -72,7 +72,7 @@ static void server_setup_transport(void* ts, grpc_transport* transport) {
/* Sets the read_done event */
static void set_read_done(void* arg, grpc_error* /*error*/) {
gpr_event* read_done = static_cast<gpr_event*>(arg);
gpr_event_set(read_done, (void*)1);
gpr_event_set(read_done, reinterpret_cast<void*>(1));
}
/* shutdown client */
@ -307,7 +307,7 @@ bool rst_stream_client_validator(grpc_slice_buffer* incoming, void* /*arg*/) {
return success;
}
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
void server_verifier_request_call(grpc_server* server,
grpc_completion_queue* cq,

@ -49,7 +49,7 @@
"\x00\x00\x20\x00\x00\x00\x00\x00\x01" \
"\x00\x00\x00\x00"
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static void verifier(grpc_server* server, grpc_completion_queue* cq,
void* /*registered_method*/) {

@ -66,7 +66,7 @@ static const char prefix[] =
"\x01\x00\x00\x27\x10"
"";
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static void verifier(grpc_server* server, grpc_completion_queue* cq,
void* registered_method) {

@ -38,7 +38,7 @@
"\x10\x02te\x08trailers" \
"\x10\x0auser-agent\"bad-client grpc-c/0.12.0.0 (linux)"
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static void verifier_succeeds(grpc_server* server, grpc_completion_queue* cq,
void* registered_method) {

@ -85,7 +85,7 @@
"\x10\x0cgrpc-timeout\x02" \
"5S"
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static void verifier(grpc_server* server, grpc_completion_queue* cq,
void* /*registered_method*/) {

@ -44,7 +44,7 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
typedef struct test_ctx test_ctx;
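
The tag() helper rewritten in the test hunks above packs a small integer into the void* completion-queue tag; intptr_t is wide enough to round-trip through void*, so the value can be recovered or compared later. A standalone sketch (untag is a hypothetical counterpart added here only for illustration):

#include <cassert>
#include <cstdint>

static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static intptr_t untag(void* p) { return reinterpret_cast<intptr_t>(p); }

int main() {
  void* t = tag(101);
  assert(untag(t) == 101);
  return 0;
}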

@ -34,7 +34,7 @@
#include "test/core/util/subprocess.h"
#include "test/core/util/test_config.h"
static void* tag(intptr_t t) { return (void*)t; }
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
static void run_test(const char* target, size_t nops) {
grpc_channel_credentials* ssl_creds =
@ -147,7 +147,7 @@ int main(int argc, char** argv) {
args[1] = const_cast<char*>("--bind");
std::string joined = grpc_core::JoinHostPort("::", port);
args[2] = const_cast<char*>(joined.c_str());
svr = gpr_subprocess_create(4, (const char**)args);
svr = gpr_subprocess_create(4, const_cast<const char**>(args));
gpr_free(args[0]);
for (i = 3; i <= 4; i++) {

@ -62,7 +62,8 @@ void bad_ssl_run(grpc_server* server) {
grpc_server_start(server);
error = grpc_server_request_call(server, &s, &call_details,
&request_metadata_recv, cq, cq, (void*)1);
&request_metadata_recv, cq, cq,
reinterpret_cast<void*>(1));
GPR_ASSERT(GRPC_CALL_OK == error);
signal(SIGINT, sigint_handler);

@ -111,12 +111,12 @@ static bool add_original_filter(grpc_channel_stack_builder* builder,
}
static void init_plugin(void) {
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
add_original_filter,
(void*)&original_filter);
grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
add_replacement_filter,
(void*)&replacement_filter);
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, INT_MAX, add_original_filter,
const_cast<grpc_channel_filter*>(&original_filter));
grpc_channel_init_register_stage(
GRPC_CLIENT_CHANNEL, INT_MAX, add_replacement_filter,
const_cast<grpc_channel_filter*>(&replacement_filter));
}
static void destroy_plugin(void) {}

@ -73,8 +73,8 @@ void ValidateChannelTraceData(const Json& json,
Json::Object object = json.object_value();
Json& num_events_logged_json = object["numEventsLogged"];
ASSERT_EQ(num_events_logged_json.type(), Json::Type::STRING);
size_t num_events_logged =
(size_t)strtol(num_events_logged_json.string_value().c_str(), nullptr, 0);
size_t num_events_logged = static_cast<size_t>(
strtol(num_events_logged_json.string_value().c_str(), nullptr, 0));
ASSERT_EQ(num_events_logged, num_events_logged_expected);
Json& start_time_json = object["creationTimestamp"];
ASSERT_EQ(start_time_json.type(), Json::Type::STRING);

@ -200,8 +200,8 @@ void ValidateChildInteger(const Json::Object& object, const std::string& key,
}
ASSERT_NE(it, object.end());
ASSERT_EQ(it->second.type(), Json::Type::STRING);
int64_t gotten_number =
(int64_t)strtol(it->second.string_value().c_str(), nullptr, 0);
int64_t gotten_number = static_cast<int64_t>(
strtol(it->second.string_value().c_str(), nullptr, 0));
EXPECT_EQ(gotten_number, expected);
}

@ -128,7 +128,7 @@ class ResultHandler : public grpc_core::Resolver::ResultHandler {
GPR_ASSERT(output != nullptr);
output->result = std::move(result);
output->error = GRPC_ERROR_NONE;
gpr_event_set(&output->ev, (void*)1);
gpr_event_set(&output->ev, reinterpret_cast<void*>(1));
}
void ReturnError(grpc_error* error) override {
@ -136,7 +136,7 @@ class ResultHandler : public grpc_core::Resolver::ResultHandler {
reinterpret_cast<ResolverOutput*>(gpr_atm_acq_load(&output_));
GPR_ASSERT(output != nullptr);
output->error = error;
gpr_event_set(&output->ev, (void*)1);
gpr_event_set(&output->ev, reinterpret_cast<void*>(1));
}
private:

@ -174,7 +174,7 @@ static void poll_pollset_until_request_done(iomgr_args* args) {
gpr_mu_unlock(args->mu);
grpc_core::ExecCtx::Get()->Flush();
}
gpr_event_set(&args->ev, (void*)1);
gpr_event_set(&args->ev, reinterpret_cast<void*>(1));
}
struct OnResolutionCallbackArg;

@ -54,7 +54,7 @@ class ResultHandler : public grpc_core::Resolver::ResultHandler {
for (size_t i = 0; i < expected_.addresses.size(); ++i) {
GPR_ASSERT(actual.addresses[i] == expected_.addresses[i]);
}
gpr_event_set(ev_, (void*)1);
gpr_event_set(ev_, reinterpret_cast<void*>(1));
ev_ = nullptr;
}
