Merge github.com:grpc/grpc into grpc_millis

Branch: pull/11866/head
Author: Craig Tiller
Commit: bbe282f600
Changed files (lines changed in parentheses):
  1. CMakeLists.txt (666)
  2. Makefile (2)
  3. Rakefile (4)
  4. build.yaml (3)
  5. cmake/gRPCConfig.cmake.in (0)
  6. cmake/gRPCConfigVersion.cmake.in (0)
  7. doc/environment_variables.md (1)
  8. include/grpc++/impl/codegen/call.h (9)
  9. include/grpc/impl/codegen/grpc_types.h (7)
  10. include/grpc/impl/codegen/port_platform.h (11)
  11. setup.py (11)
  12. src/c-ares/CMakeLists.txt (34)
  13. src/core/ext/filters/client_channel/channel_connectivity.c (7)
  14. src/core/ext/filters/client_channel/client_channel.c (133)
  15. src/core/ext/filters/client_channel/http_connect_handshaker.c (12)
  16. src/core/ext/filters/client_channel/lb_policy.c (2)
  17. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.c (10)
  18. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c (42)
  19. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.c (12)
  20. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c (33)
  21. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c (12)
  22. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c (13)
  23. src/core/ext/filters/client_channel/lb_policy_factory.c (5)
  24. src/core/ext/filters/client_channel/proxy_mapper_registry.c (2)
  25. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c (7)
  26. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c (8)
  27. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c (20)
  28. src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c (5)
  29. src/core/ext/filters/client_channel/retry_throttle.c (6)
  30. src/core/ext/filters/client_channel/subchannel.c (28)
  31. src/core/ext/filters/client_channel/subchannel_index.c (12)
  32. src/core/ext/filters/client_channel/uri_parser.c (7)
  33. src/core/ext/filters/deadline/deadline_filter.c (15)
  34. src/core/ext/filters/http/client/http_client_filter.c (18)
  35. src/core/ext/filters/http/http_filters_plugin.c (2)
  36. src/core/ext/filters/http/message_compress/message_compress_filter.c (16)
  37. src/core/ext/filters/http/server/http_server_filter.c (22)
  38. src/core/ext/filters/load_reporting/server_load_reporting_filter.c (16)
  39. src/core/ext/filters/load_reporting/server_load_reporting_plugin.c (2)
  40. src/core/ext/transport/chttp2/client/chttp2_connector.c (8)
  41. src/core/ext/transport/chttp2/server/chttp2_server.c (17)
  42. src/core/ext/transport/chttp2/transport/chttp2_transport.c (269)
  43. src/core/ext/transport/chttp2/transport/frame_goaway.c (4)
  44. src/core/ext/transport/chttp2/transport/frame_ping.c (4)
  45. src/core/ext/transport/chttp2/transport/frame_rst_stream.c (2)
  46. src/core/ext/transport/chttp2/transport/frame_settings.c (2)
  47. src/core/ext/transport/chttp2/transport/frame_window_update.c (8)
  48. src/core/ext/transport/chttp2/transport/hpack_encoder.c (5)
  49. src/core/ext/transport/chttp2/transport/hpack_parser.c (9)
  50. src/core/ext/transport/chttp2/transport/hpack_table.c (4)
  51. src/core/ext/transport/chttp2/transport/internal.h (29)
  52. src/core/ext/transport/chttp2/transport/parsing.c (9)
  53. src/core/ext/transport/chttp2/transport/stream_map.c (4)
  54. src/core/ext/transport/chttp2/transport/writing.c (68)
  55. src/core/ext/transport/inproc/inproc_transport.c (15)
  56. src/core/lib/channel/channel_args.c (18)
  57. src/core/lib/channel/channel_stack_builder.c (8)
  58. src/core/lib/channel/connected_channel.c (25)
  59. src/core/lib/channel/handshaker.c (14)
  60. src/core/lib/channel/handshaker_registry.c (2)
  61. src/core/lib/debug/stats.c (2)
  62. src/core/lib/debug/stats_data.c (175)
  63. src/core/lib/debug/stats_data.h (73)
  64. src/core/lib/debug/stats_data.yaml (41)
  65. src/core/lib/debug/trace.c (6)
  66. src/core/lib/http/format_request.c (2)
  67. src/core/lib/http/httpcli.c (13)
  68. src/core/lib/http/parser.c (7)
  69. src/core/lib/iomgr/closure.c (4)
  70. src/core/lib/iomgr/combiner.c (7)
  71. src/core/lib/iomgr/error.c (14)
  72. src/core/lib/iomgr/ev_epoll1_linux.c (6)
  73. src/core/lib/iomgr/ev_epollex_linux.c (14)
  74. src/core/lib/iomgr/ev_epollsig_linux.c (15)
  75. src/core/lib/iomgr/ev_poll_posix.c (64)
  76. src/core/lib/iomgr/ev_posix.c (4)
  77. src/core/lib/iomgr/executor.c (178)
  78. src/core/lib/iomgr/executor.h (7)
  79. src/core/lib/iomgr/load_file.c (3)
  80. src/core/lib/iomgr/resolve_address_posix.c (13)
  81. src/core/lib/iomgr/resolve_address_windows.c (2)
  82. src/core/lib/iomgr/resource_quota.c (36)
  83. src/core/lib/iomgr/tcp_client_posix.c (9)
  84. src/core/lib/iomgr/tcp_posix.c (179)
  85. src/core/lib/iomgr/tcp_server_posix.c (11)
  86. src/core/lib/iomgr/tcp_server_utils_posix_common.c (2)
  87. src/core/lib/iomgr/timer_heap.c (8)
  88. src/core/lib/iomgr/timer_manager.c (2)
  89. src/core/lib/iomgr/udp_server.c (16)
  90. src/core/lib/iomgr/unix_sockets_posix.c (6)
  91. src/core/lib/iomgr/wakeup_fd_cv.c (3)
  92. src/core/lib/json/json.c (2)
  93. src/core/lib/json/json_string.c (26)
  94. src/core/lib/security/transport/security_handshaker.c (36)
  95. src/core/lib/slice/b64.c (4)
  96. src/core/lib/slice/slice.c (26)
  97. src/core/lib/slice/slice_buffer.c (9)
  98. src/core/lib/slice/slice_hash_table.c (5)
  99. src/core/lib/slice/slice_intern.c (13)
  100. src/core/lib/surface/alarm.c (6)
Some files were not shown because too many files have changed in this diff.

File diff suppressed because it is too large.

@@ -8378,7 +8378,7 @@ PUBLIC_HEADERS_C += \
LIBARES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBARES_SRC))))
$(LIBARES_OBJS): CPPFLAGS += -Ithird_party/cares -Ithird_party/cares/cares $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux) $(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) -fvisibility=hidden -D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(if $(subst MINGW32,,$(SYSTEM)),-DHAVE_CONFIG_H,)
$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion $(if $(subst Darwin,,$(SYSTEM)),,-Wno-shorten-64-to-32) $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
$(LIBDIR)/$(CONFIG)/libares.a: $(ZLIB_DEP) $(LIBARES_OBJS)
$(E) "[AR] Creating $@"

@@ -80,10 +80,12 @@ task 'dlls' do
grpc_config = ENV['GRPC_CONFIG'] || 'opt'
verbose = ENV['V'] || '0'
env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE -Wno-unused-variable -Wno-unused-result -DCARES_STATICLIB" '
env = 'CPPFLAGS="-D_WIN32_WINNT=0x600 -DUNICODE -D_UNICODE -Wno-unused-variable -Wno-unused-result -DCARES_STATICLIB -Wno-error=conversion -Wno-incompatible-pointer-types -Wno-sign-compare -Wno-parentheses" '
env += 'LDFLAGS=-static '
env += 'SYSTEM=MINGW32 '
env += 'EMBED_ZLIB=true '
env += 'EMBED_OPENSSL=true '
env += 'EMBED_CARES=true '
env += 'BUILDDIR=/tmp '
env += "V=#{verbose} "
out = GrpcBuildConfig::CORE_WINDOWS_DLL

@@ -4844,7 +4844,8 @@ configs:
UBSAN_OPTIONS: halt_on_error=1:print_stacktrace=1:suppressions=tools/ubsan_suppressions.txt
defaults:
ares:
CFLAGS: -Wno-sign-conversion $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
CFLAGS: -Wno-sign-conversion $(if $(subst Darwin,,$(SYSTEM)),,-Wno-shorten-64-to-32)
$(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
CPPFLAGS: -Ithird_party/cares -Ithird_party/cares/cares $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux)
$(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) -fvisibility=hidden
-D_GNU_SOURCE -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(if $(subst

@@ -48,6 +48,7 @@ some configuration as environment variables that can be set.
- compression - traces compression operations
- connectivity_state - traces connectivity state changes to channels
- channel_stack_builder - traces information about channel stacks being built
- executor - traces grpc's internal thread pool ('the executor')
- http - traces state in the http2 transport engine
- http1 - traces HTTP/1.x operations performed by gRPC
- inproc - traces the in-process transport
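A small usage sketch for the new tracer (my example, not part of this diff; assumes a POSIX platform where setenv() is available, and that gRPC reads GRPC_TRACE from the environment during initialization, as documented above):

/* Equivalent to launching the binary with GRPC_TRACE=executor set. */
#include <stdlib.h>
#include <grpc/grpc.h>

int main(void) {
  setenv("GRPC_TRACE", "executor", 1 /* overwrite */);
  grpc_init(); /* tracer settings are read from the environment here */
  /* ... normal gRPC usage; executor activity is now logged ... */
  grpc_shutdown();
  return 0;
}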

@@ -169,6 +169,15 @@ class WriteOptions {
return *this;
}
/// Guarantee that all bytes have been written to the wire before completing
/// this write (usually writes are completed when they pass flow control)
inline WriteOptions& set_write_through() {
SetBit(GRPC_WRITE_THROUGH);
return *this;
}
inline bool is_write_through() const { return GetBit(GRPC_WRITE_THROUGH); }
/// Get value for the flag indicating that this is the last message, and
/// should be coalesced with trailing metadata.
///
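A hedged sketch of the new C++ option in use (my own example; assumes an existing grpc::ClientWriter<Request> named writer obtained from a client-streaming stub call):

// Complete this Write() only after the bytes reach the wire, not merely
// after the message clears flow control.
grpc::WriteOptions options;
options.set_write_through();
if (!writer->Write(request, options)) {
  // The stream is broken; no further writes will succeed.
}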

@@ -355,8 +355,11 @@ typedef enum grpc_call_error {
/** Force compression to be disabled for a particular write
(start_write/add_metadata). Illegal on invoke/accept. */
#define GRPC_WRITE_NO_COMPRESS (0x00000002u)
/** Force this message to be written to the socket before completing it */
#define GRPC_WRITE_THROUGH (0x00000004u)
/** Mask of all valid flags. */
#define GRPC_WRITE_USED_MASK (GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS)
#define GRPC_WRITE_USED_MASK \
(GRPC_WRITE_BUFFER_HINT | GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_THROUGH)
/** Initial metadata flags */
/** Signal that the call is idempotent */
@@ -377,7 +380,7 @@ typedef enum grpc_call_error {
GRPC_INITIAL_METADATA_WAIT_FOR_READY | \
GRPC_INITIAL_METADATA_CACHEABLE_REQUEST | \
GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET | \
GRPC_INITIAL_METADATA_CORKED)
GRPC_INITIAL_METADATA_CORKED | GRPC_WRITE_THROUGH)
/** A single metadata element */
typedef struct grpc_metadata {
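At the C core surface, the same behavior is requested by setting the new flag on a send-message op. A minimal sketch (my example, not from this diff; assumes a valid call, payload, and completion-queue tag):

#include <string.h>
#include <grpc/grpc.h>
#include <grpc/support/log.h>

static void send_written_through(grpc_call *call, grpc_byte_buffer *payload,
                                 void *tag) {
  grpc_op op;
  memset(&op, 0, sizeof(op));
  op.op = GRPC_OP_SEND_MESSAGE;
  op.data.send_message.send_message = payload;
  op.flags = GRPC_WRITE_THROUGH; /* now included in GRPC_WRITE_USED_MASK */
  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, &op, 1, tag, NULL));
}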

@@ -409,4 +409,15 @@ typedef unsigned __int64 uint64_t;
#define CENSUSAPI GRPCAPI
#endif
#ifndef GPR_ATTRIBUTE_NO_TSAN /* (1) */
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define GPR_ATTRIBUTE_NO_TSAN __attribute__((no_sanitize("thread")))
#endif /* __has_feature(thread_sanitizer) */
#endif /* defined(__has_feature) */
#ifndef GPR_ATTRIBUTE_NO_TSAN /* (2) */
#define GPR_ATTRIBUTE_NO_TSAN
#endif /* GPR_ATTRIBUTE_NO_TSAN (2) */
#endif /* GPR_ATTRIBUTE_NO_TSAN (1) */
#endif /* GRPC_IMPL_CODEGEN_PORT_PLATFORM_H */
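An illustrative (hypothetical) use of the new macro: mark a function whose deliberate, benign data race should not be reported when the build is instrumented with ThreadSanitizer:

#include <grpc/impl/codegen/port_platform.h>

/* GPR_ATTRIBUTE_NO_TSAN expands to __attribute__((no_sanitize("thread")))
   when the compiler reports thread_sanitizer via __has_feature, and to
   nothing otherwise, so this builds everywhere. The racy increment here
   is intentional. */
static GPR_ATTRIBUTE_NO_TSAN void bump_counter(long *counter) {
  *counter += 1;
}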

@@ -141,7 +141,7 @@ CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
CYTHON_HELPER_C_FILES = ()
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform and "64bit" in platform.architecture()[0]:
if "win32" in sys.platform:
CORE_C_FILES = filter(lambda x: 'third_party/cares' not in x, CORE_C_FILES)
EXTENSION_INCLUDE_DIRECTORIES = (
@@ -160,11 +160,12 @@ DEFINE_MACROS = (
('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600),
('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
if "win32" in sys.platform:
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),)
# TODO(zyc): Re-enble c-ares on x64 and x86 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),
('GRPC_ARES', 0),)
if '64bit' in platform.architecture()[0]:
# TODO(zyc): Re-enble c-ares on x64 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('MS_WIN64', 1), ('GRPC_ARES', 0),)
DEFINE_MACROS += (('MS_WIN64', 1),)
elif sys.version_info >= (3, 5):
# For some reason, this is needed to get access to inet_pton/inet_ntop
# on msvc, but only for 32 bits
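The ('GRPC_ARES', 0) define works because the c-ares resolver sources compile away when the macro is off. The exact condition below is an assumption recalled from resolver sources of this era, not something shown in this diff:

/* Assumed shape of the compile-time gate in the
   .../resolver/dns/c_ares/ sources: */
#if GRPC_ARES == 1 && !defined(GRPC_UV)
/* ... ares-based resolution implementation ... */
#endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */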

@@ -1,34 +0,0 @@
# c-ares cmake file for gRPC
#
# This is currently very experimental, and unsupported.
#
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
string(TOLOWER ${CMAKE_SYSTEM_NAME} cares_system_name)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares)
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/cares)
if(${cares_system_name} MATCHES windows)
add_definitions(-DCARES_STATICLIB=1)
add_definitions(-DWIN32_LEAN_AND_MEAN=1)
else()
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../third_party/cares/config_${cares_system_name})
add_definitions(-DHAVE_CONFIG_H=1)
add_definitions(-D_GNU_SOURCE=1)
endif()
file(GLOB lib_sources ../../third_party/cares/cares/*.c)
add_library(cares ${lib_sources})

@@ -87,7 +87,7 @@ static void delete_state_watcher(grpc_exec_ctx *exec_ctx, state_watcher *w) {
static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
grpc_cq_completion *ignored) {
int delete = 0;
state_watcher *w = pw;
state_watcher *w = (state_watcher *)pw;
gpr_mu_lock(&w->mu);
switch (w->phase) {
case WAITING:
@@ -203,7 +203,7 @@ void grpc_channel_watch_connectivity_state(
grpc_channel_element *client_channel_elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
state_watcher *w = gpr_malloc(sizeof(*w));
state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));
GRPC_API_TRACE(
"grpc_channel_watch_connectivity_state("
@@ -228,7 +228,8 @@ void grpc_channel_watch_connectivity_state(
w->channel = channel;
w->error = NULL;
watcher_timer_init_arg *wa = gpr_malloc(sizeof(watcher_timer_init_arg));
watcher_timer_init_arg *wa =
(watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
wa->w = w;
wa->deadline = deadline;
GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
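Most hunks from here on repeat the mechanical pattern visible above: every void*-returning allocation or void* callback argument gains an explicit cast. A standalone illustration of why (my example): C converts void* to any object pointer implicitly, C++ does not, so the casts keep these .c files buildable under a C++ compiler.

#include <stdlib.h>

typedef struct state_watcher { int phase; } state_watcher;

int main(void) {
  /* In C, 'state_watcher *w = malloc(sizeof(*w));' compiles fine; in C++
     it is an error. The explicit cast below is valid in both languages. */
  state_watcher *w = (state_watcher *)malloc(sizeof(*w));
  w->phase = 0;
  free(w);
  return 0;
}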

@@ -150,7 +150,8 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
if (!parse_timeout(field, &timeout)) return NULL;
}
}
method_parameters *value = gpr_malloc(sizeof(method_parameters));
method_parameters *value =
(method_parameters *)gpr_malloc(sizeof(method_parameters));
gpr_ref_init(&value->refs, 1);
value->timeout = timeout;
value->wait_for_ready = wait_for_ready;
@@ -256,7 +257,7 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
lb_policy_connectivity_watcher *w = arg;
lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy) {
@@ -283,7 +284,8 @@ static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
lb_policy_connectivity_watcher *w =
(lb_policy_connectivity_watcher *)gpr_malloc(sizeof(*w));
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
GRPC_CLOSURE_INIT(&w->on_changed, on_lb_policy_state_changed_locked, w,
@@ -312,7 +314,8 @@ typedef struct {
} service_config_parsing_state;
static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
service_config_parsing_state *parsing_state = arg;
service_config_parsing_state *parsing_state =
(service_config_parsing_state *)arg;
if (strcmp(field->key, "retryThrottling") == 0) {
if (parsing_state->retry_throttle_data != NULL) return; // Duplicate.
if (field->type != GRPC_JSON_OBJECT) return;
@@ -367,7 +370,7 @@ static void parse_retry_throttle_params(const grpc_json *field, void *arg) {
static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
channel_data *chand = arg;
channel_data *chand = (channel_data *)arg;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
grpc_error_string(error));
@@ -393,7 +396,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
channel_arg =
grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
if (channel_arg != NULL && channel_arg->type == GRPC_ARG_POINTER) {
grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
grpc_lb_addresses *addresses =
(grpc_lb_addresses *)channel_arg->value.pointer.p;
bool found_balancer_address = false;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) {
@@ -588,9 +592,10 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
grpc_transport_op *op = arg;
grpc_channel_element *elem = op->handler_private.extra_arg;
channel_data *chand = elem->channel_data;
grpc_transport_op *op = (grpc_transport_op *)arg;
grpc_channel_element *elem =
(grpc_channel_element *)op->handler_private.extra_arg;
channel_data *chand = (channel_data *)elem->channel_data;
if (op->on_connectivity_state_change != NULL) {
grpc_connectivity_state_notify_on_state_change(
@@ -644,7 +649,7 @@ static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(op->set_accept_stream == false);
if (op->bind_pollset != NULL) {
@@ -664,7 +669,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
const grpc_channel_info *info) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
gpr_mu_lock(&chand->info_mu);
if (info->lb_policy_name != NULL) {
*info->lb_policy_name = chand->info_lb_policy_name == NULL
@@ -684,7 +689,7 @@ static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
// Initialize data members.
@@ -715,7 +720,8 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
"client channel factory arg must be a pointer");
}
grpc_client_channel_factory_ref(arg->value.pointer.p);
chand->client_channel_factory = arg->value.pointer.p;
chand->client_channel_factory =
(grpc_client_channel_factory *)arg->value.pointer.p;
// Get server name to resolve, using proxy mapper if needed.
arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
if (arg == NULL) {
@@ -747,7 +753,7 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_resolver *resolver = arg;
grpc_resolver *resolver = (grpc_resolver *)arg;
grpc_resolver_shutdown_locked(exec_ctx, resolver);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
}
@@ -755,7 +761,7 @@ static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
/* Destructor for channel_data */
static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
@@ -850,7 +856,7 @@ typedef struct client_channel_call_data {
grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
grpc_call_element *elem) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
return calld->subchannel_call;
}
@@ -870,7 +876,7 @@ static void waiting_for_pick_batches_add(
// This is called via the call combiner, so access to calld is synchronized.
static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
call_data *calld = arg;
call_data *calld = (call_data *)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_transport_stream_op_batch_finish_with_failure(
@@ -884,7 +890,7 @@ static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: failing %" PRIdPTR " pending batches: %s",
@@ -914,7 +920,7 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
// This is called via the call combiner, so access to calld is synchronized.
static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *ignored) {
call_data *calld = arg;
call_data *calld = (call_data *)arg;
if (calld->waiting_for_pick_batches_count > 0) {
--calld->waiting_for_pick_batches_count;
grpc_subchannel_call_process_op(
@@ -926,8 +932,8 @@ static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
// This is called via the call combiner, so access to calld is synchronized.
static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
" pending batches to subchannel_call=%p",
@@ -952,8 +958,8 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
// that the resolver has returned results to the channel.
static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call",
chand, calld);
@@ -963,7 +969,7 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
grpc_server_retry_throttle_data_ref(chand->retry_throttle_data);
}
if (chand->method_params_table != NULL) {
calld->method_params = grpc_method_config_table_get(
calld->method_params = (method_parameters *)grpc_method_config_table_get(
exec_ctx, chand->method_params_table, calld->path);
if (calld->method_params != NULL) {
method_parameters_ref(calld->method_params);
@@ -986,8 +992,8 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
const grpc_connected_subchannel_call_args call_args = {
.pollent = calld->pollent,
.path = calld->path,
@@ -1015,8 +1021,8 @@ static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_error *error) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
chand->interested_parties);
if (calld->connected_subchannel == NULL) {
@@ -1059,15 +1065,16 @@ typedef struct {
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args = arg;
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
if (args->finished) {
gpr_free(args);
return;
}
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
// If we don't yet have a resolver result, then a closure for
// pick_after_resolver_result_done_locked() will have been added to
// chand->waiting_for_resolver_result_closures, and it may not be invoked
@@ -1093,7 +1100,8 @@ static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
pick_after_resolver_result_args *args = arg;
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1104,8 +1112,8 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
}
args->finished = true;
grpc_call_element *elem = args->elem;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
@@ -1125,8 +1133,8 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: deferring pick pending resolver result", chand,
@@ -1150,9 +1158,9 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
// holding the call combiner.
static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (error != GRPC_ERROR_NONE && calld->lb_policy != NULL) {
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
@@ -1169,9 +1177,9 @@ static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
// Unrefs the LB policy and invokes subchannel_ready_locked().
static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element *elem = arg;
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)arg;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
chand, calld);
@@ -1188,8 +1196,8 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_lb_policy_pick_args *inputs) {
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
chand, calld, chand->lb_policy);
@@ -1224,8 +1232,8 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {
GPR_TIMER_BEGIN("pick_subchannel", 0);
channel_data *chand = elem->channel_data;
call_data *calld = elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
bool pick_done = false;
if (chand->lb_policy != NULL) {
apply_service_config_to_call_locked(exec_ctx, elem);
@@ -1297,8 +1305,8 @@ static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_call_element *elem = arg;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)arg;
call_data *calld = (call_data *)elem->call_data;
if (calld->retry_throttle_data != NULL) {
if (error == GRPC_ERROR_NONE) {
grpc_server_retry_throttle_data_record_success(
@@ -1319,8 +1327,8 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
static void cc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
batch);
@@ -1413,8 +1421,8 @@ done:
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
// Initialize data members.
calld->path = grpc_slice_ref_internal(args->path);
calld->call_start_time = args->start_time;
@@ -1434,8 +1442,8 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (chand->deadline_checking_enabled) {
grpc_deadline_state_destroy(exec_ctx, elem);
}
@@ -1469,7 +1477,7 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
calld->pollent = pollent;
}
@@ -1493,7 +1501,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
channel_data *chand = arg;
channel_data *chand = (channel_data *)arg;
if (chand->lb_policy != NULL) {
grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
} else {
@@ -1507,7 +1515,7 @@ static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_connectivity_state out =
grpc_connectivity_state_check(&chand->state_tracker);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
@@ -1578,7 +1586,7 @@ static void external_connectivity_watcher_list_remove(
int grpc_client_channel_num_external_connectivity_watchers(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
int count = 0;
gpr_mu_lock(&chand->external_connectivity_watcher_list_mu);
@@ -1595,7 +1603,7 @@ int grpc_client_channel_num_external_connectivity_watchers(
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
external_connectivity_watcher *w = arg;
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
grpc_closure *follow_up = w->on_complete;
grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
w->chand->interested_parties);
@@ -1608,7 +1616,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_ignored) {
external_connectivity_watcher *w = arg;
external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
external_connectivity_watcher *found = NULL;
if (w->state != NULL) {
external_connectivity_watcher_list_append(w->chand, w);
@@ -1637,8 +1645,9 @@ void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_polling_entity pollent, grpc_connectivity_state *state,
grpc_closure *closure, grpc_closure *watcher_timer_init) {
channel_data *chand = elem->channel_data;
external_connectivity_watcher *w = gpr_zalloc(sizeof(*w));
channel_data *chand = (channel_data *)elem->channel_data;
external_connectivity_watcher *w =
(external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
w->chand = chand;
w->pollent = pollent;
w->on_complete = closure;

@@ -124,7 +124,7 @@ static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
// Callback invoked when finished writing HTTP CONNECT request.
static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
http_connect_handshaker* handshaker = arg;
http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the write failed or we're shutting down, clean up and invoke the
@@ -145,7 +145,7 @@ static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
http_connect_handshaker* handshaker = arg;
http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
gpr_mu_lock(&handshaker->mu);
if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
// If the read failed or we're shutting down, clean up and invoke the
@@ -281,7 +281,8 @@ static void http_connect_handshaker_do_handshake(
GPR_ASSERT(arg->type == GRPC_ARG_STRING);
gpr_string_split(arg->value.string, "\n", &header_strings,
&num_header_strings);
headers = gpr_malloc(sizeof(grpc_http_header) * num_header_strings);
headers = (grpc_http_header*)gpr_malloc(sizeof(grpc_http_header) *
num_header_strings);
for (size_t i = 0; i < num_header_strings; ++i) {
char* sep = strchr(header_strings[i], ':');
if (sep == NULL) {
@@ -308,7 +309,7 @@ static void http_connect_handshaker_do_handshake(
grpc_httpcli_request request;
memset(&request, 0, sizeof(request));
request.host = server_name;
request.http.method = "CONNECT";
request.http.method = (char*)"CONNECT";
request.http.path = server_name;
request.http.hdrs = headers;
request.http.hdr_count = num_headers;
@@ -333,7 +334,8 @@ static const grpc_handshaker_vtable http_connect_handshaker_vtable = {
http_connect_handshaker_do_handshake};
static grpc_handshaker* grpc_http_connect_handshaker_create() {
http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
http_connect_handshaker* handshaker =
(http_connect_handshaker*)gpr_malloc(sizeof(*handshaker));
memset(handshaker, 0, sizeof(*handshaker));
grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
gpr_mu_init(&handshaker->mu);

@@ -67,7 +67,7 @@ void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
static void shutdown_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_lb_policy *policy = arg;
grpc_lb_policy *policy = (grpc_lb_policy *)arg;
policy->vtable->shutdown_locked(exec_ctx, policy);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
}

@@ -49,7 +49,7 @@ typedef struct {
static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->send_initial_metadata_succeeded = true;
}
@@ -59,7 +59,7 @@ static void on_complete_for_send(grpc_exec_ctx *exec_ctx, void *arg,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
call_data *calld = arg;
call_data *calld = (call_data *)arg;
if (error == GRPC_ERROR_NONE) {
calld->recv_initial_metadata_succeeded = true;
}
@@ -70,7 +70,7 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
// Get stats object from context and take a ref.
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
@@ -84,7 +84,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
// Record call finished, optionally setting client_failed_to_send and
// received.
grpc_grpclb_client_stats_add_call_finished(
@@ -98,7 +98,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static void start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
// Intercept send_initial_metadata.
if (batch->send_initial_metadata) {

@@ -181,7 +181,7 @@ typedef struct wrapped_rr_closure_arg {
* order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_rr_closure_arg *wc_arg = arg;
wrapped_rr_closure_arg *wc_arg = (wrapped_rr_closure_arg *)arg;
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
@@ -245,7 +245,7 @@ static void add_pending_pick(pending_pick **root,
grpc_connected_subchannel **target,
grpc_call_context_element *context,
grpc_closure *on_complete) {
pending_pick *pp = gpr_zalloc(sizeof(*pp));
pending_pick *pp = (pending_pick *)gpr_zalloc(sizeof(*pp));
pp->next = *root;
pp->pick_args = *pick_args;
pp->target = target;
@@ -271,7 +271,7 @@ typedef struct pending_ping {
} pending_ping;
static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
pending_ping *pping = gpr_zalloc(sizeof(*pping));
pending_ping *pping = (pending_ping *)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
pping->next = *root;
@@ -671,7 +671,7 @@ static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx,
grpc_lb_addresses *addresses =
process_serverlist_locked(exec_ctx, glb_policy->serverlist);
GPR_ASSERT(addresses != NULL);
grpc_lb_policy_args *args = gpr_zalloc(sizeof(*args));
grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args));
args->client_channel_factory = glb_policy->cc_factory;
args->combiner = glb_policy->base.combiner;
// Replace the LB addresses in the channel args that we pass down to
@@ -798,7 +798,7 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
rr_connectivity_data *rr_connectivity = arg;
rr_connectivity_data *rr_connectivity = (rr_connectivity_data *)arg;
glb_lb_policy *glb_policy = rr_connectivity->glb_policy;
if (glb_policy->shutting_down) {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -841,8 +841,8 @@ static grpc_slice_hash_table_entry targets_info_entry_create(
}
static int balancer_name_cmp_fn(void *a, void *b) {
const char *a_str = a;
const char *b_str = b;
const char *a_str = (const char *)a;
const char *b_str = (const char *)b;
return strcmp(a_str, b_str);
}
@@ -929,14 +929,14 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
if (arg == NULL || arg->type != GRPC_ARG_POINTER) {
return NULL;
}
grpc_lb_addresses *addresses = arg->value.pointer.p;
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
size_t num_grpclb_addrs = 0;
for (size_t i = 0; i < addresses->num_addresses; ++i) {
if (addresses->addresses[i].is_balancer) ++num_grpclb_addrs;
}
if (num_grpclb_addrs == 0) return NULL;
glb_lb_policy *glb_policy = gpr_zalloc(sizeof(*glb_policy));
glb_lb_policy *glb_policy = (glb_lb_policy *)gpr_zalloc(sizeof(*glb_policy));
/* Get server name. */
arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
@@ -1190,7 +1190,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
wrapped_rr_closure_arg *wc_arg = gpr_zalloc(sizeof(wrapped_rr_closure_arg));
wrapped_rr_closure_arg *wc_arg =
(wrapped_rr_closure_arg *)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
@@ -1272,7 +1273,7 @@ static void schedule_next_client_load_report(grpc_exec_ctx *exec_ctx,
static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
glb_policy->client_load_report_payload = NULL;
if (error != GRPC_ERROR_NONE || glb_policy->lb_call == NULL) {
@@ -1312,7 +1313,7 @@ static bool load_report_counters_are_zero(grpc_grpclb_request *request) {
static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == NULL) {
glb_policy->client_load_report_timer_pending = false;
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
@@ -1517,7 +1518,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->initial_request_sent = true;
// If we attempted to send a client load report before the initial
// request was sent, send the load report now.
@@ -1530,7 +1531,7 @@ static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
@@ -1647,7 +1648,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
glb_policy->retry_timer_active = false;
if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
@@ -1662,7 +1663,7 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
GPR_ASSERT(glb_policy->lb_call != NULL);
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
char *status_details =
@@ -1724,8 +1725,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
glb_policy->pending_update_args->args);
gpr_free(glb_policy->pending_update_args);
}
glb_policy->pending_update_args =
gpr_zalloc(sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc(
sizeof(*glb_policy->pending_update_args));
glb_policy->pending_update_args->client_channel_factory =
args->client_channel_factory;
glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -1753,7 +1754,8 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
(void *)glb_policy);
}
}
const grpc_lb_addresses *addresses = arg->value.pointer.p;
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
GPR_ASSERT(glb_policy->lb_channel != NULL);
grpc_channel_args *lb_channel_args = build_lb_channel_args(
exec_ctx, addresses, glb_policy->response_generator, args->args);
@@ -1786,7 +1788,7 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
glb_lb_policy *glb_policy = (glb_lb_policy *)arg;
if (glb_policy->shutting_down) goto done;
// Re-initialize the lb_call. This should also take care of updating the
// embedded RR policy. Note that the current RR policy, if any, will stay in

@@ -42,7 +42,8 @@ struct grpc_grpclb_client_stats {
};
grpc_grpclb_client_stats* grpc_grpclb_client_stats_create() {
grpc_grpclb_client_stats* client_stats = gpr_zalloc(sizeof(*client_stats));
grpc_grpclb_client_stats* client_stats =
(grpc_grpclb_client_stats*)gpr_zalloc(sizeof(*client_stats));
gpr_ref_init(&client_stats->refs, 1);
return client_stats;
}
@@ -88,7 +89,8 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
// Record the drop.
if (client_stats->drop_token_counts == NULL) {
client_stats->drop_token_counts =
gpr_zalloc(sizeof(grpc_grpclb_dropped_call_counts));
(grpc_grpclb_dropped_call_counts*)gpr_zalloc(
sizeof(grpc_grpclb_dropped_call_counts));
}
grpc_grpclb_dropped_call_counts* drop_token_counts =
client_stats->drop_token_counts;
@@ -103,9 +105,9 @@ void grpc_grpclb_client_stats_add_call_dropped_locked(
while (new_num_entries < drop_token_counts->num_entries + 1) {
new_num_entries *= 2;
}
drop_token_counts->token_counts =
gpr_realloc(drop_token_counts->token_counts,
new_num_entries * sizeof(grpc_grpclb_drop_token_count));
drop_token_counts->token_counts = (grpc_grpclb_drop_token_count*)gpr_realloc(
drop_token_counts->token_counts,
new_num_entries * sizeof(grpc_grpclb_drop_token_count));
grpc_grpclb_drop_token_count* new_entry =
&drop_token_counts->token_counts[drop_token_counts->num_entries++];
new_entry->token = gpr_strdup(token);

@@ -25,7 +25,7 @@
/* invoked once for every Server in ServerList */
static bool count_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
grpc_grpclb_serverlist *sl = *arg;
grpc_grpclb_serverlist *sl = (grpc_grpclb_serverlist *)*arg;
grpc_grpclb_server server;
if (!pb_decode(stream, grpc_lb_v1_Server_fields, &server)) {
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -46,9 +46,10 @@ typedef struct decode_serverlist_arg {
/* invoked once for every Server in ServerList */
static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
void **arg) {
decode_serverlist_arg *dec_arg = *arg;
decode_serverlist_arg *dec_arg = (decode_serverlist_arg *)*arg;
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx);
grpc_grpclb_server *server = gpr_zalloc(sizeof(grpc_grpclb_server));
grpc_grpclb_server *server =
(grpc_grpclb_server *)gpr_zalloc(sizeof(grpc_grpclb_server));
if (!pb_decode(stream, grpc_lb_v1_Server_fields, server)) {
gpr_free(server);
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream));
@@ -59,7 +60,8 @@ static bool decode_serverlist(pb_istream_t *stream, const pb_field_t *field,
}
grpc_grpclb_request *grpc_grpclb_request_create(const char *lb_service_name) {
grpc_grpclb_request *req = gpr_malloc(sizeof(grpc_grpclb_request));
grpc_grpclb_request *req =
(grpc_grpclb_request *)gpr_malloc(sizeof(grpc_grpclb_request));
req->has_client_stats = false;
req->has_initial_request = true;
req->initial_request.has_name = true;
@@ -78,14 +80,15 @@ static void populate_timestamp(gpr_timespec timestamp,
static bool encode_string(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
char *str = *arg;
char *str = (char *)*arg;
if (!pb_encode_tag_for_field(stream, field)) return false;
return pb_encode_string(stream, (uint8_t *)str, strlen(str));
}
static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
void *const *arg) {
grpc_grpclb_dropped_call_counts *drop_entries = *arg;
grpc_grpclb_dropped_call_counts *drop_entries =
(grpc_grpclb_dropped_call_counts *)*arg;
if (drop_entries == NULL) return true;
for (size_t i = 0; i < drop_entries->num_entries; ++i) {
if (!pb_encode_tag_for_field(stream, field)) return false;
@@ -104,7 +107,8 @@ static bool encode_drops(pb_ostream_t *stream, const pb_field_t *field,
grpc_grpclb_request *grpc_grpclb_load_report_request_create_locked(
grpc_grpclb_client_stats *client_stats) {
grpc_grpclb_request *req = gpr_zalloc(sizeof(grpc_grpclb_request));
grpc_grpclb_request *req =
(grpc_grpclb_request *)gpr_zalloc(sizeof(grpc_grpclb_request));
req->has_client_stats = true;
req->client_stats.has_timestamp = true;
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp);
@@ -179,7 +183,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_grpc_grpclb_response),
GRPC_SLICE_LENGTH(encoded_grpc_grpclb_response));
pb_istream_t stream_at_start = stream;
grpc_grpclb_serverlist *sl = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_serverlist *sl =
(grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_response res;
memset(&res, 0, sizeof(grpc_grpclb_response));
// First pass: count number of servers.
@@ -193,7 +198,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
sl->servers = gpr_zalloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
sl->servers = (grpc_grpclb_server **)gpr_zalloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -226,13 +232,16 @@ void grpc_grpclb_destroy_serverlist(grpc_grpclb_serverlist *serverlist) {
grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
const grpc_grpclb_serverlist *sl) {
grpc_grpclb_serverlist *copy = gpr_zalloc(sizeof(grpc_grpclb_serverlist));
grpc_grpclb_serverlist *copy =
(grpc_grpclb_serverlist *)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
copy->servers = gpr_malloc(sizeof(grpc_grpclb_server *) * sl->num_servers);
copy->servers = (grpc_grpclb_server **)gpr_malloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] = gpr_malloc(sizeof(grpc_grpclb_server));
copy->servers[i] =
(grpc_grpclb_server *)gpr_malloc(sizeof(grpc_grpclb_server));
memcpy(copy->servers[i], sl->servers[i], sizeof(grpc_grpclb_server));
}
return copy;

@@ -217,7 +217,7 @@ static int pf_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
pp = gpr_malloc(sizeof(*pp));
pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->initial_metadata_flags = pick_args->initial_metadata_flags;
@@ -314,7 +314,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
const grpc_lb_addresses *addresses = arg->value.pointer.p;
const grpc_lb_addresses *addresses =
(const grpc_lb_addresses *)arg->value.pointer.p;
if (addresses->num_addresses == 0) {
// Empty update. Unsubscribe from all current subchannels and put the
// channel in TRANSIENT_FAILURE.
@@ -392,7 +393,8 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_channel_args_destroy(exec_ctx, p->pending_update_args->args);
gpr_free(p->pending_update_args);
}
p->pending_update_args = gpr_zalloc(sizeof(*p->pending_update_args));
p->pending_update_args =
(grpc_lb_policy_args *)gpr_zalloc(sizeof(*p->pending_update_args));
p->pending_update_args->client_channel_factory =
args->client_channel_factory;
p->pending_update_args->args = grpc_channel_args_copy(args->args);
@@ -456,7 +458,7 @@ static void pf_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
static void pf_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
pick_first_lb_policy *p = arg;
pick_first_lb_policy *p = (pick_first_lb_policy *)arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
@@ -678,7 +680,7 @@ static grpc_lb_policy *create_pick_first(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
pick_first_lb_policy *p = gpr_zalloc(sizeof(*p));
pick_first_lb_policy *p = (pick_first_lb_policy *)gpr_zalloc(sizeof(*p));
if (GRPC_TRACER_ON(grpc_lb_pick_first_trace)) {
gpr_log(GPR_DEBUG, "Pick First %p created.", (void *)p);
}

@@ -144,10 +144,11 @@ struct rr_subchannel_list {
static rr_subchannel_list *rr_subchannel_list_create(round_robin_lb_policy *p,
size_t num_subchannels) {
rr_subchannel_list *subchannel_list = gpr_zalloc(sizeof(*subchannel_list));
rr_subchannel_list *subchannel_list =
(rr_subchannel_list *)gpr_zalloc(sizeof(*subchannel_list));
subchannel_list->policy = p;
subchannel_list->subchannels =
gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
(subchannel_data *)gpr_zalloc(sizeof(subchannel_data) * num_subchannels);
subchannel_list->num_subchannels = num_subchannels;
gpr_ref_init(&subchannel_list->refcount, 1);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
@@ -452,7 +453,7 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (!p->started_picking) {
start_picking_locked(exec_ctx, p);
}
pending_pick *pp = gpr_malloc(sizeof(*pp));
pending_pick *pp = (pending_pick *)gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->target = target;
pp->on_complete = on_complete;
@@ -553,7 +554,7 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
subchannel_data *sd = arg;
subchannel_data *sd = (subchannel_data *)arg;
round_robin_lb_policy *p = sd->subchannel_list->policy;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
@@ -754,7 +755,7 @@ static void rr_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
}
return;
}
grpc_lb_addresses *addresses = arg->value.pointer.p;
grpc_lb_addresses *addresses = (grpc_lb_addresses *)arg->value.pointer.p;
rr_subchannel_list *subchannel_list =
rr_subchannel_list_create(p, addresses->num_addresses);
if (addresses->num_addresses == 0) {
@@ -887,7 +888,7 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
grpc_lb_policy_factory *factory,
grpc_lb_policy_args *args) {
GPR_ASSERT(args->client_channel_factory != NULL);
round_robin_lb_policy *p = gpr_zalloc(sizeof(*p));
round_robin_lb_policy *p = (round_robin_lb_policy *)gpr_zalloc(sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable, args->combiner);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");

@@ -28,11 +28,12 @@
grpc_lb_addresses* grpc_lb_addresses_create(
size_t num_addresses, const grpc_lb_user_data_vtable* user_data_vtable) {
grpc_lb_addresses* addresses = gpr_zalloc(sizeof(grpc_lb_addresses));
grpc_lb_addresses* addresses =
(grpc_lb_addresses*)gpr_zalloc(sizeof(grpc_lb_addresses));
addresses->num_addresses = num_addresses;
addresses->user_data_vtable = user_data_vtable;
const size_t addresses_size = sizeof(grpc_lb_address) * num_addresses;
addresses->addresses = gpr_zalloc(addresses_size);
addresses->addresses = (grpc_lb_address*)gpr_zalloc(addresses_size);
return addresses;
}

@@ -34,7 +34,7 @@ typedef struct {
static void grpc_proxy_mapper_list_register(grpc_proxy_mapper_list* list,
bool at_start,
grpc_proxy_mapper* mapper) {
list->list = gpr_realloc(
list->list = (grpc_proxy_mapper**)gpr_realloc(
list->list, (list->num_mappers + 1) * sizeof(grpc_proxy_mapper*));
if (at_start) {
memmove(list->list + 1, list->list,

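grpc_proxy_mapper_list_register grows the array by one slot and, when
at_start is set, shifts the existing entries up with memmove before filling
slot zero. The same grow-then-shift idiom in isolation, over a hypothetical
ptr_list type (failure handling omitted, as gpr_realloc aborts on OOM):

#include <stdlib.h>
#include <string.h>

typedef struct {
  void **items;
  size_t num;
} ptr_list;

static void ptr_list_insert(ptr_list *l, void *item, int at_start) {
  l->items = (void **)realloc(l->items, (l->num + 1) * sizeof(void *));
  if (at_start) {
    /* memmove, not memcpy: source and destination overlap. */
    memmove(l->items + 1, l->items, l->num * sizeof(void *));
    l->items[0] = item;
  } else {
    l->items[l->num] = item;
  }
  l->num++;
}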
@ -144,7 +144,7 @@ static void dns_ares_channel_saw_error_locked(grpc_exec_ctx *exec_ctx,
static void dns_ares_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
ares_dns_resolver *r = arg;
ares_dns_resolver *r = (ares_dns_resolver *)arg;
r->have_retry_timer = false;
if (error == GRPC_ERROR_NONE) {
if (!r->resolving) {
@ -227,7 +227,7 @@ static char *choose_service_config(char *service_config_choice_json) {
static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
ares_dns_resolver *r = arg;
ares_dns_resolver *r = (ares_dns_resolver *)arg;
grpc_channel_args *result = NULL;
GPR_ASSERT(r->resolving);
r->resolving = false;
@ -361,7 +361,8 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
const char *path = args->uri->path;
if (path[0] == '/') ++path;
/* Create resolver. */
ares_dns_resolver *r = gpr_zalloc(sizeof(ares_dns_resolver));
ares_dns_resolver *r =
(ares_dns_resolver *)gpr_zalloc(sizeof(ares_dns_resolver));
grpc_resolver_init(&r->base, &dns_ares_resolver_vtable, args->combiner);
if (0 != strcmp(args->uri->authority, "")) {
r->dns_server = gpr_strdup(args->uri->authority);

@ -111,7 +111,7 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver,
grpc_pollset_set *pollset_set) {
*ev_driver = gpr_malloc(sizeof(grpc_ares_ev_driver));
*ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver));
int status = ares_init(&(*ev_driver)->channel);
gpr_log(GPR_DEBUG, "grpc_ares_ev_driver_create");
if (status != ARES_SUCCESS) {
@ -178,7 +178,7 @@ static fd_node *pop_fd_node(fd_node **head, int fd) {
static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = arg;
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
fdn->readable_registered = false;
@ -205,7 +205,7 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
fd_node *fdn = arg;
fd_node *fdn = (fd_node *)arg;
grpc_ares_ev_driver *ev_driver = fdn->ev_driver;
gpr_mu_lock(&fdn->mu);
fdn->writable_registered = false;
@ -251,7 +251,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx,
if (fdn == NULL) {
char *fd_name;
gpr_asprintf(&fd_name, "ares_ev_driver-%" PRIuPTR, i);
fdn = gpr_malloc(sizeof(fd_node));
fdn = (fd_node *)gpr_malloc(sizeof(fd_node));
gpr_log(GPR_DEBUG, "new fd: %d", socks[i]);
fdn->grpc_fd = grpc_fd_create(socks[i], fd_name);
fdn->ev_driver = ev_driver;

@ -158,9 +158,9 @@ static void on_hostbyname_done_cb(void *arg, int status, int timeouts,
for (i = 0; hostent->h_addr_list[i] != NULL; i++) {
}
(*lb_addresses)->num_addresses += i;
(*lb_addresses)->addresses =
gpr_realloc((*lb_addresses)->addresses,
sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
(*lb_addresses)->addresses = (grpc_lb_address *)gpr_realloc(
(*lb_addresses)->addresses,
sizeof(grpc_lb_address) * (*lb_addresses)->num_addresses);
for (i = prev_naddr; i < (*lb_addresses)->num_addresses; i++) {
switch (hostent->h_addrtype) {
case AF_INET6: {
@ -293,12 +293,12 @@ static void on_txt_done_cb(void *arg, int status, int timeouts,
// Found a service config record.
if (result != NULL) {
size_t service_config_len = result->length - prefix_len;
*r->service_config_json_out = gpr_malloc(service_config_len + 1);
*r->service_config_json_out = (char *)gpr_malloc(service_config_len + 1);
memcpy(*r->service_config_json_out, result->txt + prefix_len,
service_config_len);
for (result = result->next; result != NULL && !result->record_start;
result = result->next) {
*r->service_config_json_out = gpr_realloc(
*r->service_config_json_out = (char *)gpr_realloc(
*r->service_config_json_out, service_config_len + result->length + 1);
memcpy(*r->service_config_json_out + service_config_len, result->txt,
result->length);
@ -360,7 +360,8 @@ static grpc_ares_request *grpc_dns_lookup_ares_impl(
error = grpc_ares_ev_driver_create(&ev_driver, interested_parties);
if (error != GRPC_ERROR_NONE) goto error_cleanup;
grpc_ares_request *r = gpr_zalloc(sizeof(grpc_ares_request));
grpc_ares_request *r =
(grpc_ares_request *)gpr_zalloc(sizeof(grpc_ares_request));
gpr_mu_init(&r->mu);
r->ev_driver = ev_driver;
r->on_done = on_done;
@ -502,10 +503,11 @@ static void on_dns_lookup_done_cb(grpc_exec_ctx *exec_ctx, void *arg,
if (r->lb_addrs == NULL || r->lb_addrs->num_addresses == 0) {
*resolved_addresses = NULL;
} else {
*resolved_addresses = gpr_zalloc(sizeof(grpc_resolved_addresses));
*resolved_addresses =
(grpc_resolved_addresses *)gpr_zalloc(sizeof(grpc_resolved_addresses));
(*resolved_addresses)->naddrs = r->lb_addrs->num_addresses;
(*resolved_addresses)->addrs = gpr_zalloc(sizeof(grpc_resolved_address) *
(*resolved_addresses)->naddrs);
(*resolved_addresses)->addrs = (grpc_resolved_address *)gpr_zalloc(
sizeof(grpc_resolved_address) * (*resolved_addresses)->naddrs);
for (size_t i = 0; i < (*resolved_addresses)->naddrs; i++) {
GPR_ASSERT(!r->lb_addrs->addresses[i].is_balancer);
memcpy(&(*resolved_addresses)->addrs[i],

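on_txt_done_cb above assembles the service-config JSON from chained TXT
records: one initial allocation for the first chunk, then realloc plus
memcpy at the old length for each continuation record. A reduced sketch of
that accumulation, with a hypothetical append_chunk helper:

#include <stdlib.h>
#include <string.h>

/* Appends len bytes of chunk to *buf (currently *buf_len bytes long),
   growing the buffer and keeping it NUL-terminated. Starting from
   *buf == NULL works too, since realloc(NULL, n) acts like malloc. */
static void append_chunk(char **buf, size_t *buf_len, const char *chunk,
                         size_t len) {
  *buf = (char *)realloc(*buf, *buf_len + len + 1);
  memcpy(*buf + *buf_len, chunk, len);
  *buf_len += len;
  (*buf)[*buf_len] = '\0';
}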
@ -159,7 +159,7 @@ typedef struct set_response_closure_arg {
static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
set_response_closure_arg* closure_arg = arg;
set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
grpc_fake_resolver_response_generator* generator = closure_arg->generator;
fake_resolver* r = generator->resolver;
if (r->next_results != NULL) {
@ -178,7 +178,8 @@ void grpc_fake_resolver_response_generator_set_response(
grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
grpc_channel_args* next_response) {
GPR_ASSERT(generator->resolver != NULL);
set_response_closure_arg* closure_arg = gpr_zalloc(sizeof(*closure_arg));
set_response_closure_arg* closure_arg =
(set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
closure_arg->generator = generator;
closure_arg->next_response = grpc_channel_args_copy(next_response);
GRPC_CLOSURE_SCHED(exec_ctx,

@ -139,12 +139,14 @@ static long compare_server_name(void* key1, void* key2, void* unused) {
}
static void destroy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data = value;
grpc_server_retry_throttle_data* throttle_data =
(grpc_server_retry_throttle_data*)value;
grpc_server_retry_throttle_data_unref(throttle_data);
}
static void* copy_server_retry_throttle_data(void* value, void* unused) {
grpc_server_retry_throttle_data* throttle_data = value;
grpc_server_retry_throttle_data* throttle_data =
(grpc_server_retry_throttle_data*)value;
return grpc_server_retry_throttle_data_ref(throttle_data);
}

@ -157,7 +157,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *subchannel,
static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_connected_subchannel *c = arg;
grpc_connected_subchannel *c = (grpc_connected_subchannel *)arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
gpr_free(c);
}
@ -181,7 +181,7 @@ void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_subchannel *c = arg;
grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_free((void *)c->filters);
grpc_channel_args_destroy(exec_ctx, c->args);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
@ -290,21 +290,23 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
return c;
}
c = gpr_zalloc(sizeof(*c));
c = (grpc_subchannel *)gpr_zalloc(sizeof(*c));
c->key = key;
gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
c->connector = connector;
grpc_connector_ref(c->connector);
c->num_filters = args->filter_count;
if (c->num_filters > 0) {
c->filters = gpr_malloc(sizeof(grpc_channel_filter *) * c->num_filters);
c->filters = (const grpc_channel_filter **)gpr_malloc(
sizeof(grpc_channel_filter *) * c->num_filters);
memcpy((void *)c->filters, args->filters,
sizeof(grpc_channel_filter *) * c->num_filters);
} else {
c->filters = NULL;
}
c->pollset_set = grpc_pollset_set_create();
grpc_resolved_address *addr = gpr_malloc(sizeof(*addr));
grpc_resolved_address *addr =
(grpc_resolved_address *)gpr_malloc(sizeof(*addr));
grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
grpc_resolved_address *new_address = NULL;
grpc_channel_args *new_args = NULL;
@ -400,7 +402,7 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(grpc_subchannel *c,
static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
external_state_watcher *w = arg;
external_state_watcher *w = (external_state_watcher *)arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
@ -416,7 +418,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_subchannel *c = arg;
grpc_subchannel *c = (grpc_subchannel *)arg;
gpr_mu_lock(&c->mu);
c->have_alarm = false;
if (c->disconnected) {
@ -498,7 +500,7 @@ void grpc_subchannel_notify_on_state_change(
}
gpr_mu_unlock(&c->mu);
} else {
w = gpr_malloc(sizeof(*w));
w = (external_state_watcher *)gpr_malloc(sizeof(*w));
w->subchannel = c;
w->pollset_set = interested_parties;
w->notify = notify;
@ -530,7 +532,7 @@ void grpc_connected_subchannel_process_transport_op(
static void subchannel_on_child_state_changed(grpc_exec_ctx *exec_ctx, void *p,
grpc_error *error) {
state_watcher *sw = p;
state_watcher *sw = (state_watcher *)p;
grpc_subchannel *c = sw->subchannel;
gpr_mu *mu = &c->mu;
@ -620,7 +622,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
memset(&c->connecting_result, 0, sizeof(c->connecting_result));
/* initialize state watcher */
sw_subchannel = gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel = (state_watcher *)gpr_malloc(sizeof(*sw_subchannel));
sw_subchannel->subchannel = c;
sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
GRPC_CLOSURE_INIT(&sw_subchannel->closure, subchannel_on_child_state_changed,
@ -657,7 +659,7 @@ static bool publish_transport_locked(grpc_exec_ctx *exec_ctx,
static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_subchannel *c = arg;
grpc_subchannel *c = (grpc_subchannel *)arg;
grpc_channel_args *delete_channel_args = c->connecting_result.channel_args;
GRPC_SUBCHANNEL_WEAK_REF(c, "connected");
@ -693,7 +695,7 @@ static void subchannel_connected(grpc_exec_ctx *exec_ctx, void *arg,
static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
grpc_error *error) {
grpc_subchannel_call *c = call;
grpc_subchannel_call *c = (grpc_subchannel_call *)call;
GPR_ASSERT(c->schedule_closure_after_destroy != NULL);
GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
grpc_connected_subchannel *connection = c->connection;
@ -747,7 +749,7 @@ grpc_error *grpc_connected_subchannel_create_call(
const grpc_connected_subchannel_call_args *args,
grpc_subchannel_call **call) {
grpc_channel_stack *chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
*call = gpr_arena_alloc(
*call = (grpc_subchannel_call *)gpr_arena_alloc(
args->arena, sizeof(grpc_subchannel_call) + chanstk->call_stack_size);
grpc_call_stack *callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
(*call)->connection = GRPC_CONNECTED_SUBCHANNEL_REF(con, "subchannel_call");

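grpc_connected_subchannel_create_call makes a single arena allocation of
sizeof(grpc_subchannel_call) plus the channel stack's call_stack_size, and
SUBCHANNEL_CALL_TO_CALL_STACK then points just past the header. A sketch of
that header-plus-trailing-payload layout, with a hypothetical call_header
type and malloc standing in for the arena:

#include <stdlib.h>

typedef struct { int refs; } call_header;

/* The variably sized stack lives immediately after the header: one
   allocation covers both, and the stack is recovered by pointer
   arithmetic. */
#define CALL_TO_STACK(h) ((void *)((char *)(h) + sizeof(call_header)))

static call_header *call_create(size_t stack_size) {
  call_header *h = (call_header *)malloc(sizeof(call_header) + stack_size);
  if (h != NULL) h->refs = 1;
  return h;
}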
@ -43,11 +43,11 @@ static bool g_force_creation = false;
static grpc_subchannel_key *create_key(
const grpc_subchannel_args *args,
grpc_channel_args *(*copy_channel_args)(const grpc_channel_args *args)) {
grpc_subchannel_key *k = gpr_malloc(sizeof(*k));
grpc_subchannel_key *k = (grpc_subchannel_key *)gpr_malloc(sizeof(*k));
k->args.filter_count = args->filter_count;
if (k->args.filter_count > 0) {
k->args.filters =
gpr_malloc(sizeof(*k->args.filters) * k->args.filter_count);
k->args.filters = (const grpc_channel_filter **)gpr_malloc(
sizeof(*k->args.filters) * k->args.filter_count);
memcpy((grpc_channel_filter *)k->args.filters, args->filters,
sizeof(*k->args.filters) * k->args.filter_count);
} else {
@ -137,7 +137,7 @@ grpc_subchannel *grpc_subchannel_index_find(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
grpc_subchannel *c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
gpr_avl_get(index, key, exec_ctx), "index_find");
(grpc_subchannel *)gpr_avl_get(index, key, exec_ctx), "index_find");
gpr_avl_unref(index, exec_ctx);
return c;
@ -159,7 +159,7 @@ grpc_subchannel *grpc_subchannel_index_register(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&g_mu);
// - Check to see if a subchannel already exists
c = gpr_avl_get(index, key, exec_ctx);
c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != NULL) {
c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
}
@ -207,7 +207,7 @@ void grpc_subchannel_index_unregister(grpc_exec_ctx *exec_ctx,
// Check to see if this key still refers to the previously
// registered subchannel
grpc_subchannel *c = gpr_avl_get(index, key, exec_ctx);
grpc_subchannel *c = (grpc_subchannel *)gpr_avl_get(index, key, exec_ctx);
if (c != constructed) {
gpr_avl_unref(index, exec_ctx);
break;

@ -45,7 +45,7 @@ static grpc_uri *bad_uri(const char *uri_text, size_t pos, const char *section,
gpr_log(GPR_ERROR, "%s%s'", line_prefix, uri_text);
gpr_free(line_prefix);
line_prefix = gpr_malloc(pfx_len + 1);
line_prefix = (char *)gpr_malloc(pfx_len + 1);
memset(line_prefix, ' ', pfx_len);
line_prefix[pfx_len] = 0;
gpr_log(GPR_ERROR, "%s^ here", line_prefix);
@ -156,7 +156,8 @@ static void parse_query_parts(grpc_uri *uri) {
gpr_string_split(uri->query, QUERY_PARTS_SEPARATOR, &uri->query_parts,
&uri->num_query_parts);
uri->query_parts_values = gpr_malloc(uri->num_query_parts * sizeof(char **));
uri->query_parts_values =
(char **)gpr_malloc(uri->num_query_parts * sizeof(char **));
for (size_t i = 0; i < uri->num_query_parts; i++) {
char **query_param_parts;
size_t num_query_param_parts;
@ -269,7 +270,7 @@ grpc_uri *grpc_uri_parse(grpc_exec_ctx *exec_ctx, const char *uri_text,
fragment_end = i;
}
uri = gpr_zalloc(sizeof(*uri));
uri = (grpc_uri *)gpr_zalloc(sizeof(*uri));
uri->scheme =
decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
uri->authority = decode_and_copy_component(exec_ctx, uri_text,

@ -38,7 +38,7 @@
// filter stack. Yields the call combiner when the batch returns.
static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* ignored) {
grpc_deadline_state* deadline_state = arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
"got on_complete from cancel_stream batch");
GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
@ -48,8 +48,8 @@ static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
// synchronized.
static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
grpc_call_element* elem = (grpc_call_element*)arg;
grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
GRPC_CLOSURE_INIT(&deadline_state->timer_callback, yield_call_combiner,
deadline_state, grpc_schedule_on_exec_ctx));
@ -158,8 +158,10 @@ struct start_timer_after_init_state {
};
static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
struct start_timer_after_init_state* state = arg;
grpc_deadline_state* deadline_state = state->elem->call_data;
struct start_timer_after_init_state* state =
(struct start_timer_after_init_state*)arg;
grpc_deadline_state* deadline_state =
(grpc_deadline_state*)state->elem->call_data;
if (!state->in_call_combiner) {
// We are initially called without holding the call combiner, so we
// need to bounce ourselves into it.
@ -192,7 +194,8 @@ void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
// call stack initialization is finished. To avoid that problem, we
// create a closure to start the timer, and we schedule that closure
// to be run after call stack initialization is done.
struct start_timer_after_init_state* state = gpr_zalloc(sizeof(*state));
struct start_timer_after_init_state* state =
(struct start_timer_after_init_state*)gpr_zalloc(sizeof(*state));
state->elem = elem;
state->deadline = deadline;
GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,

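The comments above capture why the deadline timer cannot be armed inline:
the timer could fire before call stack initialization completes, and the
initial caller does not yet hold the call combiner. Packaging the start in
a heap-allocated closure argument and scheduling it defers both hazards. A
stripped-down sketch of the bounce, where run_later is a hypothetical
stand-in for GRPC_CLOSURE_SCHED:

#include <stdlib.h>

typedef void (*cb_fn)(void *arg);

typedef struct {
  void *elem;       /* which call element to arm */
  long deadline_ms; /* captured now, consumed when the closure runs */
} start_state;

static void start_timer_cb(void *arg) {
  start_state *s = (start_state *)arg;
  /* ... arm the timer for s->deadline_ms against s->elem ... */
  free(s);
}

/* Stand-in scheduler: a real one defers execution; calling inline keeps
   the sketch self-contained. */
static void run_later(cb_fn fn, void *arg) { fn(arg); }

static void deadline_init(void *elem, long deadline_ms) {
  start_state *s = (start_state *)calloc(1, sizeof(*s));
  s->elem = elem;
  s->deadline_ms = deadline_ms;
  run_later(start_timer_cb, s); /* never arm the timer inline */
}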
@ -139,8 +139,8 @@ static grpc_error *client_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
void *user_data, grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@ -154,8 +154,8 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
static void recv_trailing_metadata_on_complete(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_error *error) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (error == GRPC_ERROR_NONE) {
error = client_filter_incoming_metadata(exec_ctx, elem,
calld->recv_trailing_metadata);
@ -234,7 +234,7 @@ static void on_send_message_next_done(grpc_exec_ctx *exec_ctx, void *arg,
}
static char *slice_buffer_to_string(grpc_slice_buffer *slice_buffer) {
char *payload_bytes = gpr_malloc(slice_buffer->length + 1);
char *payload_bytes = (char *)gpr_malloc(slice_buffer->length + 1);
size_t offset = 0;
for (size_t i = 0; i < slice_buffer->count; ++i) {
memcpy(payload_bytes + offset,
@ -300,8 +300,8 @@ static void remove_if_present(grpc_exec_ctx *exec_ctx,
static void hc_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *channeld = (channel_data *)elem->channel_data;
GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
if (batch->recv_initial_metadata) {
@ -536,7 +536,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args *args,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
GPR_ASSERT(!args->is_last);
GPR_ASSERT(args->optional_transport != NULL);
chand->static_scheme = scheme_from_args(args->channel_args);
@ -552,7 +552,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
}

@ -44,7 +44,7 @@ static bool maybe_add_optional_filter(grpc_exec_ctx *exec_ctx,
grpc_channel_stack_builder *builder,
void *arg) {
if (!is_building_http_like_transport(builder)) return true;
optional_filter *filtarg = arg;
optional_filter *filtarg = (optional_filter *)arg;
const grpc_channel_args *channel_args =
grpc_channel_stack_builder_get_channel_arguments(builder);
bool enable = grpc_channel_arg_get_bool(

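maybe_add_optional_filter keys its decision off a boolean channel argument,
falling back to a default when the argument is absent. A sketch of that
lookup-with-default shape, over a hypothetical flag_arg array rather than
real grpc_channel_args:

#include <stdbool.h>
#include <string.h>

typedef struct {
  const char *key;
  bool value;
} flag_arg;

static bool flag_get_bool(const flag_arg *args, size_t n, const char *key,
                          bool default_value) {
  for (size_t i = 0; i < n; i++) {
    if (strcmp(args[i].key, key) == 0) return args[i].value;
  }
  return default_value; /* absent argument => default; filter stays optional */
}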
@ -82,8 +82,8 @@ typedef struct channel_data {
static bool skip_compression(grpc_call_element *elem, uint32_t flags,
bool has_compression_algorithm) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *channeld = (channel_data *)elem->channel_data;
if (flags & (GRPC_WRITE_NO_COMPRESS | GRPC_WRITE_INTERNAL_COMPRESS)) {
return true;
@ -106,8 +106,8 @@ static grpc_error *process_send_initial_metadata(
static grpc_error *process_send_initial_metadata(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_metadata_batch *initial_metadata, bool *has_compression_algorithm) {
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *channeld = (channel_data *)elem->channel_data;
*has_compression_algorithm = false;
grpc_stream_compression_algorithm stream_compression_algorithm =
GRPC_STREAM_COMPRESS_NONE;
@ -285,7 +285,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
void *arg,
grpc_error *error) {
call_data *calld = arg;
call_data *calld = (call_data *)arg;
if (calld->send_message_batch != NULL) {
grpc_transport_stream_op_batch_finish_with_failure(
exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
@ -374,7 +374,7 @@ static void start_send_message_batch(grpc_exec_ctx *exec_ctx, void *arg,
static void compress_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
// Handle cancel_stream.
if (batch->cancel_stream) {
@ -473,7 +473,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
GRPC_ERROR_UNREF(calld->cancel_error);
}
@ -482,7 +482,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *channeld = elem->channel_data;
channel_data *channeld = (channel_data *)elem->channel_data;
/* Configuration for message compression */
channeld->enabled_algorithms_bitset =

@ -94,7 +94,7 @@ static void add_error(const char *error_name, grpc_error **cumulative,
static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_metadata_batch *b) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
grpc_error *error = GRPC_ERROR_NONE;
static const char *error_name = "Failed processing incoming headers";
@ -263,8 +263,8 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
err = server_filter_incoming_metadata(exec_ctx, elem,
calld->recv_initial_metadata);
@ -276,8 +276,8 @@ static void hs_on_recv(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
/* Call recv_message_ready if we got the payload via the path field */
if (calld->seen_path_with_query && calld->recv_message_ready != NULL) {
*calld->pp_recv_message = calld->payload_bin_delivered
@ -296,8 +296,8 @@ static void hs_on_complete(grpc_exec_ctx *exec_ctx, void *user_data,
static void hs_recv_message_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (calld->seen_path_with_query) {
// Do nothing. This is probably a GET request, and the payload will be
// returned in the hs_on_complete callback.
@ -314,7 +314,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
if (op->send_initial_metadata) {
grpc_error *error = GRPC_ERROR_NONE;
@ -376,7 +376,7 @@ static grpc_error *hs_mutate_op(grpc_exec_ctx *exec_ctx,
static void hs_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
grpc_error *error = hs_mutate_op(exec_ctx, elem, op);
if (error != GRPC_ERROR_NONE) {
@ -393,7 +393,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
/* initialize members */
calld->call_combiner = args->call_combiner;
GRPC_CLOSURE_INIT(&calld->hs_on_recv, hs_on_recv, elem,
@ -410,7 +410,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
}

@ -56,8 +56,8 @@ typedef struct channel_data {
static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *err) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (err == GRPC_ERROR_NONE) {
if (calld->recv_initial_metadata->idx.named.path != NULL) {
@ -88,7 +88,7 @@ static void on_initial_md_ready(grpc_exec_ctx *exec_ctx, void *user_data,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
calld->id = (intptr_t)args->call_stack;
GRPC_CLOSURE_INIT(&calld->on_initial_md_ready, on_initial_md_ready, elem,
grpc_schedule_on_exec_ctx);
@ -111,7 +111,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *ignored) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
/* TODO(dgq): do something with the data
channel_data *chand = elem->channel_data;
@ -141,7 +141,7 @@ static grpc_error *init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element_args *args) {
GPR_ASSERT(!args->is_last);
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
chand->id = (intptr_t)args->channel_stack;
/* TODO(dgq): do something with the data
@ -176,8 +176,8 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_mdelem md) {
grpc_call_element *elem = user_data;
call_data *calld = elem->call_data;
grpc_call_element *elem = (grpc_call_element *)user_data;
call_data *calld = (call_data *)elem->call_data;
if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_LB_COST_BIN)) {
calld->trailing_md_string = GRPC_MDVALUE(md);
return GRPC_FILTERED_REMOVE();
@ -189,7 +189,7 @@ static void lr_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *op) {
GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
if (op->recv_initial_metadata) {
/* substitute our callback for the higher callback */

@ -41,7 +41,7 @@ static bool maybe_add_server_load_reporting_filter(
grpc_exec_ctx *exec_ctx, grpc_channel_stack_builder *builder, void *arg) {
const grpc_channel_args *args =
grpc_channel_stack_builder_get_channel_arguments(builder);
const grpc_channel_filter *filter = arg;
const grpc_channel_filter *filter = (const grpc_channel_filter *)arg;
grpc_channel_stack_builder_iterator *it =
grpc_channel_stack_builder_iterator_find(builder, filter->name);
const bool already_has_load_reporting_filter =

@ -93,8 +93,8 @@ static void chttp2_connector_shutdown(grpc_exec_ctx *exec_ctx,
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_handshaker_args *args = arg;
chttp2_connector *c = args->user_data;
grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
chttp2_connector *c = (chttp2_connector *)args->user_data;
gpr_mu_lock(&c->mu);
if (error != GRPC_ERROR_NONE || c->shutdown) {
if (error == GRPC_ERROR_NONE) {
@ -143,7 +143,7 @@ static void start_handshake_locked(grpc_exec_ctx *exec_ctx,
}
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
chttp2_connector *c = arg;
chttp2_connector *c = (chttp2_connector *)arg;
gpr_mu_lock(&c->mu);
GPR_ASSERT(c->connecting);
c->connecting = false;
@ -198,7 +198,7 @@ static const grpc_connector_vtable chttp2_connector_vtable = {
chttp2_connector_connect};
grpc_connector *grpc_chttp2_connector_create() {
chttp2_connector *c = gpr_zalloc(sizeof(*c));
chttp2_connector *c = (chttp2_connector *)gpr_zalloc(sizeof(*c));
c->base.vtable = &chttp2_connector_vtable;
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);

@ -60,8 +60,9 @@ typedef struct {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_handshaker_args *args = arg;
server_connection_state *connection_state = args->user_data;
grpc_handshaker_args *args = (grpc_handshaker_args *)arg;
server_connection_state *connection_state =
(server_connection_state *)args->user_data;
gpr_mu_lock(&connection_state->server_state->mu);
if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
const char *error_str = grpc_error_string(error);
@ -108,7 +109,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_pollset *accepting_pollset,
grpc_tcp_server_acceptor *acceptor) {
server_state *state = arg;
server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
if (state->shutdown) {
gpr_mu_unlock(&state->mu);
@ -143,7 +144,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
void *arg, grpc_pollset **pollsets,
size_t pollset_count) {
server_state *state = arg;
server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = false;
gpr_mu_unlock(&state->mu);
@ -153,7 +154,7 @@ static void server_start_listener(grpc_exec_ctx *exec_ctx, grpc_server *server,
static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
server_state *state = arg;
server_state *state = (server_state *)arg;
/* ensure all threads have unlocked */
gpr_mu_lock(&state->mu);
grpc_closure *destroy_done = state->server_destroy_listener_done;
@ -178,7 +179,7 @@ static void tcp_server_shutdown_complete(grpc_exec_ctx *exec_ctx, void *arg,
static void server_destroy_listener(grpc_exec_ctx *exec_ctx,
grpc_server *server, void *arg,
grpc_closure *destroy_done) {
server_state *state = arg;
server_state *state = (server_state *)arg;
gpr_mu_lock(&state->mu);
state->shutdown = true;
state->server_destroy_listener_done = destroy_done;
@ -208,7 +209,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
if (err != GRPC_ERROR_NONE) {
goto error;
}
state = gpr_zalloc(sizeof(*state));
state = (server_state *)gpr_zalloc(sizeof(*state));
GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
tcp_server_shutdown_complete, state,
grpc_schedule_on_exec_ctx);
@ -225,7 +226,7 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
gpr_mu_init(&state->mu);
const size_t naddrs = resolved->naddrs;
errors = gpr_malloc(sizeof(*errors) * naddrs);
errors = (grpc_error **)gpr_malloc(sizeof(*errors) * naddrs);
for (i = 0; i < naddrs; i++) {
errors[i] =
grpc_tcp_server_add_port(tcp_server, &resolved->addrs[i], &port_temp);

@ -84,8 +84,6 @@ grpc_tracer_flag grpc_trace_chttp2_refcount =
GRPC_TRACER_INITIALIZER(false, "chttp2_refcount");
#endif
static const grpc_transport_vtable vtable;
/* forward declarations of various callbacks that we'll build closures around */
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
grpc_error *error);
@ -248,6 +246,8 @@ void grpc_chttp2_unref_transport(grpc_exec_ctx *exec_ctx,
void grpc_chttp2_ref_transport(grpc_chttp2_transport *t) { gpr_ref(&t->refs); }
#endif
static const grpc_transport_vtable *get_vtable(void);
static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
const grpc_channel_args *channel_args,
grpc_endpoint *ep, bool is_client) {
@ -257,7 +257,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = &vtable;
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
@ -542,11 +542,6 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
}
}
GRPC_CLOSURE_INIT(&t->write_action, write_action, t,
t->opt_target == GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT
? grpc_executor_scheduler
: grpc_schedule_on_exec_ctx);
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
t->ping_state.is_delayed_ping_timer_set = false;
@ -573,7 +568,7 @@ static void init_transport(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void destroy_transport_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->destroying = 1;
close_transport_locked(
exec_ctx, t,
@ -699,7 +694,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
grpc_chttp2_stream *s = sp;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
GPR_TIMER_BEGIN("destroy_stream", 0);
@ -783,7 +778,7 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
grpc_chttp2_stream *grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport *t,
uint32_t id) {
return grpc_chttp2_stream_map_find(&t->stream_map, id);
return (grpc_chttp2_stream *)grpc_chttp2_stream_map_find(&t->stream_map, id);
}
grpc_chttp2_stream *grpc_chttp2_parsing_accept_stream(grpc_exec_ctx *exec_ctx,
@ -842,6 +837,7 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
t->is_first_write_in_batch = true;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_SCHED(
exec_ctx,
@ -860,52 +856,100 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
}
void grpc_chttp2_become_writable(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s,
grpc_chttp2_stream_write_type stream_write_type, const char *reason) {
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
bool also_initiate_write, const char *reason) {
if (!t->closed && grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
switch (stream_write_type) {
case GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK:
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED:
grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
case GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED:
grpc_chttp2_initiate_write(exec_ctx, t, reason);
break;
if (also_initiate_write) {
grpc_chttp2_initiate_write(exec_ctx, t, reason);
}
}
static grpc_closure_scheduler *write_scheduler(grpc_chttp2_transport *t,
bool early_results_scheduled,
bool partial_write) {
/* if it's not the first write in a batch, always offload to the executor:
we'll probably end up queuing against the kernel anyway, so we'll likely
get better latency overall if we switch writing work elsewhere and continue
with application work above */
if (!t->is_first_write_in_batch) {
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
/* equivalently, if it's a partial write, we *know* we're going to be taking a
thread jump to write it because of the above, so we may as well do so
immediately */
if (partial_write) {
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
}
switch (t->opt_target) {
case GRPC_CHTTP2_OPTIMIZE_FOR_THROUGHPUT:
/* executor gives us the largest probability of being able to batch a
* write with others on this transport */
return grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
case GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY:
return grpc_schedule_on_exec_ctx;
}
GPR_UNREACHABLE_CODE(return NULL);
}
#define WRITE_STATE_TUPLE_TO_INT(p, i) (2 * (int)(p) + (int)(i))
static const char *begin_writing_desc(bool partial, bool inlined) {
switch (WRITE_STATE_TUPLE_TO_INT(partial, inlined)) {
case WRITE_STATE_TUPLE_TO_INT(false, false):
return "begin write in background";
case WRITE_STATE_TUPLE_TO_INT(false, true):
return "begin write in current thread";
case WRITE_STATE_TUPLE_TO_INT(true, false):
return "begin partial write in background";
case WRITE_STATE_TUPLE_TO_INT(true, true):
return "begin partial write in current thread";
}
GPR_UNREACHABLE_CODE(return "bad state tuple");
}
static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("write_action_begin_locked", 0);
grpc_chttp2_transport *t = gt;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
: grpc_chttp2_begin_write(exec_ctx, t)) {
case GRPC_CHTTP2_NOTHING_TO_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
break;
case GRPC_CHTTP2_PARTIAL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
"begin writing partial");
GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
case GRPC_CHTTP2_FULL_WRITE:
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"begin writing");
GRPC_CLOSURE_SCHED(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
break;
grpc_chttp2_begin_write_result r;
if (t->closed) {
r.writing = false;
} else {
r = grpc_chttp2_begin_write(exec_ctx, t);
}
if (r.writing) {
if (r.partial) {
GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
}
if (!t->is_first_write_in_batch) {
GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
}
grpc_closure_scheduler *scheduler =
write_scheduler(t, r.early_results_scheduled, r.partial);
if (scheduler != grpc_schedule_on_exec_ctx) {
GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
}
set_write_state(
exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
: GRPC_CHTTP2_WRITE_STATE_WRITING,
begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
"begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
}
GPR_TIMER_END("write_action_begin_locked", 0);
}
static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
grpc_chttp2_transport *t = gt;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
GPR_TIMER_BEGIN("write_action", 0);
grpc_endpoint_write(
exec_ctx, t->ep, &t->outbuf,
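Two decisions intertwine in the rewritten write path above. First, the old
code bound t->write_action to one scheduler at transport init (the removed
GRPC_CLOSURE_INIT with the throughput/latency ternary); now
write_action_begin_locked re-initializes the closure with a scheduler
chosen per write. Second, write_scheduler's policy: any write that is not
the first in its batch, or any partial write, goes to the executor;
otherwise the latency target runs inline while the throughput target is
offloaded for batching. A reduced restatement with hypothetical closure and
sched_t types, not the gRPC ones:

#include <stdbool.h>

typedef enum { RUN_INLINE, RUN_ON_EXECUTOR } sched_t;

typedef struct {
  void (*fn)(void *arg);
  void *arg;
  sched_t scheduler;
} closure;

/* Re-bind the scheduler on every dispatch, mirroring the per-write
   GRPC_CLOSURE_INIT above. */
static closure *closure_init(closure *c, void (*fn)(void *), void *arg,
                             sched_t scheduler) {
  c->fn = fn;
  c->arg = arg;
  c->scheduler = scheduler;
  return c;
}

static sched_t pick_write_scheduler(bool first_in_batch, bool partial,
                                    bool optimize_for_latency) {
  if (!first_in_batch || partial) return RUN_ON_EXECUTOR;
  return optimize_for_latency ? RUN_INLINE : RUN_ON_EXECUTOR;
}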
@ -917,7 +961,7 @@ static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (error != GRPC_ERROR_NONE) {
close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
@ -942,7 +986,8 @@ static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
"continue writing [!covered]");
"continue writing");
t->is_first_write_in_batch = false;
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
GRPC_CLOSURE_RUN(
exec_ctx,
@ -1042,9 +1087,7 @@ static void maybe_start_some_streams(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
post_destructive_reclaimer(exec_ctx, t);
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
"new_stream");
grpc_chttp2_become_writable(exec_ctx, t, s, true, "new_stream");
}
/* cancel out streams that will never be started */
while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
@ -1093,12 +1136,14 @@ void grpc_chttp2_complete_closure_step(grpc_exec_ctx *exec_ctx,
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
if (GRPC_TRACER_ON(grpc_http_trace)) {
const char *errstr = grpc_error_string(error);
gpr_log(GPR_DEBUG,
"complete_closure_step: %p refs=%d flags=0x%04x desc=%s err=%s",
closure,
(int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
(int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT),
desc, errstr);
gpr_log(
GPR_DEBUG,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s",
t, closure,
(int)(closure->next_data.scratch / CLOSURE_BARRIER_FIRST_REF_BIT),
(int)(closure->next_data.scratch % CLOSURE_BARRIER_FIRST_REF_BIT), desc,
errstr, write_state_name(t->write_state));
}
if (error != GRPC_ERROR_NONE) {
if (closure->error_data.error == GRPC_ERROR_NONE) {
@ -1139,9 +1184,7 @@ static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx *exec_ctx,
grpc_chttp2_stream *s) {
if (s->id != 0 && (!s->write_buffering ||
s->flow_controlled_buffer.length > t->write_buffer_size)) {
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
"op.send_message");
grpc_chttp2_become_writable(exec_ctx, t, s, true, "op.send_message");
}
}
@ -1173,15 +1216,19 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
} else {
grpc_chttp2_write_cb *cb = t->write_cb_pool;
if (cb == NULL) {
cb = gpr_malloc(sizeof(*cb));
cb = (grpc_chttp2_write_cb *)gpr_malloc(sizeof(*cb));
} else {
t->write_cb_pool = cb->next;
}
cb->call_at_byte = notify_offset;
cb->closure = s->fetching_send_message_finished;
s->fetching_send_message_finished = NULL;
cb->next = s->on_write_finished_cbs;
s->on_write_finished_cbs = cb;
grpc_chttp2_write_cb **list =
s->fetching_send_message->flags & GRPC_WRITE_THROUGH
? &s->on_write_finished_cbs
: &s->on_flow_controlled_cbs;
cb->next = *list;
*list = cb;
}
s->fetching_send_message = NULL;
return; /* early out */
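The completion callback above is now queued on one of two lists:
write-through sends (GRPC_WRITE_THROUGH set) complete when the write
finishes, while flow-controlled sends wait on on_flow_controlled_cbs.
Selecting the head through a double pointer keeps one push path for both.
The idiom in isolation, with hypothetical node types:

#include <stdbool.h>
#include <stddef.h>

typedef struct node { struct node *next; } node;

static node *on_write_finished_cbs;  /* completes when the write lands */
static node *on_flow_controlled_cbs; /* completes when flow control clears */

static void push(node **list, node *n) {
  n->next = *list;
  *list = n;
}

static void queue_cb(node *cb, bool write_through) {
  node **list =
      write_through ? &on_write_finished_cbs : &on_flow_controlled_cbs;
  push(list, cb);
}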
@ -1201,7 +1248,7 @@ static void continue_fetching_send_locked(grpc_exec_ctx *exec_ctx,
static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
grpc_error *error) {
grpc_chttp2_stream *s = gs;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
grpc_chttp2_transport *t = s->t;
if (error == GRPC_ERROR_NONE) {
error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
@ -1236,8 +1283,9 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
grpc_error *error_ignored) {
GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
grpc_transport_stream_op_batch *op = stream_op;
grpc_chttp2_stream *s = op->handler_private.extra_arg;
grpc_transport_stream_op_batch *op =
(grpc_transport_stream_op_batch *)stream_op;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)op->handler_private.extra_arg;
grpc_transport_stream_op_batch_payload *op_payload = op->payload;
grpc_chttp2_transport *t = s->t;
@ -1290,7 +1338,8 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
if ((s->stream_compression_send_enabled =
(op_payload->send_initial_metadata.send_initial_metadata->idx.named
.content_encoding != NULL)) == true) {
s->compressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
s->compressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->compressed_data_buffer);
}
@ -1336,14 +1385,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
}
} else {
GPR_ASSERT(s->id != 0);
grpc_chttp2_stream_write_type write_type =
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED;
bool initiate_write = true;
if (op->send_message &&
(op->payload->send_message.send_message->flags &
GRPC_WRITE_BUFFER_HINT)) {
write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
initiate_write = false;
}
grpc_chttp2_become_writable(exec_ctx, t, s, write_type,
grpc_chttp2_become_writable(exec_ctx, t, s, initiate_write,
"op.send_initial_metadata");
}
} else {
@ -1452,8 +1500,7 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op,
} else if (s->id != 0) {
/* TODO(ctiller): check if there's flow control for any outstanding
bytes before going writable */
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"op.send_trailing_metadata");
}
}
@ -1578,7 +1625,7 @@ static void send_ping_locked(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static void retry_initiate_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
t->ping_state.is_delayed_ping_timer_set = false;
grpc_chttp2_initiate_write(exec_ctx, t, "retry_send_ping");
}
@ -1634,8 +1681,9 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
static void perform_transport_op_locked(grpc_exec_ctx *exec_ctx,
void *stream_op,
grpc_error *error_ignored) {
grpc_transport_op *op = stream_op;
grpc_chttp2_transport *t = op->handler_private.extra_arg;
grpc_transport_op *op = (grpc_transport_op *)stream_op;
grpc_chttp2_transport *t =
(grpc_chttp2_transport *)op->handler_private.extra_arg;
grpc_error *close_transport = op->disconnect_with_error;
if (op->goaway_error) {
@ -1847,7 +1895,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx,
static void remove_stream(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
uint32_t id, grpc_error *error) {
grpc_chttp2_stream *s = grpc_chttp2_stream_map_delete(&t->stream_map, id);
grpc_chttp2_stream *s =
(grpc_chttp2_stream *)grpc_chttp2_stream_map_delete(&t->stream_map, id);
GPR_ASSERT(s);
if (t->incoming_stream == s) {
t->incoming_stream = NULL;
@ -1979,6 +2028,21 @@ static grpc_error *removal_error(grpc_error *extra_error, grpc_chttp2_stream *s,
return error;
}
static void flush_write_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_chttp2_write_cb **list,
grpc_error *error) {
while (*list) {
grpc_chttp2_write_cb *cb = *list;
*list = cb->next;
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
GRPC_ERROR_REF(error),
"on_write_finished_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s, grpc_error *error) {
@ -1998,16 +2062,9 @@ void grpc_chttp2_fail_pending_writes(grpc_exec_ctx *exec_ctx,
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
"fetching_send_message_finished");
while (s->on_write_finished_cbs) {
grpc_chttp2_write_cb *cb = s->on_write_finished_cbs;
s->on_write_finished_cbs = cb->next;
grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
GRPC_ERROR_REF(error),
"on_write_finished_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
GRPC_ERROR_UNREF(error);
flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
GRPC_ERROR_REF(error));
flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
}
void grpc_chttp2_mark_stream_closed(grpc_exec_ctx *exec_ctx,
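Ownership is the subtle point in the flush_write_list refactor above:
flush_write_list consumes one reference to error (each callback receives
its own GRPC_ERROR_REF, and the trailing GRPC_ERROR_UNREF drops the one
passed in), so grpc_chttp2_fail_pending_writes refs the error for the
first list and hands its remaining reference to the second call. The same
discipline on a toy refcounted type:

#include <stdlib.h>

typedef struct { int refs; } err;

static err *err_ref(err *e) { e->refs++; return e; }
static void err_unref(err *e) { if (--e->refs == 0) free(e); }

/* Consumes one reference to e: each consumer gets its own ref, then the
   reference this function was handed is dropped. */
static void fan_out(err *e, void (*consume)(err *), int n) {
  for (int i = 0; i < n; i++) consume(err_ref(e));
  err_unref(e);
}

A caller draining two lists therefore writes fan_out(err_ref(e), f, a);
then fan_out(e, f, b); passing a fresh reference first and its own
reference second, exactly the shape of the two flush_write_list calls.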
@ -2227,8 +2284,8 @@ typedef struct {
} cancel_stream_cb_args;
static void cancel_stream_cb(void *user_data, uint32_t key, void *stream) {
cancel_stream_cb_args *args = user_data;
grpc_chttp2_stream *s = stream;
cancel_stream_cb_args *args = (cancel_stream_cb_args *)user_data;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)stream;
grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
GRPC_ERROR_REF(args->error));
}
@ -2252,13 +2309,11 @@ void grpc_chttp2_act_on_flowctl_action(grpc_exec_ctx *exec_ctx,
case GRPC_CHTTP2_FLOWCTL_NO_ACTION_NEEDED:
break;
case GRPC_CHTTP2_FLOWCTL_UPDATE_IMMEDIATELY:
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"immediate stream flowctl");
break;
case GRPC_CHTTP2_FLOWCTL_QUEUE_UPDATE:
grpc_chttp2_become_writable(exec_ctx, t, s,
GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
grpc_chttp2_become_writable(exec_ctx, t, s, false,
"queue stream flowctl");
break;
}
@ -2330,7 +2385,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
GPR_TIMER_BEGIN("reading_action_locked", 0);
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
GRPC_ERROR_REF(error);
@ -2371,9 +2426,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
if (t->flow_control.initial_window_update > 0) {
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
grpc_chttp2_become_writable(
exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
"unstalled");
grpc_chttp2_become_writable(exec_ctx, t, s, true, "unstalled");
}
}
t->flow_control.initial_window_update = 0;
@ -2415,7 +2468,7 @@ static void read_action_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping", t->peer_string);
}
@ -2428,7 +2481,7 @@ static void start_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
static void finish_bdp_ping_locked(grpc_exec_ctx *exec_ctx, void *tp,
grpc_error *error) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
if (GRPC_TRACER_ON(grpc_http_trace)) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping", t->peer_string);
}
@ -2477,7 +2530,7 @@ void grpc_chttp2_config_default_keepalive_args(grpc_channel_args *args,
static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || t->closed) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@ -2507,7 +2560,7 @@ static void init_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
@ -2516,7 +2569,7 @@ static void start_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
@ -2532,7 +2585,7 @@ static void finish_keepalive_ping_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void keepalive_watchdog_fired_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (error == GRPC_ERROR_NONE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@ -2613,7 +2666,8 @@ static void incoming_byte_stream_unref(grpc_exec_ctx *exec_ctx,
static void incoming_byte_stream_next_locked(grpc_exec_ctx *exec_ctx,
void *argp,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = argp;
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)argp;
grpc_chttp2_transport *t = bs->transport;
grpc_chttp2_stream *s = bs->stream;
@ -2823,7 +2877,8 @@ static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
static void incoming_byte_stream_destroy_locked(grpc_exec_ctx *exec_ctx,
void *byte_stream,
grpc_error *error_ignored) {
grpc_chttp2_incoming_byte_stream *bs = byte_stream;
grpc_chttp2_incoming_byte_stream *bs =
(grpc_chttp2_incoming_byte_stream *)byte_stream;
grpc_chttp2_stream *s = bs->stream;
grpc_chttp2_transport *t = s->t;
@ -2879,7 +2934,7 @@ static void post_destructive_reclaimer(grpc_exec_ctx *exec_ctx,
static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
if (error == GRPC_ERROR_NONE &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
/* Channel with no active streams: send a goaway to try and make it
@ -2909,11 +2964,12 @@ static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_chttp2_transport *t = arg;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)arg;
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (error == GRPC_ERROR_NONE && n > 0) {
grpc_chttp2_stream *s = grpc_chttp2_stream_map_rand(&t->stream_map);
grpc_chttp2_stream *s =
(grpc_chttp2_stream *)grpc_chttp2_stream_map_rand(&t->stream_map);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id);
@ -2957,10 +3013,13 @@ static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
destroy_transport,
chttp2_get_endpoint};
static const grpc_transport_vtable *get_vtable(void) { return &vtable; }
grpc_transport *grpc_create_chttp2_transport(
grpc_exec_ctx *exec_ctx, const grpc_channel_args *channel_args,
grpc_endpoint *ep, int is_client) {
grpc_chttp2_transport *t = gpr_zalloc(sizeof(grpc_chttp2_transport));
grpc_chttp2_transport *t =
(grpc_chttp2_transport *)gpr_zalloc(sizeof(grpc_chttp2_transport));
init_transport(exec_ctx, t, channel_args, ep, is_client != 0);
return &t->base;
}

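grpc_create_chttp2_transport returns &t->base: the concrete transport
embeds the abstract grpc_transport as its first member, so the base pointer
and the object pointer coincide, and the vtable installed via get_vtable()
dispatches back to the concrete type. A sketch of this C-style subtyping
with hypothetical base/chttp2 names:

#include <stdlib.h>

struct vtable;
typedef struct base { const struct vtable *vt; } base;
struct vtable { void (*destroy)(base *b); };

typedef struct {
  base base; /* must stay first so &t->base == (base *)t */
  int stream_count;
} chttp2;

static void chttp2_destroy(base *b) { free((chttp2 *)b); }

static const struct vtable chttp2_vtable = {chttp2_destroy};

static base *chttp2_create(void) {
  chttp2 *t = (chttp2 *)calloc(1, sizeof(*t));
  t->base.vt = &chttp2_vtable;
  return &t->base; /* callers see only the abstract type */
}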
@ -46,7 +46,7 @@ grpc_error *grpc_chttp2_goaway_parser_begin_frame(grpc_chttp2_goaway_parser *p,
gpr_free(p->debug_data);
p->debug_length = length - 8;
p->debug_data = gpr_malloc(p->debug_length);
p->debug_data = (char *)gpr_malloc(p->debug_length);
p->debug_pos = 0;
p->state = GRPC_CHTTP2_GOAWAY_LSI0;
return GRPC_ERROR_NONE;
@ -60,7 +60,7 @@ grpc_error *grpc_chttp2_goaway_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_goaway_parser *p = parser;
grpc_chttp2_goaway_parser *p = (grpc_chttp2_goaway_parser *)parser;
switch (p->state) {
case GRPC_CHTTP2_GOAWAY_LSI0:

@ -75,7 +75,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_ping_parser *p = parser;
grpc_chttp2_ping_parser *p = (grpc_chttp2_ping_parser *)parser;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes |= (((uint64_t)*cur) << (56 - 8 * p->byte));
@ -111,7 +111,7 @@ grpc_error *grpc_chttp2_ping_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
if (!g_disable_ping_ack) {
if (t->ping_ack_count == t->ping_ack_capacity) {
t->ping_ack_capacity = GPR_MAX(t->ping_ack_capacity * 3 / 2, 3);
t->ping_acks = gpr_realloc(
t->ping_acks = (uint64_t *)gpr_realloc(
t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
}
t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;

@@ -77,7 +77,7 @@ grpc_error *grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx *exec_ctx,
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_rst_stream_parser *p = parser;
grpc_chttp2_rst_stream_parser *p = (grpc_chttp2_rst_stream_parser *)parser;
while (p->byte != 4 && cur != end) {
p->reason_bytes[p->byte] = *cur;

@@ -111,7 +111,7 @@ grpc_error *grpc_chttp2_settings_parser_parse(grpc_exec_ctx *exec_ctx, void *p,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
grpc_chttp2_settings_parser *parser = p;
grpc_chttp2_settings_parser *parser = (grpc_chttp2_settings_parser *)p;
const uint8_t *cur = GRPC_SLICE_START_PTR(slice);
const uint8_t *end = GRPC_SLICE_END_PTR(slice);
char *msg;

@@ -70,7 +70,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
uint8_t *const beg = GRPC_SLICE_START_PTR(slice);
uint8_t *const end = GRPC_SLICE_END_PTR(slice);
uint8_t *cur = beg;
grpc_chttp2_window_update_parser *p = parser;
grpc_chttp2_window_update_parser *p =
(grpc_chttp2_window_update_parser *)parser;
while (p->byte != 4 && cur != end) {
p->amount |= ((uint32_t)*cur) << (8 * (3 - p->byte));
@@ -98,9 +99,8 @@ grpc_error *grpc_chttp2_window_update_parser_parse(
grpc_chttp2_flowctl_recv_stream_update(
&t->flow_control, &s->flow_control, received_update);
if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
grpc_chttp2_become_writable(
exec_ctx, t, s, GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
"stream.read_flow_control");
grpc_chttp2_become_writable(exec_ctx, t, s, true,
"stream.read_flow_control");
}
}
} else {

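The window-update hunk above swaps the write-type argument of grpc_chttp2_become_writable for a plain true; the internal.h hunk later in this diff deletes the GRPC_CHTTP2_STREAM_WRITE_* enum and renames the parameter also_initiate_write. A hedged standalone sketch of the simplified contract (stand-in functions, not the gRPC API):

    #include <stdbool.h>
    #include <stdio.h>

    static void initiate_write(const char *reason) {
      printf("initiate write: %s\n", reason);
    }

    /* The covered/uncovered distinction collapses to one question:
       after marking the stream writable, kick off a transport write? */
    static void become_writable(bool also_initiate_write, const char *reason) {
      /* ... add stream to the writable list, take a ref ... */
      if (also_initiate_write) initiate_write(reason);
    }

    int main(void) {
      become_writable(true, "stream.read_flow_control");
      return 0;
    }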
@@ -536,7 +536,7 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor *c) {
c->max_table_elems = c->cap_table_elems;
c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
c->table_elem_size =
gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
(uint16_t *)gpr_malloc(sizeof(*c->table_elem_size) * c->cap_table_elems);
memset(c->table_elem_size, 0,
sizeof(*c->table_elem_size) * c->cap_table_elems);
for (size_t i = 0; i < GPR_ARRAY_SIZE(c->entries_keys); i++) {
@@ -564,7 +564,8 @@ void grpc_chttp2_hpack_compressor_set_max_usable_size(
}
static void rebuild_elems(grpc_chttp2_hpack_compressor *c, uint32_t new_cap) {
uint16_t *table_elem_size = gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint16_t *table_elem_size =
(uint16_t *)gpr_malloc(sizeof(*table_elem_size) * new_cap);
uint32_t i;
memset(table_elem_size, 0, sizeof(*table_elem_size) * new_cap);

@@ -1284,7 +1284,7 @@ static void append_bytes(grpc_chttp2_hpack_parser_string *str,
GPR_ASSERT(str->data.copied.length + length <= UINT32_MAX);
str->data.copied.capacity = (uint32_t)(str->data.copied.length + length);
str->data.copied.str =
gpr_realloc(str->data.copied.str, str->data.copied.capacity);
(char *)gpr_realloc(str->data.copied.str, str->data.copied.capacity);
}
memcpy(str->data.copied.str + str->data.copied.length, data, length);
GPR_ASSERT(length <= UINT32_MAX - str->data.copied.length);
@@ -1643,7 +1643,7 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
grpc_chttp2_stream *s = sp;
grpc_chttp2_stream *s = (grpc_chttp2_stream *)sp;
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
grpc_slice_buffer_add(
@@ -1665,7 +1665,8 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx,
if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) {
if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) {
s->stream_compression_recv_enabled = true;
s->decompressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer));
s->decompressed_data_buffer =
(grpc_slice_buffer *)gpr_malloc(sizeof(grpc_slice_buffer));
grpc_slice_buffer_init(s->decompressed_data_buffer);
}
}
@@ -1677,7 +1678,7 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_slice slice, int is_last) {
grpc_chttp2_hpack_parser *parser = hpack_parser;
grpc_chttp2_hpack_parser *parser = (grpc_chttp2_hpack_parser *)hpack_parser;
GPR_TIMER_BEGIN("grpc_chttp2_hpack_parser_parse", 0);
if (s != NULL) {
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);

@@ -173,7 +173,7 @@ void grpc_chttp2_hptbl_init(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
GRPC_CHTTP2_INITIAL_HPACK_TABLE_SIZE;
tbl->max_entries = tbl->cap_entries =
entries_for_bytes(tbl->current_table_bytes);
tbl->ents = gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
tbl->ents = (grpc_mdelem *)gpr_malloc(sizeof(*tbl->ents) * tbl->cap_entries);
memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
@@ -228,7 +228,7 @@ static void evict1(grpc_exec_ctx *exec_ctx, grpc_chttp2_hptbl *tbl) {
}
static void rebuild_ents(grpc_chttp2_hptbl *tbl, uint32_t new_cap) {
grpc_mdelem *ents = gpr_malloc(sizeof(*ents) * new_cap);
grpc_mdelem *ents = (grpc_mdelem *)gpr_malloc(sizeof(*ents) * new_cap);
uint32_t i;
for (i = 0; i < tbl->num_ents; i++) {

@@ -262,6 +262,10 @@ struct grpc_chttp2_transport {
/** write execution state of the transport */
grpc_chttp2_write_state write_state;
/** is this the first write in a series of writes?
set when we initiate writing from idle, cleared when we
initiate writing from writing+more */
bool is_first_write_in_batch;
/** is the transport destroying itself? */
uint8_t destroying;
@@ -483,6 +487,7 @@ struct grpc_chttp2_stream {
grpc_slice fetching_slice;
int64_t next_message_end_offset;
int64_t flow_controlled_bytes_written;
int64_t flow_controlled_bytes_flowed;
grpc_closure complete_fetch_locked;
grpc_closure *fetching_send_message_finished;
@@ -555,6 +560,7 @@ struct grpc_chttp2_stream {
grpc_slice_buffer flow_controlled_buffer;
grpc_chttp2_write_cb *on_flow_controlled_cbs;
grpc_chttp2_write_cb *on_write_finished_cbs;
grpc_chttp2_write_cb *finish_after_write;
size_t sending_bytes;
@@ -595,10 +601,13 @@ struct grpc_chttp2_stream {
void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, const char *reason);
typedef enum {
GRPC_CHTTP2_NOTHING_TO_WRITE,
GRPC_CHTTP2_PARTIAL_WRITE,
GRPC_CHTTP2_FULL_WRITE,
typedef struct {
/** are we writing? */
bool writing;
/** if writing: was it a complete flush (false) or a partial flush (true) */
bool partial;
/** did we queue any completions as part of beginning the write */
bool early_results_scheduled;
} grpc_chttp2_begin_write_result;
grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
@@ -840,22 +849,12 @@ void grpc_chttp2_ack_ping(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t);
typedef enum {
/* don't initiate a transport write, but piggyback on the next one */
GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK,
/* initiate a covered write */
GRPC_CHTTP2_STREAM_WRITE_INITIATE_COVERED,
/* initiate an uncovered write */
GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED
} grpc_chttp2_stream_write_type;
/** add a ref to the stream and add it to the writable list;
ref will be dropped in writing.c */
void grpc_chttp2_become_writable(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t,
grpc_chttp2_stream *s,
grpc_chttp2_stream_write_type type,
const char *reason);
bool also_initiate_write, const char *reason);
void grpc_chttp2_cancel_stream(grpc_exec_ctx *exec_ctx,
grpc_chttp2_transport *t, grpc_chttp2_stream *s,

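The internal.h hunks above also replace the tri-state grpc_chttp2_begin_write_result enum with a struct of three independent booleans. A standalone sketch of why a caller wants them separately (field names follow the diff; the driver logic is illustrative, not the real writing path):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
      bool writing;
      bool partial;
      bool early_results_scheduled;
    } begin_write_result;

    static void drive(begin_write_result r) {
      if (r.early_results_scheduled) {
        /* completions were queued while framing; flush them now rather
           than after the endpoint write lands */
        printf("flush early completions\n");
      }
      if (r.writing) {
        printf(r.partial ? "write outbuf, then schedule another pass\n"
                         : "write outbuf once\n");
      }
    }

    int main(void) {
      begin_write_result r = {true, true, false};
      drive(r);
      return 0;
    }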
@@ -106,7 +106,8 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
return err;
}
++cur;
++t->deframe_state;
t->deframe_state =
(grpc_chttp2_deframe_transport_state)(1 + (int)t->deframe_state);
}
if (cur == end) {
return GRPC_ERROR_NONE;
@@ -402,7 +403,7 @@ static void free_timeout(void *p) { gpr_free(p); }
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_initial_header", 0);
@@ -430,7 +431,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_millis timeout;
if (cached_timeout == NULL) {
/* not already parsed: parse it now, and store the result away */
cached_timeout = gpr_malloc(sizeof(grpc_millis));
cached_timeout = (grpc_millis *)gpr_malloc(sizeof(grpc_millis));
if (!grpc_http2_decode_timeout(GRPC_MDVALUE(md), cached_timeout)) {
char *val = grpc_slice_to_c_string(GRPC_MDVALUE(md));
gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'", val);
@@ -481,7 +482,7 @@ static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
static void on_trailing_header(grpc_exec_ctx *exec_ctx, void *tp,
grpc_mdelem md) {
grpc_chttp2_transport *t = tp;
grpc_chttp2_transport *t = (grpc_chttp2_transport *)tp;
grpc_chttp2_stream *s = t->incoming_stream;
GPR_TIMER_BEGIN("on_trailing_header", 0);

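One more C/C++ wrinkle in the parsing.c hunk above: ++t->deframe_state becomes an explicit add-and-cast. C permits ++ on an enum lvalue; C++ does not define it, so the portable spelling goes through int. Standalone illustration with a stand-in enum:

    #include <stdio.h>

    typedef enum { STATE_A, STATE_B, STATE_C } state;

    int main(void) {
      state s = STATE_A;
      /* ++s; -- compiles as C, ill-formed as C++ */
      s = (state)(1 + (int)s); /* accepted by both */
      printf("%d\n", (int)s);
      return 0;
    }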
@@ -27,8 +27,8 @@
void grpc_chttp2_stream_map_init(grpc_chttp2_stream_map *map,
size_t initial_capacity) {
GPR_ASSERT(initial_capacity > 1);
map->keys = gpr_malloc(sizeof(uint32_t) * initial_capacity);
map->values = gpr_malloc(sizeof(void *) * initial_capacity);
map->keys = (uint32_t *)gpr_malloc(sizeof(uint32_t) * initial_capacity);
map->values = (void **)gpr_malloc(sizeof(void *) * initial_capacity);
map->count = 0;
map->free = 0;
map->capacity = initial_capacity;

@@ -122,15 +122,18 @@ static void maybe_initiate_ping(grpc_exec_ctx *exec_ctx,
(t->ping_state.pings_before_data_required != 0);
}
static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
static bool update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s, int64_t send_bytes,
grpc_chttp2_write_cb **list, grpc_error *error) {
grpc_chttp2_write_cb **list, int64_t *ctr,
grpc_error *error) {
bool sched_any = false;
grpc_chttp2_write_cb *cb = *list;
*list = NULL;
s->flow_controlled_bytes_written += send_bytes;
*ctr += send_bytes;
while (cb) {
grpc_chttp2_write_cb *next = cb->next;
if (cb->call_at_byte <= s->flow_controlled_bytes_written) {
if (cb->call_at_byte <= *ctr) {
sched_any = true;
finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
} else {
add_to_write_list(list, cb);
@@ -138,6 +141,7 @@ static void update_list(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
cb = next;
}
GRPC_ERROR_UNREF(error);
return sched_any;
}
static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
@@ -163,6 +167,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
grpc_chttp2_stream *s;
/* stats histogram counters: we increment these throughout this function,
and at the end publish to the central stats histograms */
int flow_control_writes = 0;
int initial_metadata_writes = 0;
int trailing_metadata_writes = 0;
int message_writes = 0;
GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
@@ -176,6 +187,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->force_send_settings = 0;
t->dirtied_local_settings = 0;
t->sent_local_settings = 1;
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
}
/* simple writes are queued to qbuf, and flushed here */
@@ -195,13 +207,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
}
}
bool partial_write = false;
grpc_chttp2_begin_write_result result = {false, false, false};
/* for each grpc_chttp2_stream that's become writable, frame its data
(according to available window sizes) and add to the output buffer */
while (true) {
if (t->outbuf.length > target_write_size(t)) {
partial_write = true;
result.partial = true;
break;
}
@@ -245,13 +257,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
.stats = &s->stats.outgoing};
grpc_chttp2_encode_header(exec_ctx, &t->hpack_compressor, NULL, 0,
s->send_initial_metadata, &hopt, &t->outbuf);
now_writing = true;
t->ping_state.pings_before_data_required =
t->ping_policy.max_pings_without_data;
if (!t->is_client) {
t->ping_recv_state.last_ping_recv_time = 0;
t->ping_recv_state.ping_strikes = 0;
}
initial_metadata_writes++;
} else {
GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_INFO, "not sending initial_metadata (Trailers-Only)"));
@@ -267,10 +279,15 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
[num_extra_headers_for_trailing_metadata++] =
&s->send_initial_metadata->idx.named.content_type->md;
}
trailing_metadata_writes++;
}
s->send_initial_metadata = NULL;
s->sent_initial_metadata = true;
sent_initial_metadata = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_NONE,
"send_initial_metadata_finished");
}
/* send any window updates */
@@ -286,6 +303,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
t->ping_recv_state.last_ping_recv_time = 0;
t->ping_recv_state.ping_strikes = 0;
}
flow_control_writes++;
}
if (sent_initial_metadata) {
/* send any body bytes, if allowed by flow control */
@@ -304,6 +322,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
if (max_outgoing > 0) {
bool is_last_data_frame = false;
bool is_last_frame = false;
size_t sending_bytes_before = s->sending_bytes;
if (s->stream_compression_send_enabled) {
while ((s->flow_controlled_buffer.length > 0 ||
s->compressed_data_buffer->length > 0) &&
@@ -370,6 +389,11 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
&s->stats.outgoing));
}
}
result.early_results_scheduled |=
update_list(exec_ctx, t, s,
(int64_t)(s->sending_bytes - sending_bytes_before),
&s->on_flow_controlled_cbs,
&s->flow_controlled_bytes_flowed, GRPC_ERROR_NONE);
now_writing = true;
if (s->flow_controlled_buffer.length > 0 ||
(s->stream_compression_send_enabled &&
@@ -377,6 +401,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork");
grpc_chttp2_list_add_writable_stream(t, s);
}
message_writes++;
} else if (t->flow_control.remote_window == 0) {
grpc_chttp2_list_add_stalled_by_transport(t, s);
now_writing = true;
@@ -412,6 +437,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
num_extra_headers_for_trailing_metadata,
s->send_trailing_metadata, &hopt,
&t->outbuf);
trailing_metadata_writes++;
}
s->send_trailing_metadata = NULL;
s->sent_trailing_metadata = true;
@@ -421,10 +447,22 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
s->id, GRPC_HTTP2_NO_ERROR, &s->stats.outgoing));
}
now_writing = true;
result.early_results_scheduled = true;
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_NONE, "send_trailing_metadata_finished");
}
}
if (now_writing) {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
exec_ctx, initial_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes);
GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
exec_ctx, trailing_metadata_writes);
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx,
flow_control_writes);
if (!grpc_chttp2_list_add_writing_stream(t, s)) {
/* already in writing list: drop ref */
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
@@ -461,9 +499,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
GPR_TIMER_END("grpc_chttp2_begin_write", 0);
return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
: GRPC_CHTTP2_FULL_WRITE)
: GRPC_CHTTP2_NOTHING_TO_WRITE;
result.writing = t->outbuf.count > 0;
return result;
}
void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
@@ -472,20 +509,13 @@ void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
grpc_chttp2_stream *s;
while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
if (s->sent_initial_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error), "send_initial_metadata_finished");
}
if (s->sending_bytes != 0) {
update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
&s->on_write_finished_cbs, GRPC_ERROR_REF(error));
&s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
GRPC_ERROR_REF(error));
s->sending_bytes = 0;
}
if (s->sent_trailing_metadata) {
grpc_chttp2_complete_closure_step(
exec_ctx, t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, !t->is_client, 1,
GRPC_ERROR_REF(error));
}

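The writing.c hunks above generalize update_list: it now advances a caller-supplied counter (flow_controlled_bytes_flowed when bytes are framed in begin_write, flow_controlled_bytes_written when the endpoint write lands in end_write) and reports whether any callback became runnable, which feeds early_results_scheduled. Reduced to a standalone sketch (simplified: runnable callbacks are merely unlinked here, not scheduled):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct write_cb {
      int64_t call_at_byte;
      struct write_cb *next;
    } write_cb;

    static bool update_list(write_cb **list, int64_t send_bytes, int64_t *ctr) {
      bool sched_any = false;
      *ctr += send_bytes;
      write_cb **tail = list;
      while (*tail) {
        if ((*tail)->call_at_byte <= *ctr) {
          sched_any = true;      /* this callback is now runnable */
          *tail = (*tail)->next; /* unlink it */
        } else {
          tail = &(*tail)->next;
        }
      }
      return sched_any;
    }

    int main(void) {
      write_cb cb = {10, 0};
      write_cb *list = &cb;
      int64_t flowed = 0;
      printf("%d\n", (int)update_list(&list, 16, &flowed)); /* prints 1 */
      return 0;
    }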
@@ -120,7 +120,7 @@ static void slice_buffer_list_append_entry(slice_buffer_list *l,
}
static grpc_slice_buffer *slice_buffer_list_append(slice_buffer_list *l) {
sb_list_entry *next = gpr_malloc(sizeof(*next));
sb_list_entry *next = (sb_list_entry *)gpr_malloc(sizeof(*next));
grpc_slice_buffer_init(&next->sb);
slice_buffer_list_append_entry(l, next);
return &next->sb;
@@ -327,7 +327,8 @@ static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
grpc_error *error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem *elem = metadata->list.head;
(elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
grpc_linked_mdelem *nelem = gpr_arena_alloc(s->arena, sizeof(*nelem));
grpc_linked_mdelem *nelem =
(grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*nelem));
nelem->md = grpc_mdelem_from_slices(
exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@@ -531,12 +532,14 @@ static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
grpc_linked_mdelem *path_md =
(grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*path_md));
path_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
GRPC_ERROR_NONE);
grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
grpc_linked_mdelem *auth_md =
(grpc_linked_mdelem *)gpr_arena_alloc(s->arena, sizeof(*auth_md));
auth_md->md =
grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
@@ -1172,8 +1175,8 @@ static void inproc_transports_create(grpc_exec_ctx *exec_ctx,
grpc_transport **client_transport,
const grpc_channel_args *client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create");
inproc_transport *st = gpr_zalloc(sizeof(*st));
inproc_transport *ct = gpr_zalloc(sizeof(*ct));
inproc_transport *st = (inproc_transport *)gpr_zalloc(sizeof(*st));
inproc_transport *ct = (inproc_transport *)gpr_zalloc(sizeof(*ct));
// Share one lock between both sides since both sides get affected
st->mu = ct->mu = gpr_malloc(sizeof(*st->mu));
gpr_mu_init(&st->mu->mu);

@@ -86,13 +86,14 @@ grpc_channel_args *grpc_channel_args_copy_and_add_and_remove(
}
}
// Create result.
grpc_channel_args *dst = gpr_malloc(sizeof(grpc_channel_args));
grpc_channel_args *dst =
(grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
dst->num_args = num_args_to_copy + num_to_add;
if (dst->num_args == 0) {
dst->args = NULL;
return dst;
}
dst->args = gpr_malloc(sizeof(grpc_arg) * dst->num_args);
dst->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * dst->num_args);
// Copy args from src that are not being removed.
size_t dst_idx = 0;
if (src != NULL) {
@@ -117,7 +118,7 @@ grpc_channel_args *grpc_channel_args_copy(const grpc_channel_args *src) {
grpc_channel_args *grpc_channel_args_union(const grpc_channel_args *a,
const grpc_channel_args *b) {
const size_t max_out = (a->num_args + b->num_args);
grpc_arg *uniques = gpr_malloc(sizeof(*uniques) * max_out);
grpc_arg *uniques = (grpc_arg *)gpr_malloc(sizeof(*uniques) * max_out);
for (size_t i = 0; i < a->num_args; ++i) uniques[i] = a->args[i];
size_t uniques_idx = a->num_args;
@@ -160,24 +161,25 @@ static int cmp_arg(const grpc_arg *a, const grpc_arg *b) {
/* stabilizing comparison function: since channel_args ordering matters for
* keys with the same name, we need to preserve that ordering */
static int cmp_key_stable(const void *ap, const void *bp) {
const grpc_arg *const *a = ap;
const grpc_arg *const *b = bp;
const grpc_arg *const *a = (const grpc_arg *const *)ap;
const grpc_arg *const *b = (const grpc_arg *const *)bp;
int c = strcmp((*a)->key, (*b)->key);
if (c == 0) c = GPR_ICMP(*a, *b);
return c;
}
grpc_channel_args *grpc_channel_args_normalize(const grpc_channel_args *a) {
grpc_arg **args = gpr_malloc(sizeof(grpc_arg *) * a->num_args);
grpc_arg **args = (grpc_arg **)gpr_malloc(sizeof(grpc_arg *) * a->num_args);
for (size_t i = 0; i < a->num_args; i++) {
args[i] = &a->args[i];
}
if (a->num_args > 1)
qsort(args, a->num_args, sizeof(grpc_arg *), cmp_key_stable);
grpc_channel_args *b = gpr_malloc(sizeof(grpc_channel_args));
grpc_channel_args *b =
(grpc_channel_args *)gpr_malloc(sizeof(grpc_channel_args));
b->num_args = a->num_args;
b->args = gpr_malloc(sizeof(grpc_arg) * b->num_args);
b->args = (grpc_arg *)gpr_malloc(sizeof(grpc_arg) * b->num_args);
for (size_t i = 0; i < a->num_args; i++) {
b->args[i] = copy_arg(args[i]);
}

@@ -51,7 +51,8 @@ struct grpc_channel_stack_builder_iterator {
};
grpc_channel_stack_builder *grpc_channel_stack_builder_create(void) {
grpc_channel_stack_builder *b = gpr_zalloc(sizeof(*b));
grpc_channel_stack_builder *b =
(grpc_channel_stack_builder *)gpr_zalloc(sizeof(*b));
b->begin.filter = NULL;
b->end.filter = NULL;
@@ -76,7 +77,8 @@ const char *grpc_channel_stack_builder_get_target(
static grpc_channel_stack_builder_iterator *create_iterator_at_filter_node(
grpc_channel_stack_builder *builder, filter_node *node) {
grpc_channel_stack_builder_iterator *it = gpr_malloc(sizeof(*it));
grpc_channel_stack_builder_iterator *it =
(grpc_channel_stack_builder_iterator *)gpr_malloc(sizeof(*it));
it->builder = builder;
it->node = node;
return it;
@@ -212,7 +214,7 @@ bool grpc_channel_stack_builder_prepend_filter(
static void add_after(filter_node *before, const grpc_channel_filter *filter,
grpc_post_filter_create_init_func post_init_func,
void *user_data) {
filter_node *new = gpr_malloc(sizeof(*new));
filter_node *new = (filter_node *)gpr_malloc(sizeof(*new));
new->next = before->next;
new->prev = before;
new->next->prev = new->prev->next = new;

@@ -100,8 +100,8 @@ static callback_state *get_state_for_batch(
static void con_start_transport_stream_op_batch(
grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_transport_stream_op_batch *batch) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
if (batch->recv_initial_metadata) {
callback_state *state = &calld->recv_initial_metadata_ready;
intercept_callback(
@@ -136,7 +136,7 @@ static void con_start_transport_stream_op_batch(
static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
channel_data *chand = elem->channel_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_perform_op(exec_ctx, chand->transport, op);
}
@@ -144,8 +144,8 @@ static void con_start_transport_op(grpc_exec_ctx *exec_ctx,
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
const grpc_call_element_args *args) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
@@ -158,8 +158,8 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_polling_entity *pollent) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_set_pops(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
}
@@ -168,8 +168,8 @@ static void set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
const grpc_call_final_info *final_info,
grpc_closure *then_schedule_closure) {
call_data *calld = elem->call_data;
channel_data *chand = elem->channel_data;
call_data *calld = (call_data *)elem->call_data;
channel_data *chand = (channel_data *)elem->channel_data;
grpc_transport_destroy_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
@@ -218,7 +218,7 @@ static void bind_transport(grpc_channel_stack *channel_stack,
channel_data *cd = (channel_data *)elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_connected_filter);
GPR_ASSERT(cd->transport == NULL);
cd->transport = t;
cd->transport = (grpc_transport *)t;
/* HACK(ctiller): increase call stack size for the channel to make space
for channel data. We need a cleaner (but performant) way to do this,
@@ -226,7 +226,8 @@ static void bind_transport(grpc_channel_stack *channel_stack,
This is only "safe" because call stacks place no additional data after
the last call element, and the last call element MUST be the connected
channel. */
channel_stack->call_stack_size += grpc_transport_stream_size(t);
channel_stack->call_stack_size +=
grpc_transport_stream_size((grpc_transport *)t);
}
bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
@@ -240,6 +241,6 @@ bool grpc_add_connected_filter(grpc_exec_ctx *exec_ctx,
}
grpc_stream *grpc_connected_channel_get_stream(grpc_call_element *elem) {
call_data *calld = elem->call_data;
call_data *calld = (call_data *)elem->call_data;
return TRANSPORT_STREAM_FROM_CALL_DATA(calld);
}

@@ -84,7 +84,8 @@ struct grpc_handshake_manager {
};
grpc_handshake_manager* grpc_handshake_manager_create() {
grpc_handshake_manager* mgr = gpr_zalloc(sizeof(grpc_handshake_manager));
grpc_handshake_manager* mgr =
(grpc_handshake_manager*)gpr_zalloc(sizeof(grpc_handshake_manager));
gpr_mu_init(&mgr->mu);
gpr_ref_init(&mgr->refs, 1);
return mgr;
@@ -137,8 +138,8 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
realloc_count = mgr->count * 2;
}
if (realloc_count > 0) {
mgr->handshakers =
gpr_realloc(mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
mgr->handshakers = (grpc_handshaker**)gpr_realloc(
mgr->handshakers, realloc_count * sizeof(grpc_handshaker*));
}
mgr->handshakers[mgr->count++] = handshaker;
gpr_mu_unlock(&mgr->mu);
@@ -205,7 +206,7 @@ static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
// handshakers together.
static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
grpc_error* error) {
grpc_handshake_manager* mgr = arg;
grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
gpr_mu_lock(&mgr->mu);
bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
gpr_mu_unlock(&mgr->mu);
@@ -219,7 +220,7 @@ static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
// Callback invoked when deadline is exceeded.
static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
grpc_handshake_manager* mgr = arg;
grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
if (error == GRPC_ERROR_NONE) { // Timer fired, rather than being cancelled.
grpc_handshake_manager_shutdown(
exec_ctx, mgr,
@@ -241,7 +242,8 @@ void grpc_handshake_manager_do_handshake(
mgr->args.endpoint = endpoint;
mgr->args.args = grpc_channel_args_copy(channel_args);
mgr->args.user_data = user_data;
mgr->args.read_buffer = gpr_malloc(sizeof(*mgr->args.read_buffer));
mgr->args.read_buffer =
(grpc_slice_buffer*)gpr_malloc(sizeof(*mgr->args.read_buffer));
grpc_slice_buffer_init(mgr->args.read_buffer);
// Initialize state needed for calling handshakers.
mgr->acceptor = acceptor;

@@ -34,7 +34,7 @@ typedef struct {
static void grpc_handshaker_factory_list_register(
grpc_handshaker_factory_list* list, bool at_start,
grpc_handshaker_factory* factory) {
list->list = gpr_realloc(
list->list = (grpc_handshaker_factory**)gpr_realloc(
list->list, (list->num_factories + 1) * sizeof(grpc_handshaker_factory*));
if (at_start) {
memmove(list->list + 1, list->list,

@@ -33,7 +33,7 @@ static size_t g_num_cores;
void grpc_stats_init(void) {
g_num_cores = GPR_MAX(1, gpr_cpu_num_cores());
grpc_stats_per_cpu_storage =
gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
(grpc_stats_data *)gpr_zalloc(sizeof(grpc_stats_data) * g_num_cores);
}
void grpc_stats_shutdown(void) { gpr_free(grpc_stats_per_cpu_storage); }

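grpc_stats_init above sizes one zeroed grpc_stats_data block per core, presumably so hot-path increments stay contention-free and collection sums across cores. A standalone sketch of that layout (a constant stands in for gpr_cpu_num_cores()):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { long counters[4]; } stats_data;

    int main(void) {
      size_t num_cores = 8; /* stand-in for gpr_cpu_num_cores() */
      stats_data *per_cpu = (stats_data *)calloc(num_cores, sizeof(stats_data));
      per_cpu[3].counters[0]++; /* a thread on core 3 bumps its own block */
      long total = 0;
      for (size_t i = 0; i < num_cores; i++) total += per_cpu[i].counters[0];
      printf("total = %ld\n", total); /* prints 1 */
      free(per_cpu);
      return 0;
    }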
@@ -30,6 +30,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
"tcp_backup_pollers_created",
"tcp_backup_poller_polls",
"http2_op_batches",
"http2_op_cancel",
"http2_op_send_initial_metadata",
@@ -38,16 +40,22 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"http2_op_recv_initial_metadata",
"http2_op_recv_message",
"http2_op_recv_trailing_metadata",
"http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
"http2_writes_offloaded",
"http2_writes_continued",
"http2_partial_writes",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
"combiner_locks_offloaded",
"executor_scheduled_items",
"executor_scheduled_short_items",
"executor_scheduled_long_items",
"executor_scheduled_to_self",
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
};
const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
@@ -59,6 +67,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
"process",
"Number of read syscalls (or equivalent - eg recvmsg) made by this process",
"Number of times a backup poller has been created (this can be expensive)",
"Number of polls performed on the backup poller",
"Number of batches received by HTTP2 transport",
"Number of cancelations received by HTTP2 transport",
"Number of batches containing send initial metadata",
@@ -67,20 +77,39 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
"Number of HTTP2 pings sent by process", "Number of HTTP2 writes initiated",
"Number of settings frames sent", "Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "
"written",
"Number of HTTP2 writes that were made knowing there was still more data "
"to be written (we cap maximum write size to syscall_write)",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
"Number of final items scheduled against combiner locks",
"Number of combiner locks offloaded to different threads",
"Number of closures scheduled against the executor (gRPC thread pool)",
"Number of finite runtime closures scheduled against the executor (gRPC "
"thread pool)",
"Number of potentially infinite runtime closures scheduled against the "
"executor (gRPC thread pool)",
"Number of closures scheduled by the executor to the executor",
"Number of thread wakeups initiated within the executor",
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"tcp_write_size", "tcp_write_iov_size", "tcp_read_size",
"tcp_read_offer", "tcp_read_offer_iov_size", "http2_send_message_size",
"tcp_write_size",
"tcp_write_iov_size",
"tcp_read_size",
"tcp_read_offer",
"tcp_read_offer_iov_size",
"http2_send_message_size",
"http2_send_initial_metadata_per_write",
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_write",
@@ -89,6 +118,10 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
"Number of streams initiated written per TCP write",
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
@@ -273,15 +306,135 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_0, 64));
}
const int grpc_stats_histo_buckets[6] = {64, 64, 64, 64, 64, 64};
const int grpc_stats_histo_start[6] = {0, 64, 128, 192, 256, 320};
const int *const grpc_stats_histo_bucket_boundaries[6] = {
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
64));
}
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_2,
64));
}
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
const int grpc_stats_histo_buckets[10] = {64, 64, 64, 64, 64,
64, 64, 64, 64, 64};
const int grpc_stats_histo_start[10] = {0, 64, 128, 192, 256,
320, 384, 448, 512, 576};
const int *const grpc_stats_histo_bucket_boundaries[10] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0};
void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
grpc_stats_table_2};
void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_size,
grpc_stats_inc_tcp_read_offer,
grpc_stats_inc_tcp_read_offer_iov_size,
grpc_stats_inc_http2_send_message_size};
grpc_stats_inc_http2_send_message_size,
grpc_stats_inc_http2_send_initial_metadata_per_write,
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write};

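The four new *_per_write histogram functions above share one bucketing trick: for positive doubles the IEEE-754 bit pattern is monotonically increasing, so the top 16 bits (sign, exponent, four mantissa bits) can index a precomputed table that lands directly in a log-spaced bucket, with no loop or log() call. The constant 4623507967449235456ull is exactly the bit pattern of the double 13.0, matching the value < 13 linear fast path. A standalone illustration (assumes IEEE-754 doubles; the generated code's guard ranges are noted, not reproduced):

    #include <stdint.h>
    #include <stdio.h>

    /* Only meaningful for values in the generated code's table range
       (13 <= value < its upper bound); smaller values take the linear path. */
    static int bucket_index(double value) {
      union {
        double dbl;
        uint64_t uint;
      } v;
      v.dbl = value;
      return (int)((v.uint - 4623507967449235456ull) >> 48);
    }

    int main(void) {
      /* indexes grow with value: 0, 47, 101 on IEEE-754 platforms */
      printf("%d %d %d\n", bucket_index(13.0), bucket_index(100.0),
             bucket_index(1000.0));
      return 0;
    }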
@@ -32,6 +32,8 @@ typedef enum {
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
@@ -40,16 +42,22 @@ typedef enum {
GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
@@ -61,6 +69,10 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@@ -78,7 +90,15 @@ typedef enum {
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_BUCKETS = 384
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_BUCKETS = 640
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@@ -94,6 +114,11 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
@@ -114,10 +139,18 @@ typedef enum {
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
@@ -130,9 +163,12 @@ typedef enum {
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
@@ -141,6 +177,8 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@@ -159,10 +197,27 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int x);
extern const int grpc_stats_histo_buckets[6];
extern const int grpc_stats_histo_start[6];
extern const int *const grpc_stats_histo_bucket_boundaries[6];
extern void (*const grpc_stats_inc_histogram[6])(grpc_exec_ctx *exec_ctx,
#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_initial_metadata_per_write(
grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx,
int x);
#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx), \
(int)(value))
void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_exec_ctx *exec_ctx, int x);
#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
extern const int grpc_stats_histo_buckets[10];
extern const int grpc_stats_histo_start[10];
extern const int *const grpc_stats_histo_bucket_boundaries[10];
extern void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */

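A quick check of the slot layout declared above: ten histograms of 64 buckets pack into one flat array, so each *_FIRST_SLOT is a multiple of 64 and GRPC_STATS_HISTOGRAM_BUCKETS grows from 384 to 640:

    #include <assert.h>

    int main(void) {
      const int buckets = 64, histograms = 10;
      assert(576 == (histograms - 1) * buckets); /* last FIRST_SLOT */
      assert(640 == histograms * buckets);       /* total slots */
      return 0;
    }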
@@ -54,6 +54,10 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_read
- counter: tcp_backup_pollers_created
doc: Number of times a backup poller has been created (this can be expensive)
- counter: tcp_backup_poller_polls
doc: Number of polls performed on the backup poller
# chttp2
- counter: http2_op_batches
doc: Number of batches received by HTTP2 transport
@@ -75,10 +79,36 @@
max: 16777216
buckets: 64
doc: Size of messages received by HTTP2 transport
- histogram: http2_send_initial_metadata_per_write
max: 1024
buckets: 64
doc: Number of streams initiated written per TCP write
- histogram: http2_send_message_per_write
max: 1024
buckets: 64
doc: Number of streams whose payload was written per TCP write
- histogram: http2_send_trailing_metadata_per_write
max: 1024
buckets: 64
doc: Number of streams terminated per TCP write
- histogram: http2_send_flowctl_per_write
max: 1024
buckets: 64
doc: Number of flow control updates written per TCP write
- counter: http2_settings_writes
doc: Number of settings frames sent
- counter: http2_pings_sent
doc: Number of HTTP2 pings sent by process
- counter: http2_writes_begun
doc: Number of HTTP2 writes initiated
- counter: http2_writes_offloaded
doc: Number of HTTP2 writes offloaded to the executor from application threads
- counter: http2_writes_continued
doc: Number of HTTP2 writes that finished seeing more data needed to be
written
- counter: http2_partial_writes
doc: Number of HTTP2 writes that were made knowing there was still more data
to be written (we cap maximum write size to syscall_write)
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
@@ -90,11 +120,18 @@
- counter: combiner_locks_offloaded
doc: Number of combiner locks offloaded to different threads
# executor
- counter: executor_scheduled_items
doc: Number of closures scheduled against the executor (gRPC thread pool)
- counter: executor_scheduled_short_items
doc: Number of finite runtime closures scheduled against the executor
(gRPC thread pool)
- counter: executor_scheduled_long_items
doc: Number of potentially infinite runtime closures scheduled against the
executor (gRPC thread pool)
- counter: executor_scheduled_to_self
doc: Number of closures scheduled by the executor to the executor
- counter: executor_wakeup_initiated
doc: Number of thread wakeups initiated within the executor
- counter: executor_queue_drained
doc: Number of times an executor queue was drained
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor

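The stats.yaml entries above appear to be the source from which stats_data.c/h are generated: each counter becomes an enum slot, a name string, a doc string, and an INC macro (GRPC_STATS_INC_HTTP2_SETTINGS_WRITES, for instance, is invoked from the transport's settings path earlier in this diff). A standalone imitation of the generated shape (simplified: the real machinery is per-CPU and threads an exec_ctx through):

    #include <stdio.h>

    typedef enum {
      STATS_COUNTER_HTTP2_SETTINGS_WRITES,
      STATS_COUNTER_COUNT
    } stats_counters;

    static long g_counters[STATS_COUNTER_COUNT];
    static const char *g_names[STATS_COUNTER_COUNT] = {"http2_settings_writes"};

    #define STATS_INC_HTTP2_SETTINGS_WRITES() \
      (g_counters[STATS_COUNTER_HTTP2_SETTINGS_WRITES]++)

    int main(void) {
      STATS_INC_HTTP2_SETTINGS_WRITES();
      printf("%s = %ld\n", g_names[0], g_counters[0]);
      return 0;
    }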
@@ -39,7 +39,7 @@ static tracer *tracers;
#endif
void grpc_register_tracer(grpc_tracer_flag *flag) {
tracer *t = gpr_malloc(sizeof(*t));
tracer *t = (tracer *)gpr_malloc(sizeof(*t));
t->flag = flag;
t->next = tracers;
TRACER_SET(*flag, false);
@@ -53,10 +53,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
s = gpr_malloc(len + 1);
s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
*ss = gpr_realloc(*ss, sizeof(char **) * np);
*ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}

@@ -98,7 +98,7 @@ grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request *request,
gpr_strvec_destroy(&out);
if (body_bytes) {
tmp = gpr_realloc(tmp, out_len + body_size);
tmp = (char *)gpr_realloc(tmp, out_len + body_size);
memcpy(tmp + out_len, body_bytes, body_size);
out_len += body_size;
}

@@ -130,7 +130,7 @@ static void do_read(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
grpc_error *error) {
internal_request *req = user_data;
internal_request *req = (internal_request *)user_data;
size_t i;
for (i = 0; i < req->incoming.count; i++) {
@@ -159,7 +159,7 @@ static void on_written(grpc_exec_ctx *exec_ctx, internal_request *req) {
}
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (error == GRPC_ERROR_NONE) {
on_written(exec_ctx, req);
} else {
@@ -175,7 +175,7 @@ static void start_write(grpc_exec_ctx *exec_ctx, internal_request *req) {
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_endpoint *ep) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (!ep) {
next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -189,7 +189,7 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
static void on_connected(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (!req->ep) {
next_address(exec_ctx, req, GRPC_ERROR_REF(error));
@@ -226,7 +226,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req,
}
static void on_resolved(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
internal_request *req = arg;
internal_request *req = (internal_request *)arg;
if (error != GRPC_ERROR_NONE) {
finish(exec_ctx, req, GRPC_ERROR_REF(error));
return;
@@ -243,7 +243,8 @@ static void internal_request_begin(grpc_exec_ctx *exec_ctx,
grpc_millis deadline, grpc_closure *on_done,
grpc_httpcli_response *response,
const char *name, grpc_slice request_text) {
internal_request *req = gpr_malloc(sizeof(internal_request));
internal_request *req =
(internal_request *)gpr_malloc(sizeof(internal_request));
memset(req, 0, sizeof(*req));
req->request_text = request_text;
grpc_http_parser_init(&req->parser, GRPC_HTTP_RESPONSE, response);

@@ -28,7 +28,7 @@
grpc_tracer_flag grpc_http1_trace = GRPC_TRACER_INITIALIZER(false, "http1");
static char *buf2str(void *buffer, size_t length) {
char *out = gpr_malloc(length + 1);
char *out = (char *)gpr_malloc(length + 1);
memcpy(out, buffer, length);
out[length] = 0;
return out;
@@ -197,7 +197,8 @@ static grpc_error *add_header(grpc_http_parser *parser) {
if (*hdr_count == parser->hdr_capacity) {
parser->hdr_capacity =
GPR_MAX(parser->hdr_capacity + 1, parser->hdr_capacity * 3 / 2);
*hdrs = gpr_realloc(*hdrs, parser->hdr_capacity * sizeof(**hdrs));
*hdrs = (grpc_http_header *)gpr_realloc(
*hdrs, parser->hdr_capacity * sizeof(**hdrs));
}
(*hdrs)[(*hdr_count)++] = hdr;
@@ -255,7 +256,7 @@ static grpc_error *addbyte_body(grpc_http_parser *parser, uint8_t byte) {
if (*body_length == parser->body_capacity) {
parser->body_capacity = GPR_MAX(8, parser->body_capacity * 3 / 2);
*body = gpr_realloc((void *)*body, parser->body_capacity);
*body = (char *)gpr_realloc((void *)*body, parser->body_capacity);
}
(*body)[*body_length] = (char)byte;
(*body_length)++;

@@ -109,7 +109,7 @@ typedef struct {
static void closure_wrapper(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_closure *wc = arg;
wrapped_closure *wc = (wrapped_closure *)arg;
grpc_iomgr_cb_func cb = wc->cb;
void *cb_arg = wc->cb_arg;
gpr_free(wc);
@@ -124,7 +124,7 @@ grpc_closure *grpc_closure_create(const char *file, int line,
grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
grpc_closure_scheduler *scheduler) {
#endif
wrapped_closure *wc = gpr_malloc(sizeof(*wc));
wrapped_closure *wc = (wrapped_closure *)gpr_malloc(sizeof(*wc));
wc->cb = cb;
wc->cb_arg = cb_arg;
#ifndef NDEBUG

@@ -74,14 +74,15 @@ static const grpc_closure_scheduler_vtable finally_scheduler = {
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
grpc_combiner *grpc_combiner_create(void) {
grpc_combiner *lock = gpr_zalloc(sizeof(*lock));
grpc_combiner *lock = (grpc_combiner *)gpr_zalloc(sizeof(*lock));
gpr_ref_init(&lock->refs, 1);
lock->scheduler.vtable = &scheduler;
lock->finally_scheduler.vtable = &finally_scheduler;
gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
gpr_mpscq_init(&lock->queue);
grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
return lock;
}
@@ -193,7 +194,7 @@ static void move_next(grpc_exec_ctx *exec_ctx) {
}
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_combiner *lock = arg;
grpc_combiner *lock = (grpc_combiner *)arg;
push_last_on_exec_ctx(exec_ctx, lock);
}

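The combiner hunk above switches its offload closure to grpc_executor_scheduler(GRPC_EXECUTOR_SHORT), consistent with the new executor_scheduled_short_items/long_items counters: the executor now appears to distinguish finite-runtime closures from potentially unbounded ones. A standalone sketch of that split (names mimic the diff; the dispatch is illustrative):

    #include <stdio.h>

    typedef enum { EXECUTOR_SHORT, EXECUTOR_LONG } executor_job_length;

    static void schedule(executor_job_length kind, void (*fn)(void)) {
      /* a real executor would enqueue onto a pool chosen by kind; a
         combiner offload is finite work, hence EXECUTOR_SHORT above */
      printf("%s pool\n", kind == EXECUTOR_SHORT ? "short" : "long");
      fn();
    }

    static void offload(void) { printf("drain combiner on another thread\n"); }

    int main(void) {
      schedule(EXECUTOR_SHORT, offload);
      return 0;
    }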
@@ -211,7 +211,7 @@ static uint8_t get_placement(grpc_error **err, size_t size) {
#ifndef NDEBUG
grpc_error *orig = *err;
#endif
*err = gpr_realloc(
*err = (grpc_error *)gpr_realloc(
*err, sizeof(grpc_error) + (*err)->arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
@@ -406,7 +406,8 @@ static grpc_error *copy_error_and_unref(grpc_error *in) {
if (in->arena_capacity - in->arena_size < (uint8_t)SLOTS_PER_STR) {
new_arena_capacity = (uint8_t)(3 * new_arena_capacity / 2);
}
out = gpr_malloc(sizeof(*in) + new_arena_capacity * sizeof(intptr_t));
out = (grpc_error *)gpr_malloc(sizeof(*in) +
new_arena_capacity * sizeof(intptr_t));
#ifndef NDEBUG
if (GRPC_TRACER_ON(grpc_trace_error_refcount)) {
gpr_log(GPR_DEBUG, "%p create copying %p", out, in);
@@ -530,7 +531,7 @@ typedef struct {
static void append_chr(char c, char **s, size_t *sz, size_t *cap) {
if (*sz == *cap) {
*cap = GPR_MAX(8, 3 * *cap / 2);
*s = gpr_realloc(*s, *cap);
*s = (char *)gpr_realloc(*s, *cap);
}
(*s)[(*sz)++] = c;
}
@@ -582,7 +583,8 @@ static void append_esc_str(const uint8_t *str, size_t len, char **s, size_t *sz,
static void append_kv(kv_pairs *kvs, char *key, char *value) {
if (kvs->num_kvs == kvs->cap_kvs) {
kvs->cap_kvs = GPR_MAX(3 * kvs->cap_kvs / 2, 4);
kvs->kvs = gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
kvs->kvs =
(kv_pair *)gpr_realloc(kvs->kvs, sizeof(*kvs->kvs) * kvs->cap_kvs);
}
kvs->kvs[kvs->num_kvs].key = key;
kvs->kvs[kvs->num_kvs].value = value;
@@ -695,8 +697,8 @@ static char *errs_string(grpc_error *err) {
}
static int cmp_kvs(const void *a, const void *b) {
const kv_pair *ka = a;
const kv_pair *kb = b;
const kv_pair *ka = (const kv_pair *)a;
const kv_pair *kb = (const kv_pair *)b;
return strcmp(ka->key, kb->key);
}

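append_chr above, like add_header and append_kv elsewhere in this diff, grows its buffer with GPR_MAX(min, 3 * cap / 2). Reduced to a standalone sketch (error handling elided), geometric growth keeps appends amortized O(1) while the new cast keeps the realloc result C++-clean:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      char *data;
      size_t size, cap;
    } buf;

    static void append_chr(buf *b, char c) {
      if (b->size == b->cap) {
        b->cap = b->cap < 8 ? 8 : 3 * b->cap / 2; /* GPR_MAX(8, cap*3/2) */
        b->data = (char *)realloc(b->data, b->cap);
      }
      b->data[b->size++] = c;
    }

    int main(void) {
      buf b = {0, 0, 0};
      const char *s = "hello";
      for (const char *p = s; *p; p++) append_chr(&b, *p);
      printf("%.*s\n", (int)b.size, b.data);
      free(b.data);
      return 0;
    }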
@@ -261,7 +261,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
new_fd->fd = fd;
@@ -443,8 +443,8 @@ static grpc_error *pollset_global_init(void) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighbourhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBOURHOODS);
g_neighbourhoods =
gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
g_neighbourhoods = (pollset_neighbourhood *)gpr_zalloc(
sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
for (size_t i = 0; i < g_num_neighbourhoods; i++) {
gpr_mu_init(&g_neighbourhoods[i].mu);
}

@@ -278,7 +278,7 @@ static void ref_by(grpc_fd *fd, int n) {
}
static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_fd *fd = arg;
grpc_fd *fd = (grpc_fd *)arg;
/* Add the fd to the freelist */
grpc_iomgr_unregister_object(&fd->iomgr_object);
pollable_destroy(&fd->pollable);
@@ -339,7 +339,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
}
pollable_init(&new_fd->pollable, PO_FD);
@ -555,7 +555,7 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error_unused) {
grpc_error *error = GRPC_ERROR_NONE;
grpc_pollset *pollset = arg;
grpc_pollset *pollset = (grpc_pollset *)arg;
gpr_mu_lock(&pollset->pollable.po.mu);
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
@ -998,7 +998,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static void unref_fd_no_longer_poller(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_fd *fd = arg;
grpc_fd *fd = (grpc_fd *)arg;
UNREF_BY(exec_ctx, fd, 2, "pollset_pollable");
}
@ -1067,7 +1067,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
grpc_pollset_set *pss = gpr_zalloc(sizeof(*pss));
grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
po_init(&pss->po, PO_POLLSET_SET);
return pss;
}
@ -1229,7 +1229,7 @@ static void pg_broadcast(grpc_exec_ctx *exec_ctx, polling_group *from,
static void pg_create(grpc_exec_ctx *exec_ctx, polling_obj **initial_po,
size_t initial_po_count) {
/* assumes all polling objects in initial_po are locked */
polling_group *pg = gpr_malloc(sizeof(*pg));
polling_group *pg = (polling_group *)gpr_malloc(sizeof(*pg));
po_init(&pg->po, PO_POLLING_GROUP);
gpr_ref_init(&pg->refs, (int)initial_po_count);
for (size_t i = 0; i < initial_po_count; i++) {
@ -1339,7 +1339,7 @@ static void pg_merge(grpc_exec_ctx *exec_ctx, polling_group *a,
gpr_mu_lock(&po->mu);
if (unref_count == unref_cap) {
unref_cap = GPR_MAX(8, 3 * unref_cap / 2);
unref = gpr_realloc(unref, unref_cap * sizeof(*unref));
unref = (polling_group **)gpr_realloc(unref, unref_cap * sizeof(*unref));
}
unref[unref_count++] = po->group;
po->group = pg_ref(a);

@ -364,7 +364,8 @@ static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
if (pi->fd_cnt == pi->fd_capacity) {
pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
pi->fds =
(grpc_fd **)gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
}
pi->fds[pi->fd_cnt++] = fds[i];
@ -467,7 +468,7 @@ static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
*error = GRPC_ERROR_NONE;
pi = gpr_malloc(sizeof(*pi));
pi = (polling_island *)gpr_malloc(sizeof(*pi));
gpr_mu_init(&pi->mu);
pi->fd_cnt = 0;
pi->fd_capacity = 0;
@ -811,7 +812,7 @@ static grpc_fd *fd_create(int fd, const char *name) {
gpr_mu_unlock(&fd_freelist_mu);
if (new_fd == NULL) {
new_fd = gpr_malloc(sizeof(grpc_fd));
new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
gpr_mu_init(&new_fd->po.mu);
}
@ -1260,7 +1261,7 @@ static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
to the function pollset_work_and_unlock() will pick up the correct
epoll_fd */
} else {
grpc_fd *fd = data_ptr;
grpc_fd *fd = (grpc_fd *)data_ptr;
int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
int write_ev = ep_ev[i].events & EPOLLOUT;
@ -1556,7 +1557,7 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
*/
static grpc_pollset_set *pollset_set_create(void) {
grpc_pollset_set *pss = gpr_malloc(sizeof(*pss));
grpc_pollset_set *pss = (grpc_pollset_set *)gpr_malloc(sizeof(*pss));
gpr_mu_init(&pss->po.mu);
pss->po.pi = NULL;
#ifndef NDEBUG
@ -1634,8 +1635,8 @@ void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
}
bool grpc_are_polling_islands_equal(void *p, void *q) {
polling_island *p1 = p;
polling_island *p2 = q;
polling_island *p1 = (polling_island *)p;
polling_island *p2 = (polling_island *)q;
/* Note: polling_island_lock_pair() may change p1 and p2 to point to the
latest polling islands in their respective linked lists */

@ -326,7 +326,7 @@ static void unref_by(grpc_fd *fd, int n) {
}
static grpc_fd *fd_create(int fd, const char *name) {
grpc_fd *r = gpr_malloc(sizeof(*r));
grpc_fd *r = (grpc_fd *)gpr_malloc(sizeof(*r));
gpr_mu_init(&r->mu);
gpr_atm_rel_store(&r->refst, 1);
r->shutdown = 0;
@ -841,8 +841,8 @@ static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (pollset->fd_count == pollset->fd_capacity) {
pollset->fd_capacity =
GPR_MAX(pollset->fd_capacity + 8, pollset->fd_count * 3 / 2);
pollset->fds =
gpr_realloc(pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
pollset->fds = (grpc_fd **)gpr_realloc(
pollset->fds, sizeof(grpc_fd *) * pollset->fd_capacity);
}
pollset->fds[pollset->fd_count++] = fd;
GRPC_FD_REF(fd, "multipoller");
@ -894,7 +894,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
worker.wakeup_fd = pollset->local_wakeup_cache;
pollset->local_wakeup_cache = worker.wakeup_fd->next;
} else {
worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
worker.wakeup_fd =
(grpc_cached_wakeup_fd *)gpr_malloc(sizeof(*worker.wakeup_fd));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
if (error != GRPC_ERROR_NONE) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
@ -949,8 +950,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
const size_t pfd_size = sizeof(*pfds) * (pollset->fd_count + 2);
const size_t watch_size = sizeof(*watchers) * (pollset->fd_count + 2);
void *buf = gpr_malloc(pfd_size + watch_size);
pfds = buf;
watchers = (void *)((char *)buf + pfd_size);
pfds = (struct pollfd *)buf;
watchers = (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
}
fd_count = 0;
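
The two-line change above leaves an existing trick intact: pfds and watchers share a single allocation, with the second array carved out at buf + pfd_size. A sketch of the idiom, under the assumption (which holds here) that pfd_size is a multiple of the second array's alignment:

    void *buf = gpr_malloc(pfd_size + watch_size);
    struct pollfd *pfds = (struct pollfd *)buf;
    grpc_fd_watcher *watchers =
        (grpc_fd_watcher *)(void *)((char *)buf + pfd_size);
    /* ... use both arrays; a single gpr_free(buf) later releases them */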
@ -987,6 +988,10 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
}
if (r < 0) {
if (errno != EINTR) {
work_combine_error(&error, GRPC_OS_ERROR(errno, "poll"));
@ -1007,6 +1012,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
if (pfds[0].revents & POLLIN_CHECK) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset);
}
work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
}
@ -1014,6 +1022,11 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (watchers[i].fd == NULL) {
fd_end_poll(exec_ctx, &watchers[i], 0, 0, NULL);
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
}
fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
pfds[i].revents & POLLOUT_CHECK, pollset);
}
@ -1120,7 +1133,8 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
*/
static grpc_pollset_set *pollset_set_create(void) {
grpc_pollset_set *pollset_set = gpr_zalloc(sizeof(*pollset_set));
grpc_pollset_set *pollset_set =
(grpc_pollset_set *)gpr_zalloc(sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
@ -1163,9 +1177,9 @@ static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
pollset_set->pollset_capacity =
GPR_MAX(8, 2 * pollset_set->pollset_capacity);
pollset_set->pollsets =
gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
sizeof(*pollset_set->pollsets));
pollset_set->pollsets = (grpc_pollset **)gpr_realloc(
pollset_set->pollsets,
pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
}
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
@ -1214,9 +1228,9 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
bag->pollset_sets =
gpr_realloc(bag->pollset_sets,
bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
bag->pollset_sets = (grpc_pollset_set **)gpr_realloc(
bag->pollset_sets,
bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
@ -1253,7 +1267,7 @@ static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&pollset_set->mu);
if (pollset_set->fd_count == pollset_set->fd_capacity) {
pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
pollset_set->fds = gpr_realloc(
pollset_set->fds = (grpc_fd **)gpr_realloc(
pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
}
GRPC_FD_REF(fd, "pollset_set");
@ -1307,11 +1321,12 @@ static void cache_insert_locked(poll_args *args) {
}
static void init_result(poll_args *pargs) {
pargs->result = gpr_malloc(sizeof(poll_result));
pargs->result = (poll_result *)gpr_malloc(sizeof(poll_result));
gpr_ref_init(&pargs->result->refcount, 1);
pargs->result->watchers = NULL;
pargs->result->watchcount = 0;
pargs->result->fds = gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
pargs->result->fds =
(struct pollfd *)gpr_malloc(sizeof(struct pollfd) * pargs->nfds);
memcpy(pargs->result->fds, pargs->fds, sizeof(struct pollfd) * pargs->nfds);
pargs->result->nfds = pargs->nfds;
pargs->result->retval = 0;
@ -1350,7 +1365,7 @@ static poll_args *get_poller_locked(struct pollfd *fds, nfds_t count) {
return pargs;
}
poll_args *pargs = gpr_malloc(sizeof(struct poll_args));
poll_args *pargs = (poll_args *)gpr_malloc(sizeof(struct poll_args));
gpr_cv_init(&pargs->trigger);
pargs->fds = fds;
pargs->nfds = count;
@ -1397,7 +1412,8 @@ static void cache_poller_locked(poll_args *args) {
poll_args **old_active_pollers = poll_cache.active_pollers;
poll_cache.size = poll_cache.size * 2;
poll_cache.count = 0;
poll_cache.active_pollers = gpr_malloc(sizeof(void *) * poll_cache.size);
poll_cache.active_pollers =
(poll_args **)gpr_malloc(sizeof(void *) * poll_cache.size);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}
@ -1502,12 +1518,12 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
nfds_t nsockfds = 0;
poll_result *result = NULL;
gpr_mu_lock(&g_cvfds.mu);
pollcv = gpr_malloc(sizeof(cv_node));
pollcv = (cv_node *)gpr_malloc(sizeof(cv_node));
pollcv->next = NULL;
gpr_cv pollcv_cv;
gpr_cv_init(&pollcv_cv);
pollcv->cv = &pollcv_cv;
cv_node *fd_cvs = gpr_malloc(nfds * sizeof(cv_node));
cv_node *fd_cvs = (cv_node *)gpr_malloc(nfds * sizeof(cv_node));
for (i = 0; i < nfds; i++) {
fds[i].revents = 0;
@ -1539,7 +1555,8 @@ static int cvfd_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
res = 0;
if (!skip_poll && nsockfds > 0) {
struct pollfd *pollfds = gpr_malloc(sizeof(struct pollfd) * nsockfds);
struct pollfd *pollfds =
(struct pollfd *)gpr_malloc(sizeof(struct pollfd) * nsockfds);
idx = 0;
for (i = 0; i < nfds; i++) {
if (fds[i].fd >= 0) {
@ -1602,7 +1619,8 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
g_cvfds.cvfds = gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.cvfds =
(fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@ -1619,7 +1637,7 @@ static void global_cv_fd_table_init() {
poll_cache.size = 32;
poll_cache.count = 0;
poll_cache.free_pollers = NULL;
poll_cache.active_pollers = gpr_malloc(sizeof(void *) * 32);
poll_cache.active_pollers = (poll_args **)gpr_malloc(sizeof(void *) * 32);
for (unsigned int i = 0; i < poll_cache.size; i++) {
poll_cache.active_pollers[i] = NULL;
}

@ -76,10 +76,10 @@ static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
size_t len;
GPR_ASSERT(end >= beg);
len = (size_t)(end - beg);
s = gpr_malloc(len + 1);
s = (char *)gpr_malloc(len + 1);
memcpy(s, beg, len);
s[len] = 0;
*ss = gpr_realloc(*ss, sizeof(char **) * np);
*ss = (char **)gpr_realloc(*ss, sizeof(char **) * np);
(*ss)[n] = s;
*ns = np;
}

@ -40,6 +40,7 @@ typedef struct {
grpc_closure_list elems;
size_t depth;
bool shutdown;
bool queued_long_job;
gpr_thd_id id;
} thread_state;
@ -50,6 +51,9 @@ static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;
GPR_TLS_DECL(g_this_thread_state);
static grpc_tracer_flag executor_trace =
GRPC_TRACER_INITIALIZER(false, "executor");
static void executor_thread(void *arg);
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
@ -59,6 +63,14 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
while (c != NULL) {
grpc_closure *next = c->next_data.next;
grpc_error *error = c->error_data.error;
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created);
#else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
}
#ifndef NDEBUG
c->scheduled = false;
#endif
@ -66,6 +78,7 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
GRPC_ERROR_UNREF(error);
c = next;
n++;
grpc_exec_ctx_flush(exec_ctx);
}
return n;
@ -82,7 +95,8 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
gpr_atm_no_barrier_store(&g_cur_threads, 1);
gpr_tls_init(&g_this_thread_state);
g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
g_thread_state =
(thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
for (size_t i = 0; i < g_max_threads; i++) {
gpr_mu_init(&g_thread_state[i].mu);
gpr_cv_init(&g_thread_state[i].cv);
@ -120,6 +134,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
}
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
grpc_register_tracer(&executor_trace);
gpr_atm_no_barrier_store(&g_cur_threads, 0);
grpc_executor_set_threading(exec_ctx, true);
}
@ -129,7 +144,7 @@ void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
}
static void executor_thread(void *arg) {
thread_state *ts = arg;
thread_state *ts = (thread_state *)arg;
gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
grpc_exec_ctx exec_ctx =
@ -137,12 +152,21 @@ static void executor_thread(void *arg) {
size_t subtract_depth = 0;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
(int)(ts - g_thread_state), subtract_depth);
}
gpr_mu_lock(&ts->mu);
ts->depth -= subtract_depth;
while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
ts->queued_long_job = false;
gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
}
if (ts->shutdown) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
(int)(ts - g_thread_state));
}
gpr_mu_unlock(&ts->mu);
break;
}
@ -150,52 +174,128 @@ static void executor_thread(void *arg) {
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu);
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
}
subtract_depth = run_closures(&exec_ctx, exec);
grpc_exec_ctx_flush(&exec_ctx);
}
grpc_exec_ctx_finish(&exec_ctx);
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error) {
  size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
  GRPC_STATS_INC_EXECUTOR_SCHEDULED_ITEMS(exec_ctx);
  if (cur_thread_count == 0) {
    grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
    return;
  }
  thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
  if (ts == NULL) {
    ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
  }
  gpr_mu_lock(&ts->mu);
  if (grpc_closure_list_empty(ts->elems)) {
    GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
    gpr_cv_signal(&ts->cv);
  }
  grpc_closure_list_append(&ts->elems, closure, error);
  ts->depth++;
  bool try_new_thread = ts->depth > MAX_DEPTH &&
                        cur_thread_count < g_max_threads && !ts->shutdown;
  gpr_mu_unlock(&ts->mu);
  if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
    cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
    if (cur_thread_count < g_max_threads) {
      gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
      gpr_thd_options opt = gpr_thd_options_default();
      gpr_thd_options_set_joinable(&opt);
      gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
                  &g_thread_state[cur_thread_count], &opt);
    }
    gpr_spinlock_unlock(&g_adding_thread_lock);
  }
}
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error, bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
  }
  do {
    retry_push = false;
    size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
    if (cur_thread_count == 0) {
      if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
                closure, closure->file_created, closure->line_created);
#else
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
#endif
      }
      grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
      return;
    }
thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
if (ts == NULL) {
ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
} else {
GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
}
thread_state *orig_ts = ts;
bool try_new_thread;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
gpr_log(
GPR_DEBUG,
"EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, (int)(ts - g_thread_state));
#else
gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long",
(int)(ts - g_thread_state));
#endif
}
gpr_mu_lock(&ts->mu);
if (ts->queued_long_job) {
// if there's a long job queued, we never queue anything else to this
// queue (since long jobs can take 'infinite' time and we need to
// guarantee no starvation)
// ... spin through queues and try again
gpr_mu_unlock(&ts->mu);
size_t idx = (size_t)(ts - g_thread_state);
ts = &g_thread_state[(idx + 1) % cur_thread_count];
if (ts == orig_ts) {
retry_push = true;
try_new_thread = true;
break;
}
continue;
}
if (grpc_closure_list_empty(ts->elems)) {
GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
gpr_cv_signal(&ts->cv);
}
grpc_closure_list_append(&ts->elems, closure, error);
ts->depth++;
try_new_thread = ts->depth > MAX_DEPTH &&
cur_thread_count < g_max_threads && !ts->shutdown;
if (!is_short) ts->queued_long_job = true;
gpr_mu_unlock(&ts->mu);
break;
}
if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
if (cur_thread_count < g_max_threads) {
gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
&g_thread_state[cur_thread_count], &opt);
}
gpr_spinlock_unlock(&g_adding_thread_lock);
}
if (retry_push) {
GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
}
} while (retry_push);
}
static const grpc_closure_scheduler_vtable executor_vtable = {
executor_push, executor_push, "executor"};
static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
executor_push(exec_ctx, closure, error, true);
}
static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_error *error) {
executor_push(exec_ctx, closure, error, false);
}
static const grpc_closure_scheduler_vtable executor_vtable_short = {
executor_push_short, executor_push_short, "executor"};
static grpc_closure_scheduler executor_scheduler_short = {
&executor_vtable_short};
static const grpc_closure_scheduler_vtable executor_vtable_long = {
executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
grpc_closure_scheduler *grpc_executor_scheduler(
grpc_executor_job_length length) {
return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
: &executor_scheduler_long;
}

@ -21,6 +21,11 @@
#include "src/core/lib/iomgr/closure.h"
typedef enum {
GRPC_EXECUTOR_SHORT,
GRPC_EXECUTOR_LONG
} grpc_executor_job_length;
/** Initialize the global executor.
*
* This mechanism is meant to outsource work (grpc_closure instances) to a
@ -28,7 +33,7 @@
* non-blocking solution available. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx);
extern grpc_closure_scheduler *grpc_executor_scheduler;
grpc_closure_scheduler *grpc_executor_scheduler(grpc_executor_job_length);
/** Shutdown the executor, running all pending work as part of the call */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx);
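
With this header change, callers choose a scheduler by expected job length: GRPC_EXECUTOR_SHORT closures may share a thread's queue freely, while a GRPC_EXECUTOR_LONG closure marks its queue via queued_long_job, so later pushes spin on to the next thread's queue rather than risk starving behind it. A hypothetical caller (the closure and callback names are invented; the resolver hunks below use the same shape):

    GRPC_CLOSURE_INIT(&on_dns_done, dns_done_cb, arg,
                      grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
    GRPC_CLOSURE_INIT(&run_poller, poller_loop_cb, arg,
                      grpc_executor_scheduler(GRPC_EXECUTOR_LONG));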

@ -47,7 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
contents = gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
contents = (unsigned char *)gpr_malloc(contents_size +
(add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");

@ -112,13 +112,14 @@ static grpc_error *blocking_resolve_address_impl(
}
/* Success path: set addrs non-NULL, fill it in */
*addresses = gpr_malloc(sizeof(grpc_resolved_addresses));
*addresses =
(grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addresses)->naddrs = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
(*addresses)->naddrs++;
}
(*addresses)->addrs =
gpr_malloc(sizeof(grpc_resolved_address) * (*addresses)->naddrs);
(*addresses)->addrs = (grpc_resolved_address *)gpr_malloc(
sizeof(grpc_resolved_address) * (*addresses)->naddrs);
i = 0;
for (resp = result; resp != NULL; resp = resp->ai_next) {
memcpy(&(*addresses)->addrs[i].addr, resp->ai_addr, resp->ai_addrlen);
@ -153,7 +154,7 @@ typedef struct {
* grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
grpc_error *error) {
request *r = rp;
request *r = (request *)rp;
GRPC_CLOSURE_SCHED(
exec_ctx, r->on_done,
grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
@ -174,9 +175,9 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
request *r = gpr_malloc(sizeof(request));
request *r = (request *)gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;

@ -159,7 +159,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_resolved_addresses **addresses) {
request *r = gpr_malloc(sizeof(request));
GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
grpc_executor_scheduler);
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
r->name = gpr_strdup(name);
r->default_port = gpr_strdup(default_port);
r->on_done = on_done;

@ -241,7 +241,7 @@ static bool rq_reclaim(grpc_exec_ctx *exec_ctx,
grpc_resource_quota *resource_quota, bool destructive);
static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) {
grpc_resource_quota *resource_quota = rq;
grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->step_scheduled = false;
do {
if (rq_alloc(exec_ctx, resource_quota)) goto done;
@ -380,12 +380,12 @@ typedef struct {
} ru_slice_refcount;
static void ru_slice_ref(void *p) {
ru_slice_refcount *rc = p;
ru_slice_refcount *rc = (ru_slice_refcount *)p;
gpr_ref(&rc->refs);
}
static void ru_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
ru_slice_refcount *rc = p;
ru_slice_refcount *rc = (ru_slice_refcount *)p;
if (gpr_unref(&rc->refs)) {
grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
gpr_free(rc);
@ -398,7 +398,8 @@ static const grpc_slice_refcount_vtable ru_slice_vtable = {
static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
size_t size) {
ru_slice_refcount *rc = gpr_malloc(sizeof(ru_slice_refcount) + size);
ru_slice_refcount *rc =
(ru_slice_refcount *)gpr_malloc(sizeof(ru_slice_refcount) + size);
rc->base.vtable = &ru_slice_vtable;
rc->base.sub_refcount = &rc->base;
gpr_ref_init(&rc->refs, 1);
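
ru_slice_create sizes one block for both the refcount header and the slice payload, so the bytes start immediately after the struct and one free releases both. A generic sketch of the header-plus-payload idiom, with a hypothetical hdr type (the backup_poller in tcp_posix.c below uses the same layout via BACKUP_POLLER_POLLSET):

    typedef struct { gpr_refcount refs; } hdr;
    hdr *h = (hdr *)gpr_malloc(sizeof(hdr) + payload_len);
    uint8_t *payload = (uint8_t *)(h + 1); /* one allocation, one free */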
@ -417,7 +418,7 @@ static grpc_slice ru_slice_create(grpc_resource_user *resource_user,
*/
static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION)) {
rq_step_sched(exec_ctx, resource_user->resource_quota);
@ -427,7 +428,7 @@ static void ru_allocate(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_add_to_free_pool(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
rulist_empty(resource_user->resource_quota,
@ -454,7 +455,7 @@ static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@ -469,7 +470,7 @@ static void ru_post_benign_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
if (!rulist_empty(resource_user->resource_quota,
GRPC_RULIST_AWAITING_ALLOCATION) &&
@ -485,7 +486,7 @@ static void ru_post_destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *ru,
}
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
GRPC_ERROR_CANCELLED);
GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
@ -497,7 +498,7 @@ static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
}
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
grpc_resource_user *resource_user = ru;
grpc_resource_user *resource_user = (grpc_resource_user *)ru;
GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
rulist_remove(resource_user, (grpc_rulist)i);
@ -518,7 +519,8 @@ static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
static void ru_allocated_slices(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_resource_user_slice_allocator *slice_allocator = arg;
grpc_resource_user_slice_allocator *slice_allocator =
(grpc_resource_user_slice_allocator *)arg;
if (error == GRPC_ERROR_NONE) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
@ -541,7 +543,7 @@ typedef struct {
} rq_resize_args;
static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
rq_resize_args *a = args;
rq_resize_args *a = (rq_resize_args *)args;
int64_t delta = a->size - a->resource_quota->size;
a->resource_quota->size += delta;
a->resource_quota->free_pool += delta;
@ -553,7 +555,7 @@ static void rq_resize(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
grpc_error *error) {
grpc_resource_quota *resource_quota = rq;
grpc_resource_quota *resource_quota = (grpc_resource_quota *)rq;
resource_quota->reclaiming = false;
rq_step_sched(exec_ctx, resource_quota);
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
@ -565,7 +567,8 @@ static void rq_reclamation_done(grpc_exec_ctx *exec_ctx, void *rq,
/* Public API */
grpc_resource_quota *grpc_resource_quota_create(const char *name) {
grpc_resource_quota *resource_quota = gpr_malloc(sizeof(*resource_quota));
grpc_resource_quota *resource_quota =
(grpc_resource_quota *)gpr_malloc(sizeof(*resource_quota));
gpr_ref_init(&resource_quota->refs, 1);
resource_quota->combiner = grpc_combiner_create();
resource_quota->free_pool = INT64_MAX;
@ -629,7 +632,7 @@ double grpc_resource_quota_get_memory_pressure(
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
size_t size) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
rq_resize_args *a = gpr_malloc(sizeof(*a));
rq_resize_args *a = (rq_resize_args *)gpr_malloc(sizeof(*a));
a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
a->size = (int64_t)size;
gpr_atm_no_barrier_store(&resource_quota->last_size,
@ -684,7 +687,8 @@ const grpc_arg_pointer_vtable *grpc_resource_quota_arg_vtable(void) {
grpc_resource_user *grpc_resource_user_create(
grpc_resource_quota *resource_quota, const char *name) {
grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
grpc_resource_user *resource_user =
(grpc_resource_user *)gpr_malloc(sizeof(*resource_user));
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
GRPC_CLOSURE_INIT(&resource_user->allocate_closure, &ru_allocate,

@ -80,7 +80,8 @@ static grpc_error *prepare_socket(const grpc_resolved_address *addr, int fd,
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_SOCKET_MUTATOR)) {
GPR_ASSERT(channel_args->args[i].type == GRPC_ARG_POINTER);
grpc_socket_mutator *mutator = channel_args->args[i].value.pointer.p;
grpc_socket_mutator *mutator =
(grpc_socket_mutator *)channel_args->args[i].value.pointer.p;
err = grpc_set_socket_with_mutator(fd, mutator);
if (err != GRPC_ERROR_NONE) goto error;
}
@ -98,7 +99,7 @@ done:
static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
int done;
async_connect *ac = acp;
async_connect *ac = (async_connect *)acp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
@ -126,7 +127,7 @@ grpc_endpoint *grpc_tcp_client_create_from_fd(
}
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
async_connect *ac = acp;
async_connect *ac = (async_connect *)acp;
int so_error = 0;
socklen_t so_error_size;
int err;
@ -304,7 +305,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
ac = gpr_malloc(sizeof(async_connect));
ac = (async_connect *)gpr_malloc(sizeof(async_connect));
ac->closure = closure;
ac->ep = ep;
ac->fd = fdobj;

@ -43,6 +43,7 @@
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/slice/slice_string_helpers.h"
@ -90,8 +91,8 @@ typedef struct {
grpc_closure *release_fd_cb;
int *release_fd;
grpc_closure read_closure;
grpc_closure write_closure;
grpc_closure read_done_closure;
grpc_closure write_done_closure;
char *peer_string;
@ -99,6 +100,148 @@ typedef struct {
grpc_resource_user_slice_allocator slice_allocator;
} grpc_tcp;
typedef struct backup_poller {
gpr_mu *pollset_mu;
grpc_closure run_poller;
} backup_poller;
#define BACKUP_POLLER_POLLSET(b) ((grpc_pollset *)((b) + 1))
static gpr_atm g_uncovered_notifications_pending;
static gpr_atm g_backup_poller; /* backup_poller* */
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error);
static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
void *arg /* grpc_tcp */,
grpc_error *error);
static void done_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
}
grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
gpr_free(p);
}
static void run_poller(grpc_exec_ctx *exec_ctx, void *bp,
grpc_error *error_ignored) {
backup_poller *p = (backup_poller *)bp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
}
gpr_mu_lock(p->pollset_mu);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec deadline =
gpr_time_add(now, gpr_time_from_seconds(10, GPR_TIMESPAN));
GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
GRPC_LOG_IF_ERROR("backup_poller:pollset_work",
grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), NULL,
now, deadline));
gpr_mu_unlock(p->pollset_mu);
/* last "uncovered" notification is the ref that keeps us polling, if we get
* there try a cas to release it */
if (gpr_atm_no_barrier_load(&g_uncovered_notifications_pending) == 1 &&
gpr_atm_full_cas(&g_uncovered_notifications_pending, 1, 0)) {
gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
}
gpr_mu_unlock(p->pollset_mu);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
}
grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx));
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
}
GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
}
}
static void drop_uncovered(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller);
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, (int)old_count,
(int)old_count - 1);
}
GPR_ASSERT(old_count != 1);
}
static void cover_self(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
backup_poller *p;
gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", (int)old_count,
2 + (int)old_count);
}
if (old_count == 0) {
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
p = (backup_poller *)gpr_malloc(sizeof(*p) + grpc_pollset_size());
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
}
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
GRPC_ERROR_NONE);
} else {
while ((p = (backup_poller *)gpr_atm_acq_load(&g_backup_poller)) == NULL) {
// spin waiting for backup poller
}
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
}
grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) {
drop_uncovered(exec_ctx, tcp);
}
}
static void notify_on_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
}
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
}
static void notify_on_write(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
}
cover_self(exec_ctx, tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure,
tcp_drop_uncovered_then_handle_write, tcp,
grpc_schedule_on_exec_ctx);
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
}
static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx *exec_ctx,
void *arg, grpc_error *error) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
}
drop_uncovered(exec_ctx, (grpc_tcp *)arg);
tcp_handle_write(exec_ctx, arg, error);
}
static void add_to_estimate(grpc_tcp *tcp, size_t bytes) {
tcp->bytes_read_this_round += (double)bytes;
}
@ -214,6 +357,7 @@ static void call_read_cb(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
grpc_closure *cb = tcp->read_cb;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i;
const char *str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str);
@ -271,7 +415,7 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
if (errno == EAGAIN) {
finish_estimate(tcp);
/* We've consumed the edge, request a new one */
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
notify_on_read(exec_ctx, tcp);
} else {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
tcp->incoming_buffer);
@ -307,7 +451,11 @@ static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
grpc_error *error) {
grpc_tcp *tcp = tcpp;
grpc_tcp *tcp = (grpc_tcp *)tcpp;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
@ -323,9 +471,15 @@ static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
size_t target_read_size = get_target_read_size(tcp);
if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
}
grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
target_read_size, 1, tcp->incoming_buffer);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
}
tcp_do_read(exec_ctx, tcp);
}
}
@ -334,6 +488,9 @@ static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
grpc_error *error) {
grpc_tcp *tcp = (grpc_tcp *)arg;
GPR_ASSERT(!tcp->finished_edge);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
}
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
@ -357,9 +514,9 @@ static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
TCP_REF(tcp, "read");
if (tcp->finished_edge) {
tcp->finished_edge = false;
grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
notify_on_read(exec_ctx, tcp);
} else {
GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
}
}
@ -472,7 +629,7 @@ static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
notify_on_write(exec_ctx, tcp);
} else {
cb = tcp->write_cb;
tcp->write_cb = NULL;
@ -525,7 +682,7 @@ static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "write: delayed");
}
grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
notify_on_write(exec_ctx, tcp);
} else {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(error);
@ -602,7 +759,7 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
channel_args->args[i].value.pointer.p);
(grpc_resource_quota *)channel_args->args[i].value.pointer.p);
}
}
}
@ -631,10 +788,6 @@ grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_fd *em_fd,
gpr_ref_init(&tcp->refcount, 1);
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
GRPC_CLOSURE_INIT(&tcp->read_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&tcp->write_closure, tcp_handle_write, tcp,
grpc_schedule_on_exec_ctx);
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(

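Reading the tcp_posix.c changes above as one unit: g_uncovered_notifications_pending counts one unit owned by the backup poller itself plus one per pending uncovered write notification. cover_self adds 2 and, when a poller already existed, immediately drops 1 (net +1); drop_uncovered subtracts 1 and asserts it never releases the poller's own unit; run_poller may shut down only after winning the 1 -> 0 compare-and-swap. A sketch of that final release, as inferred from the diff:

    /* only the thread that CASes away the last unit tears the poller down */
    if (gpr_atm_no_barrier_load(&count) == 1 &&
        gpr_atm_full_cas(&count, 1, 0)) {
      /* shutdown path: no new uncovered writes can resurrect this poller */
    }
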
@ -74,7 +74,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_tcp_server **server) {
gpr_once_init(&check_init, init);
grpc_tcp_server *s = gpr_zalloc(sizeof(grpc_tcp_server));
grpc_tcp_server *s = (grpc_tcp_server *)gpr_zalloc(sizeof(grpc_tcp_server));
s->so_reuseport = has_so_reuseport;
s->expand_wildcard_addrs = false;
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
@ -138,7 +138,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
grpc_tcp_server *s = server;
grpc_tcp_server *s = (grpc_tcp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@ -197,7 +197,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_tcp_listener *sp = arg;
grpc_tcp_listener *sp = (grpc_tcp_listener *)arg;
if (err != GRPC_ERROR_NONE) {
goto error;
@ -251,7 +251,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
// Create acceptor.
grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
grpc_tcp_server_acceptor *acceptor =
(grpc_tcp_server_acceptor *)gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
@ -365,7 +366,7 @@ static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
listener->server->nports++;
grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = listener->next;
listener->next = sp;
/* sp (the new listener) is a sibling of 'listener' (the original

@ -93,7 +93,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
gpr_mu_lock(&s->mu);
s->nports++;
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp = (grpc_tcp_listener *)gpr_malloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;

@ -74,8 +74,8 @@ static void maybe_shrink(grpc_timer_heap *heap) {
if (heap->timer_count >= 8 &&
heap->timer_count <= heap->timer_capacity / SHRINK_FULLNESS_FACTOR / 2) {
heap->timer_capacity = heap->timer_count * SHRINK_FULLNESS_FACTOR;
heap->timers =
gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
heap->timers = (grpc_timer **)gpr_realloc(
heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
}
@ -99,8 +99,8 @@ int grpc_timer_heap_add(grpc_timer_heap *heap, grpc_timer *timer) {
if (heap->timer_count == heap->timer_capacity) {
heap->timer_capacity =
GPR_MAX(heap->timer_capacity + 1, heap->timer_capacity * 3 / 2);
heap->timers =
gpr_realloc(heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
heap->timers = (grpc_timer **)gpr_realloc(
heap->timers, heap->timer_capacity * sizeof(grpc_timer *));
}
timer->heap_index = heap->timer_count;
adjust_upwards(heap->timers, heap->timer_count, timer);
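
Both hunks re-point heap->timers through the same growth policy, and maybe_shrink is its counterpart. A worked example, assuming SHRINK_FULLNESS_FACTOR is 2 as in the gRPC source (the constant is not shown in these hunks):

    /* grow:   cap 100 -> GPR_MAX(101, 150) = 150                      */
    /* shrink: fires once count <= 100 / 2 / 2 = 25; new cap = 50,     */
    /* so occupancy after a shrink is about 50%, and a single add or   */
    /* remove near the boundary cannot oscillate between grow/shrink.  */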

@ -83,7 +83,7 @@ static void start_timer_thread_and_unlock(void) {
}
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
completed_thread *ct = gpr_malloc(sizeof(*ct));
completed_thread *ct = (completed_thread *)gpr_malloc(sizeof(*ct));
// The call to gpr_thd_new() has to be under the same lock used by
// gc_completed_threads(), particularly due to ct->t, which is written here
// (internally by gpr_thd_new) and read there. Otherwise it's possible for ct

@ -125,7 +125,7 @@ static grpc_socket_factory *get_socket_factory(const grpc_channel_args *args) {
}
grpc_udp_server *grpc_udp_server_create(const grpc_channel_args *args) {
grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
grpc_udp_server *s = (grpc_udp_server *)gpr_malloc(sizeof(grpc_udp_server));
gpr_mu_init(&s->mu);
s->socket_factory = get_socket_factory(args);
if (s->socket_factory) {
@ -176,7 +176,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
static void destroyed_port(grpc_exec_ctx *exec_ctx, void *server,
grpc_error *error) {
grpc_udp_server *s = server;
grpc_udp_server *s = (grpc_udp_server *)server;
gpr_mu_lock(&s->mu);
s->destroyed_ports++;
if (s->destroyed_ports == s->nports) {
@ -237,7 +237,8 @@ void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
if (s->active_ports) {
for (sp = s->head; sp; sp = sp->next) {
GPR_ASSERT(sp->orphan_cb);
struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
struct shutdown_fd_args *args =
(struct shutdown_fd_args *)gpr_malloc(sizeof(*args));
args->fd = sp->emfd;
args->server_mu = &s->mu;
GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
@ -331,7 +332,7 @@ error:
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_udp_listener *sp = arg;
grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&sp->server->mu);
if (error != GRPC_ERROR_NONE) {
@ -354,7 +355,7 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
static void on_write(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_udp_listener *sp = arg;
grpc_udp_listener *sp = (grpc_udp_listener *)arg;
gpr_mu_lock(&(sp->server->mu));
if (error != GRPC_ERROR_NONE) {
@ -393,7 +394,7 @@ static int add_socket_to_server(grpc_udp_server *s, int fd,
gpr_free(addr_str);
gpr_mu_lock(&s->mu);
s->nports++;
sp = gpr_malloc(sizeof(grpc_udp_listener));
sp = (grpc_udp_listener *)gpr_malloc(sizeof(grpc_udp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@ -444,7 +445,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t *)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
allocated_addr = gpr_malloc(sizeof(grpc_resolved_address));
allocated_addr = (grpc_resolved_address *)gpr_malloc(
sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;

@ -49,9 +49,11 @@ grpc_error *grpc_resolve_unix_domain_address(const char *name,
gpr_free(err_msg);
return err;
}
*addrs = gpr_malloc(sizeof(grpc_resolved_addresses));
*addrs =
(grpc_resolved_addresses *)gpr_malloc(sizeof(grpc_resolved_addresses));
(*addrs)->naddrs = 1;
(*addrs)->addrs = gpr_malloc(sizeof(grpc_resolved_address));
(*addrs)->addrs =
(grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
un = (struct sockaddr_un *)(*addrs)->addrs->addr;
un->sun_family = AF_UNIX;
strcpy(un->sun_path, name);

@ -42,7 +42,8 @@ static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
gpr_mu_lock(&g_cvfds.mu);
if (!g_cvfds.free_fds) {
newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
g_cvfds.cvfds = gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
g_cvfds.cvfds =
(fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
for (i = g_cvfds.size; i < newsize; i++) {
g_cvfds.cvfds[i].is_set = 0;
g_cvfds.cvfds[i].cvs = NULL;

@ -23,7 +23,7 @@
#include "src/core/lib/json/json.h"
grpc_json* grpc_json_create(grpc_json_type type) {
grpc_json* json = gpr_zalloc(sizeof(*json));
grpc_json* json = (grpc_json*)gpr_zalloc(sizeof(*json));
json->type = type;
return json;

@ -63,19 +63,19 @@ typedef struct {
* bytes at a time (or multiples thereof).
*/
static void json_writer_output_check(void *userdata, size_t needed) {
json_writer_userdata *state = userdata;
json_writer_userdata *state = (json_writer_userdata *)userdata;
if (state->free_space >= needed) return;
needed -= state->free_space;
/* Round up by 256 bytes. */
needed = (needed + 0xff) & ~0xffU;
state->output = gpr_realloc(state->output, state->allocated + needed);
state->output = (char *)gpr_realloc(state->output, state->allocated + needed);
state->free_space += needed;
state->allocated += needed;
}
/* These are needed by the writer's implementation. */
static void json_writer_output_char(void *userdata, char c) {
json_writer_userdata *state = userdata;
json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, 1);
state->output[state->string_len++] = c;
state->free_space--;
@ -83,7 +83,7 @@ static void json_writer_output_char(void *userdata, char c) {
static void json_writer_output_string_with_len(void *userdata, const char *str,
size_t len) {
json_writer_userdata *state = userdata;
json_writer_userdata *state = (json_writer_userdata *)userdata;
json_writer_output_check(userdata, len);
memcpy(state->output + state->string_len, str, len);
state->string_len += len;
@ -99,7 +99,7 @@ static void json_writer_output_string(void *userdata, const char *str) {
* the end of the current string, and advance our output pointer.
*/
static void json_reader_string_clear(void *userdata) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->string) {
GPR_ASSERT(state->string_ptr < state->input);
*state->string_ptr++ = 0;
@ -108,7 +108,7 @@ static void json_reader_string_clear(void *userdata) {
}
static void json_reader_string_add_char(void *userdata, uint32_t c) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->string_ptr < state->input);
GPR_ASSERT(c <= 0xff);
*state->string_ptr++ = (uint8_t)c;
@ -149,7 +149,7 @@ static void json_reader_string_add_utf32(void *userdata, uint32_t c) {
*/
static uint32_t json_reader_read_char(void *userdata) {
uint32_t r;
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
if (state->remaining_input == 0) return GRPC_JSON_READ_CHAR_EOF;
@ -168,7 +168,7 @@ static uint32_t json_reader_read_char(void *userdata) {
* our tree-in-progress inside our opaque structure.
*/
static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = grpc_json_create(type);
json->parent = state->current_container;
@ -194,7 +194,7 @@ static grpc_json *json_create_and_link(void *userdata, grpc_json_type type) {
}
static void json_reader_container_begins(void *userdata, grpc_json_type type) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *container;
GPR_ASSERT(type == GRPC_JSON_ARRAY || type == GRPC_JSON_OBJECT);
@ -215,7 +215,7 @@ static void json_reader_container_begins(void *userdata, grpc_json_type type) {
*/
static grpc_json_type json_reader_container_ends(void *userdata) {
grpc_json_type container_type = GRPC_JSON_TOP_LEVEL;
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
GPR_ASSERT(state->current_container);
@ -236,18 +236,18 @@ static grpc_json_type json_reader_container_ends(void *userdata) {
* We'll keep it as a string, and leave it to the caller to evaluate it.
*/
static void json_reader_set_key(void *userdata) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
state->key = state->string;
}
static void json_reader_set_string(void *userdata) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_STRING);
json->value = (char *)state->string;
}
static int json_reader_set_number(void *userdata) {
json_reader_userdata *state = userdata;
json_reader_userdata *state = (json_reader_userdata *)userdata;
grpc_json *json = json_create_and_link(userdata, GRPC_JSON_NUMBER);
json->value = (char *)state->string;
return 1;

@ -128,13 +128,11 @@ static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
security_handshaker *h = arg;
gpr_mu_lock(&h->mu);
static void on_peer_checked_inner(grpc_exec_ctx *exec_ctx,
security_handshaker *h, grpc_error *error) {
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
goto done;
return;
}
// Create zero-copy frame protector, if implemented.
tsi_zero_copy_grpc_protector *zero_copy_protector = NULL;
@ -146,7 +144,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Zero-copy frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
goto done;
return;
}
// Create frame protector if zero-copy frame protector is NULL.
tsi_frame_protector *protector = NULL;
@ -158,7 +156,7 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
"Frame protector creation failed"),
result);
security_handshake_failed_locked(exec_ctx, h, error);
goto done;
return;
}
}
// Get unused bytes.
@ -192,7 +190,13 @@ static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
// Set shutdown to true so that subsequent calls to
// security_handshaker_shutdown() do nothing.
h->shutdown = true;
done:
}
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
security_handshaker *h = (security_handshaker *)arg;
gpr_mu_lock(&h->mu);
on_peer_checked_inner(exec_ctx, h, error);
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
}
@@ -254,7 +258,7 @@ static grpc_error *on_handshake_next_done_locked(
 static void on_handshake_next_done_grpc_wrapper(
     tsi_result result, void *user_data, const unsigned char *bytes_to_send,
     size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
-  security_handshaker *h = user_data;
+  security_handshaker *h = (security_handshaker *)user_data;
   // This callback will be invoked by TSI in a non-grpc thread, so it's
   // safe to create our own exec_ctx here.
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
@@ -296,7 +300,7 @@ static grpc_error *do_handshaker_next_locked(

 static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
                                                  void *arg, grpc_error *error) {
-  security_handshaker *h = arg;
+  security_handshaker *h = (security_handshaker *)arg;
   gpr_mu_lock(&h->mu);
   if (error != GRPC_ERROR_NONE || h->shutdown) {
     security_handshake_failed_locked(
@@ -313,7 +317,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
     bytes_received_size += GRPC_SLICE_LENGTH(h->args->read_buffer->slices[i]);
   }
   if (bytes_received_size > h->handshake_buffer_size) {
-    h->handshake_buffer = gpr_realloc(h->handshake_buffer, bytes_received_size);
+    h->handshake_buffer =
+        (uint8_t *)gpr_realloc(h->handshake_buffer, bytes_received_size);
     h->handshake_buffer_size = bytes_received_size;
   }
   size_t offset = 0;
@@ -338,7 +343,7 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,

 static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
                                            grpc_error *error) {
-  security_handshaker *h = arg;
+  security_handshaker *h = (security_handshaker *)arg;
   gpr_mu_lock(&h->mu);
   if (error != GRPC_ERROR_NONE || h->shutdown) {
     security_handshake_failed_locked(
@@ -415,14 +420,15 @@ static const grpc_handshaker_vtable security_handshaker_vtable = {
 static grpc_handshaker *security_handshaker_create(
     grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
     grpc_security_connector *connector) {
-  security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
+  security_handshaker *h =
+      (security_handshaker *)gpr_zalloc(sizeof(security_handshaker));
   grpc_handshaker_init(&security_handshaker_vtable, &h->base);
   h->handshaker = handshaker;
   h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
   gpr_mu_init(&h->mu);
   gpr_ref_init(&h->refs, 1);
   h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
-  h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
+  h->handshake_buffer = (uint8_t *)gpr_malloc(h->handshake_buffer_size);
   GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
                     on_handshake_data_sent_to_peer, h,
                     grpc_schedule_on_exec_ctx);
@@ -465,7 +471,7 @@ static const grpc_handshaker_vtable fail_handshaker_vtable = {
     fail_handshaker_do_handshake};

 static grpc_handshaker *fail_handshaker_create() {
-  grpc_handshaker *h = gpr_malloc(sizeof(*h));
+  grpc_handshaker *h = (grpc_handshaker *)gpr_malloc(sizeof(*h));
   grpc_handshaker_init(&fail_handshaker_vtable, h);
   return h;
 }

@@ -58,7 +58,7 @@ char *grpc_base64_encode(const void *vdata, size_t data_size, int url_safe,
                          int multiline) {
   size_t result_projected_size =
       grpc_base64_estimate_encoded_size(data_size, url_safe, multiline);
-  char *result = gpr_malloc(result_projected_size);
+  char *result = (char *)gpr_malloc(result_projected_size);
   grpc_base64_encode_core(result, vdata, data_size, url_safe, multiline);
   return result;
 }
@@ -75,7 +75,7 @@ size_t grpc_base64_estimate_encoded_size(size_t data_size, int url_safe,

 void grpc_base64_encode_core(char *result, const void *vdata, size_t data_size,
                              int url_safe, int multiline) {
-  const unsigned char *data = vdata;
+  const unsigned char *data = (const unsigned char *)vdata;
   const char *base64_chars =
       url_safe ? base64_url_safe_chars : base64_url_unsafe_chars;
   const size_t result_projected_size =

@@ -27,7 +27,7 @@
 #include "src/core/lib/iomgr/exec_ctx.h"

 char *grpc_slice_to_c_string(grpc_slice slice) {
-  char *out = gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
+  char *out = (char *)gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1);
   memcpy(out, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));
   out[GRPC_SLICE_LENGTH(slice)] = 0;
   return out;
@@ -105,12 +105,12 @@ typedef struct new_slice_refcount {
 } new_slice_refcount;

 static void new_slice_ref(void *p) {
-  new_slice_refcount *r = p;
+  new_slice_refcount *r = (new_slice_refcount *)p;
   gpr_ref(&r->refs);
 }

 static void new_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
-  new_slice_refcount *r = p;
+  new_slice_refcount *r = (new_slice_refcount *)p;
   if (gpr_unref(&r->refs)) {
     r->user_destroy(r->user_data);
     gpr_free(r);
@@ -125,7 +125,8 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
                                          void (*destroy)(void *),
                                          void *user_data) {
   grpc_slice slice;
-  new_slice_refcount *rc = gpr_malloc(sizeof(new_slice_refcount));
+  new_slice_refcount *rc =
+      (new_slice_refcount *)gpr_malloc(sizeof(new_slice_refcount));
   gpr_ref_init(&rc->refs, 1);
   rc->rc.vtable = &new_slice_vtable;
   rc->rc.sub_refcount = &rc->rc;
@@ -133,7 +134,7 @@ grpc_slice grpc_slice_new_with_user_data(void *p, size_t len,
   rc->user_data = user_data;
   slice.refcount = &rc->rc;
-  slice.data.refcounted.bytes = p;
+  slice.data.refcounted.bytes = (uint8_t *)p;
   slice.data.refcounted.length = len;
   return slice;
 }
@@ -154,12 +155,12 @@ typedef struct new_with_len_slice_refcount {
 } new_with_len_slice_refcount;

 static void new_with_len_ref(void *p) {
-  new_with_len_slice_refcount *r = p;
+  new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
   gpr_ref(&r->refs);
 }

 static void new_with_len_unref(grpc_exec_ctx *exec_ctx, void *p) {
-  new_with_len_slice_refcount *r = p;
+  new_with_len_slice_refcount *r = (new_with_len_slice_refcount *)p;
   if (gpr_unref(&r->refs)) {
     r->user_destroy(r->user_data, r->user_length);
     gpr_free(r);
@@ -183,7 +184,7 @@ grpc_slice grpc_slice_new_with_len(void *p, size_t len,
   rc->user_length = len;
   slice.refcount = &rc->rc;
-  slice.data.refcounted.bytes = p;
+  slice.data.refcounted.bytes = (uint8_t *)p;
   slice.data.refcounted.length = len;
   return slice;
 }
@@ -205,12 +206,12 @@ typedef struct {
 } malloc_refcount;

 static void malloc_ref(void *p) {
-  malloc_refcount *r = p;
+  malloc_refcount *r = (malloc_refcount *)p;
   gpr_ref(&r->refs);
 }

 static void malloc_unref(grpc_exec_ctx *exec_ctx, void *p) {
-  malloc_refcount *r = p;
+  malloc_refcount *r = (malloc_refcount *)p;
   if (gpr_unref(&r->refs)) {
     gpr_free(r);
   }
@@ -232,7 +233,8 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
      refcount is a malloc_refcount
      bytes is an array of bytes of the requested length
      Both parts are placed in the same allocation returned from gpr_malloc */
-  malloc_refcount *rc = gpr_malloc(sizeof(malloc_refcount) + length);
+  malloc_refcount *rc =
+      (malloc_refcount *)gpr_malloc(sizeof(malloc_refcount) + length);

   /* Initial refcount on rc is 1 - and it's up to the caller to release
      this reference. */
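
As the comment in `grpc_slice_malloc_large` above notes, the refcount header and the byte payload share one `gpr_malloc` allocation. A standalone sketch of that layout trick with a hypothetical `header` type (not the gRPC struct):

#include <stdint.h>
#include <stdlib.h>

typedef struct {
  int refs; /* stand-in for the real refcount machinery */
} header;

int main(void) {
  size_t length = 64;
  /* One allocation: the header, then `length` payload bytes. */
  header *h = (header *)malloc(sizeof(header) + length);
  h->refs = 1;
  uint8_t *bytes = (uint8_t *)(h + 1); /* payload starts just past the header */
  bytes[0] = 0;
  free(h); /* a single free releases header and payload together */
  return 0;
}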
@@ -451,7 +453,7 @@ int grpc_slice_rchr(grpc_slice s, char c) {

 int grpc_slice_chr(grpc_slice s, char c) {
   const char *b = (const char *)GRPC_SLICE_START_PTR(s);
-  const char *p = memchr(b, c, GRPC_SLICE_LENGTH(s));
+  const char *p = (const char *)memchr(b, c, GRPC_SLICE_LENGTH(s));
   return p == NULL ? -1 : (int)(p - b);
 }

@@ -45,11 +45,12 @@ static void maybe_embiggen(grpc_slice_buffer *sb) {
     sb->capacity = GROW(sb->capacity);
     GPR_ASSERT(sb->capacity > slice_count);
     if (sb->base_slices == sb->inlined) {
-      sb->base_slices = gpr_malloc(sb->capacity * sizeof(grpc_slice));
+      sb->base_slices =
+          (grpc_slice *)gpr_malloc(sb->capacity * sizeof(grpc_slice));
       memcpy(sb->base_slices, sb->inlined, slice_count * sizeof(grpc_slice));
     } else {
-      sb->base_slices =
-          gpr_realloc(sb->base_slices, sb->capacity * sizeof(grpc_slice));
+      sb->base_slices = (grpc_slice *)gpr_realloc(
+          sb->base_slices, sb->capacity * sizeof(grpc_slice));
     }

     sb->slices = sb->base_slices + slice_offset;
@@ -291,7 +292,7 @@ void grpc_slice_buffer_move_first_no_ref(grpc_slice_buffer *src, size_t n,

 void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx *exec_ctx,
                                               grpc_slice_buffer *src, size_t n,
                                               void *dst) {
-  char *dstp = dst;
+  char *dstp = (char *)dst;
   GPR_ASSERT(src->length >= n);
   while (n > 0) {
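
`maybe_embiggen` above has two growth paths because the inline array embedded in the struct cannot be passed to `gpr_realloc`: the first growth copies out of the inline storage, and later growths realloc the heap block. A standalone sketch of that small-buffer pattern with a hypothetical `buf` type:

#include <stdlib.h>
#include <string.h>

#define INLINE_CAP 8

typedef struct {
  int *data; /* aliases `inlined` until the first growth */
  size_t count, capacity;
  int inlined[INLINE_CAP];
} buf;

static void buf_grow(buf *b) {
  b->capacity *= 2;
  if (b->data == b->inlined) {
    /* First growth: inline storage can't be realloc'ed, so copy out. */
    b->data = (int *)malloc(b->capacity * sizeof(int));
    memcpy(b->data, b->inlined, b->count * sizeof(int));
  } else {
    /* Already on the heap: a plain realloc suffices. */
    b->data = (int *)realloc(b->data, b->capacity * sizeof(int));
  }
}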

@@ -60,14 +60,15 @@ grpc_slice_hash_table* grpc_slice_hash_table_create(
     size_t num_entries, grpc_slice_hash_table_entry* entries,
     void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
     int (*value_cmp)(void* a, void* b)) {
-  grpc_slice_hash_table* table = gpr_zalloc(sizeof(*table));
+  grpc_slice_hash_table* table =
+      (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table));
   gpr_ref_init(&table->refs, 1);
   table->destroy_value = destroy_value;
   table->value_cmp = value_cmp;
   // Keep load factor low to improve performance of lookups.
   table->size = num_entries * 2;
   const size_t entry_size = sizeof(grpc_slice_hash_table_entry) * table->size;
-  table->entries = gpr_zalloc(entry_size);
+  table->entries = (grpc_slice_hash_table_entry*)gpr_zalloc(entry_size);
   for (size_t i = 0; i < num_entries; ++i) {
     grpc_slice_hash_table_entry* entry = &entries[i];
     grpc_slice_hash_table_add(table, entry->key, entry->value);
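
The `table->size = num_entries * 2` line above keeps the load factor at or below 0.5, so open-addressing lookups reach an empty slot quickly. A standalone sketch of the kind of linear-probe lookup that sizing speeds up (hypothetical `slot` type, not the gRPC one):

#include <stddef.h>
#include <string.h>

typedef struct {
  const char *key; /* NULL marks an empty slot */
  void *value;
} slot;

static void *probe_lookup(slot *slots, size_t table_size, size_t hash,
                          const char *key) {
  size_t idx = hash % table_size;
  while (slots[idx].key != NULL) { /* an empty slot ends the probe */
    if (strcmp(slots[idx].key, key) == 0) return slots[idx].value;
    idx = (idx + 1) % table_size; /* linear probing */
  }
  return NULL;
}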

@@ -69,7 +69,7 @@ static uint32_t max_static_metadata_hash_probe;
 static uint32_t static_metadata_hash_values[GRPC_STATIC_MDSTR_COUNT];

 static void interned_slice_ref(void *p) {
-  interned_slice_refcount *s = p;
+  interned_slice_refcount *s = (interned_slice_refcount *)p;
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) > 0);
 }
@@ -90,7 +90,7 @@ static void interned_slice_destroy(interned_slice_refcount *s) {
 }

 static void interned_slice_unref(grpc_exec_ctx *exec_ctx, void *p) {
-  interned_slice_refcount *s = p;
+  interned_slice_refcount *s = (interned_slice_refcount *)p;
   if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
     interned_slice_destroy(s);
   }
@@ -129,7 +129,8 @@ static void grow_shard(slice_shard *shard) {
   GPR_TIMER_BEGIN("grow_strtab", 0);

-  strtab = gpr_zalloc(sizeof(interned_slice_refcount *) * capacity);
+  strtab = (interned_slice_refcount **)gpr_zalloc(
+      sizeof(interned_slice_refcount *) * capacity);

   for (i = 0; i < shard->capacity; i++) {
     for (s = shard->strs[i]; s; s = next) {
@@ -242,7 +243,8 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
   /* not found: create a new string */
   /* string data goes after the internal_string header */
-  s = gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice));
+  s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
+                                            GRPC_SLICE_LENGTH(slice));
   gpr_atm_rel_store(&s->refcnt, 1);
   s->length = GRPC_SLICE_LENGTH(slice);
   s->hash = hash;
@@ -280,7 +282,8 @@ void grpc_slice_intern_init(void) {
     gpr_mu_init(&shard->mu);
     shard->count = 0;
     shard->capacity = INITIAL_SHARD_CAPACITY;
-    shard->strs = gpr_zalloc(sizeof(*shard->strs) * shard->capacity);
+    shard->strs = (interned_slice_refcount **)gpr_zalloc(sizeof(*shard->strs) *
+                                                         shard->capacity);
   }
   for (size_t i = 0; i < GPR_ARRAY_SIZE(static_metadata_hash); i++) {
     static_metadata_hash[i].hash = 0;
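
The interned-slice pair above increments with `gpr_atm_no_barrier_fetch_add` but decrements with `gpr_atm_full_fetch_add`, destroying only when the old count was 1. A standalone C11 sketch of that asymmetric refcount pattern (hypothetical `obj` type, not the gRPC one):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  atomic_long refcnt;
} obj;

static void obj_ref(obj *o) {
  /* Increments can be relaxed: taking a ref never publishes new writes. */
  atomic_fetch_add_explicit(&o->refcnt, 1, memory_order_relaxed);
}

static void obj_unref(obj *o) {
  /* The decrement needs stronger ordering so the destroying thread
     observes every write made while references were still held. */
  if (atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_acq_rel) == 1) {
    free(o);
  }
}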

@@ -80,12 +80,12 @@ static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,

 static void alarm_end_completion(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_cq_completion *c) {
-  grpc_alarm *alarm = arg;
+  grpc_alarm *alarm = (grpc_alarm *)arg;
   GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
 }

 static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_alarm *alarm = arg;
+  grpc_alarm *alarm = (grpc_alarm *)arg;

   /* We are queuing an op on completion queue. This means, the alarm's structure
      cannot be destroyed until the op is dequeued. Adding an extra ref
@@ -96,7 +96,7 @@ static void alarm_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
 }

 grpc_alarm *grpc_alarm_create(void *reserved) {
-  grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
+  grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));

 #ifndef NDEBUG
   if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
Some files were not shown because too many files have changed in this diff.