diff --git a/doc/environment_variables.md b/doc/environment_variables.md
index 1eb850e976d..d1172e62f45 100644
--- a/doc/environment_variables.md
+++ b/doc/environment_variables.md
@@ -140,3 +140,13 @@ some configuration as environment variables that can be set.
 * grpc_cfstream
   set to 1 to turn on CFStream experiment. With this experiment gRPC uses CFStream API to make TCP
   connections. The option is only available on iOS platform and when macro GRPC_CFSTREAM is defined.
+
+* GRPC_ARENA_INIT_STRATEGY
+  Selects the initialization strategy for blocks allocated in the arena. Valid
+  values are:
+  - no_init (default): Do not initialize the arena blocks.
+  - zero_init: Initialize the arena blocks with 0.
+  - non_zero_init: Initialize the arena blocks with a non-zero value.
+
+  NOTE: This environment variable is experimental and will be removed, so it
+        should not be relied upon.
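
(A quick usage sketch, not part of this change: the variable must be set before
gRPC reads its environment, so export it before launching the process or set it
early in main(). `setenv` below is POSIX; Windows would need `_putenv_s`.)

```cpp
#include <cstdlib>
#include <grpc/grpc.h>

int main() {
  // Illustrative only: opt into zero-initialized arena blocks.
  setenv("GRPC_ARENA_INIT_STRATEGY", "zero_init", 1 /* overwrite */);
  grpc_init();  // the strategy is picked up once gRPC starts allocating arenas
  // ... use gRPC ...
  grpc_shutdown();
  return 0;
}
```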
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 58db7a05b4d..d42961fc608 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -938,12 +938,26 @@ static void cc_destroy_channel_elem(grpc_channel_element* elem) {
 //   (census filter is on top of this one)
 // - add census stats for retries
 
+namespace {
+struct call_data;
+
 // State used for starting a retryable batch on a subchannel call.
 // This provides its own grpc_transport_stream_op_batch and other data
 // structures needed to populate the ops in the batch.
 // We allocate one struct on the arena for each attempt at starting a
 // batch on a given subchannel call.
-typedef struct {
+struct subchannel_batch_data {
+  subchannel_batch_data(grpc_call_element* elem, call_data* calld, int refcount,
+                        bool set_on_complete);
+  // All dtor code must go in `destroy`: closures owned by
+  // `subchannel_batch_data` may still run after `batch_data_unref` drops
+  // the last ref, and MSAN would flag any access to this object once its
+  // dtor has run. As a result, we cannot call the dtor from
+  // `batch_data_unref`.
+  // TODO(soheil): We should try to call the dtor in `batch_data_unref`.
+  ~subchannel_batch_data() { destroy(); }
+  void destroy();
+
   gpr_refcount refs;
   grpc_call_element* elem;
   grpc_subchannel_call* subchannel_call;  // Holds a ref.
@@ -952,11 +966,23 @@ typedef struct {
   grpc_transport_stream_op_batch batch;
   // For intercepting on_complete.
   grpc_closure on_complete;
-} subchannel_batch_data;
+};
 
 // Retry state associated with a subchannel call.
 // Stored in the parent_data of the subchannel call object.
-typedef struct {
+struct subchannel_call_retry_state {
+  explicit subchannel_call_retry_state(grpc_call_context_element* context)
+      : batch_payload(context),
+        started_send_initial_metadata(false),
+        completed_send_initial_metadata(false),
+        started_send_trailing_metadata(false),
+        completed_send_trailing_metadata(false),
+        started_recv_initial_metadata(false),
+        completed_recv_initial_metadata(false),
+        started_recv_trailing_metadata(false),
+        completed_recv_trailing_metadata(false),
+        retry_dispatched(false) {}
+
   // subchannel_batch_data.batch.payload points to this.
   grpc_transport_stream_op_batch_payload batch_payload;
   // For send_initial_metadata.
@@ -975,7 +1001,7 @@ typedef struct {
   // For intercepting recv_initial_metadata.
   grpc_metadata_batch recv_initial_metadata;
   grpc_closure recv_initial_metadata_ready;
-  bool trailing_metadata_available;
+  bool trailing_metadata_available = false;
   // For intercepting recv_message.
   grpc_closure recv_message_ready;
   grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_message;
@@ -985,10 +1011,10 @@ typedef struct {
   grpc_closure recv_trailing_metadata_ready;
   // These fields indicate which ops have been started and completed on
   // this subchannel call.
-  size_t started_send_message_count;
-  size_t completed_send_message_count;
-  size_t started_recv_message_count;
-  size_t completed_recv_message_count;
+  size_t started_send_message_count = 0;
+  size_t completed_send_message_count = 0;
+  size_t started_recv_message_count = 0;
+  size_t completed_recv_message_count = 0;
   bool started_send_initial_metadata : 1;
   bool completed_send_initial_metadata : 1;
   bool started_send_trailing_metadata : 1;
@@ -999,12 +1025,12 @@ typedef struct {
   bool completed_recv_trailing_metadata : 1;
   // State for callback processing.
   bool retry_dispatched : 1;
-  subchannel_batch_data* recv_initial_metadata_ready_deferred_batch;
-  grpc_error* recv_initial_metadata_error;
-  subchannel_batch_data* recv_message_ready_deferred_batch;
-  grpc_error* recv_message_error;
-  subchannel_batch_data* recv_trailing_metadata_internal_batch;
-} subchannel_call_retry_state;
+  subchannel_batch_data* recv_initial_metadata_ready_deferred_batch = nullptr;
+  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+  subchannel_batch_data* recv_message_ready_deferred_batch = nullptr;
+  grpc_error* recv_message_error = GRPC_ERROR_NONE;
+  subchannel_batch_data* recv_trailing_metadata_internal_batch = nullptr;
+};
 
 // Pending batches stored in call data.
 typedef struct {
@@ -1019,7 +1045,44 @@ typedef struct {
     Handles queueing of stream ops until a call object is ready, waiting
     for initial metadata before trying to create a call object,
     and handling cancellation gracefully. */
-typedef struct client_channel_call_data {
+struct call_data {
+  call_data(grpc_call_element* elem, const channel_data& chand,
+            const grpc_call_element_args& args)
+      : deadline_state(elem, args.call_stack, args.call_combiner,
+                       GPR_LIKELY(chand.deadline_checking_enabled)
+                           ? args.deadline
+                           : GRPC_MILLIS_INF_FUTURE),
+        path(grpc_slice_ref_internal(args.path)),
+        call_start_time(args.start_time),
+        deadline(args.deadline),
+        arena(args.arena),
+        owning_call(args.call_stack),
+        call_combiner(args.call_combiner),
+        pending_send_initial_metadata(false),
+        pending_send_message(false),
+        pending_send_trailing_metadata(false),
+        enable_retries(chand.enable_retries),
+        retry_committed(false),
+        last_attempt_got_server_pushback(false) {}
+
+  ~call_data() {
+    if (GPR_LIKELY(subchannel_call != nullptr)) {
+      GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call,
+                                 "client_channel_destroy_call");
+    }
+    grpc_slice_unref_internal(path);
+    GRPC_ERROR_UNREF(cancel_error);
+    for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches); ++i) {
+      GPR_ASSERT(pending_batches[i].batch == nullptr);
+    }
+    for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
+      if (pick.subchannel_call_context[i].value != nullptr) {
+        pick.subchannel_call_context[i].destroy(
+            pick.subchannel_call_context[i].value);
+      }
+    }
+  }
+
   // State for handling deadlines.
   // The code in deadline_filter.c requires this to be the first field.
   // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
@@ -1038,24 +1101,24 @@ typedef struct client_channel_call_data {
   grpc_core::RefCountedPtr<ServerRetryThrottleData> retry_throttle_data;
   grpc_core::RefCountedPtr<ClientChannelMethodParams> method_params;
 
-  grpc_subchannel_call* subchannel_call;
+  grpc_subchannel_call* subchannel_call = nullptr;
 
   // Set when we get a cancel_stream op.
-  grpc_error* cancel_error;
+  grpc_error* cancel_error = GRPC_ERROR_NONE;
 
   grpc_core::LoadBalancingPolicy::PickState pick;
   grpc_closure pick_closure;
   grpc_closure pick_cancel_closure;
 
-  grpc_polling_entity* pollent;
-  bool pollent_added_to_interested_parties;
+  grpc_polling_entity* pollent = nullptr;
+  bool pollent_added_to_interested_parties = false;
 
   // Batches are added to this list when received from above.
   // They are removed when we are done handling the batch (i.e., when
   // either we have invoked all of the batch's callbacks or we have
   // passed the batch down to the subchannel call and are not
   // intercepting any of its callbacks).
-  pending_batch pending_batches[MAX_PENDING_BATCHES];
+  pending_batch pending_batches[MAX_PENDING_BATCHES] = {};
   bool pending_send_initial_metadata : 1;
   bool pending_send_message : 1;
   bool pending_send_trailing_metadata : 1;
@@ -1064,8 +1127,8 @@ typedef struct client_channel_call_data {
   bool enable_retries : 1;
   bool retry_committed : 1;
   bool last_attempt_got_server_pushback : 1;
-  int num_attempts_completed;
-  size_t bytes_buffered_for_retry;
+  int num_attempts_completed = 0;
+  size_t bytes_buffered_for_retry = 0;
   grpc_core::ManualConstructor<grpc_core::BackOff> retry_backoff;
   grpc_timer retry_timer;
 
@@ -1076,12 +1139,12 @@ typedef struct client_channel_call_data {
   // until all of these batches have completed.
   // Note that we actually only need to track replay batches, but it's
   // easier to track all batches with send ops.
-  int num_pending_retriable_subchannel_send_batches;
+  int num_pending_retriable_subchannel_send_batches = 0;
 
   // Cached data for retrying send ops.
   // send_initial_metadata
-  bool seen_send_initial_metadata;
-  grpc_linked_mdelem* send_initial_metadata_storage;
+  bool seen_send_initial_metadata = false;
+  grpc_linked_mdelem* send_initial_metadata_storage = nullptr;
   grpc_metadata_batch send_initial_metadata;
   uint32_t send_initial_metadata_flags;
   gpr_atm* peer_string;
@@ -1092,14 +1155,13 @@ typedef struct client_channel_call_data {
   // Note: We inline the cache for the first 3 send_message ops and use
   // dynamic allocation after that.  This number was essentially picked
   // at random; it could be changed in the future to tune performance.
-  grpc_core::ManualConstructor<
-      grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3>>
-      send_messages;
+  grpc_core::InlinedVector<grpc_core::ByteStreamCache*, 3> send_messages;
   // send_trailing_metadata
-  bool seen_send_trailing_metadata;
-  grpc_linked_mdelem* send_trailing_metadata_storage;
+  bool seen_send_trailing_metadata = false;
+  grpc_linked_mdelem* send_trailing_metadata_storage = nullptr;
   grpc_metadata_batch send_trailing_metadata;
-} call_data;
+};
+}  // namespace
 
 // Forward declarations.
 static void retry_commit(grpc_call_element* elem,
@@ -1143,7 +1205,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
             gpr_arena_alloc(calld->arena, sizeof(grpc_core::ByteStreamCache)));
     new (cache) grpc_core::ByteStreamCache(
         std::move(batch->payload->send_message.send_message));
-    calld->send_messages->push_back(cache);
+    calld->send_messages.push_back(cache);
   }
   // Save metadata batch for send_trailing_metadata ops.
   if (batch->send_trailing_metadata) {
@@ -1180,7 +1242,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
             "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
             chand, calld, idx);
   }
-  (*calld->send_messages)[idx]->Destroy();
+  calld->send_messages[idx]->Destroy();
 }
 
 // Frees cached send_trailing_metadata.
@@ -1650,55 +1712,66 @@ static bool maybe_retry(grpc_call_element* elem,
 // subchannel_batch_data
 //
 
-// Creates a subchannel_batch_data object on the call's arena with the
-// specified refcount.  If set_on_complete is true, the batch's
-// on_complete callback will be set to point to on_complete();
-// otherwise, the batch's on_complete callback will be null.
-static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
-                                                int refcount,
-                                                bool set_on_complete) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
+namespace {
+subchannel_batch_data::subchannel_batch_data(grpc_call_element* elem,
+                                             call_data* calld, int refcount,
+                                             bool set_on_complete)
+    : elem(elem),
+      subchannel_call(GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call,
+                                               "batch_data_create")) {
   subchannel_call_retry_state* retry_state =
       static_cast<subchannel_call_retry_state*>(
           grpc_connected_subchannel_call_get_parent_data(
               calld->subchannel_call));
-  subchannel_batch_data* batch_data = static_cast<subchannel_batch_data*>(
-      gpr_arena_alloc(calld->arena, sizeof(*batch_data)));
-  batch_data->elem = elem;
-  batch_data->subchannel_call =
-      GRPC_SUBCHANNEL_CALL_REF(calld->subchannel_call, "batch_data_create");
-  batch_data->batch.payload = &retry_state->batch_payload;
-  gpr_ref_init(&batch_data->refs, refcount);
+  batch.payload = &retry_state->batch_payload;
+  gpr_ref_init(&refs, refcount);
   if (set_on_complete) {
-    GRPC_CLOSURE_INIT(&batch_data->on_complete, on_complete, batch_data,
+    GRPC_CLOSURE_INIT(&on_complete, ::on_complete, this,
                       grpc_schedule_on_exec_ctx);
-    batch_data->batch.on_complete = &batch_data->on_complete;
+    batch.on_complete = &on_complete;
   }
   GRPC_CALL_STACK_REF(calld->owning_call, "batch_data");
+}
+
+void subchannel_batch_data::destroy() {
+  subchannel_call_retry_state* retry_state =
+      static_cast<subchannel_call_retry_state*>(
+          grpc_connected_subchannel_call_get_parent_data(subchannel_call));
+  if (batch.send_initial_metadata) {
+    grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
+  }
+  if (batch.send_trailing_metadata) {
+    grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
+  }
+  if (batch.recv_initial_metadata) {
+    grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
+  }
+  if (batch.recv_trailing_metadata) {
+    grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
+  }
+  GRPC_SUBCHANNEL_CALL_UNREF(subchannel_call, "batch_data_unref");
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+}
+}  // namespace
+
+// Creates a subchannel_batch_data object on the call's arena with the
+// specified refcount.  If set_on_complete is true, the batch's
+// on_complete callback will be set to point to on_complete();
+// otherwise, the batch's on_complete callback will be null.
+static subchannel_batch_data* batch_data_create(grpc_call_element* elem,
+                                                int refcount,
+                                                bool set_on_complete) {
+  call_data* calld = static_cast<call_data*>(elem->call_data);
+  subchannel_batch_data* batch_data =
+      new (gpr_arena_alloc(calld->arena, sizeof(*batch_data)))
+          subchannel_batch_data(elem, calld, refcount, set_on_complete);
   return batch_data;
 }
 
 static void batch_data_unref(subchannel_batch_data* batch_data) {
   if (gpr_unref(&batch_data->refs)) {
-    subchannel_call_retry_state* retry_state =
-        static_cast<subchannel_call_retry_state*>(
-            grpc_connected_subchannel_call_get_parent_data(
-                batch_data->subchannel_call));
-    if (batch_data->batch.send_initial_metadata) {
-      grpc_metadata_batch_destroy(&retry_state->send_initial_metadata);
-    }
-    if (batch_data->batch.send_trailing_metadata) {
-      grpc_metadata_batch_destroy(&retry_state->send_trailing_metadata);
-    }
-    if (batch_data->batch.recv_initial_metadata) {
-      grpc_metadata_batch_destroy(&retry_state->recv_initial_metadata);
-    }
-    if (batch_data->batch.recv_trailing_metadata) {
-      grpc_metadata_batch_destroy(&retry_state->recv_trailing_metadata);
-    }
-    GRPC_SUBCHANNEL_CALL_UNREF(batch_data->subchannel_call, "batch_data_unref");
-    call_data* calld = static_cast<call_data*>(batch_data->elem->call_data);
-    GRPC_CALL_STACK_UNREF(calld->owning_call, "batch_data");
+    batch_data->destroy();
   }
 }
 
@@ -1996,7 +2069,7 @@ static bool pending_batch_is_unstarted(
     return true;
   }
   if (pending->batch->send_message &&
-      retry_state->started_send_message_count < calld->send_messages->size()) {
+      retry_state->started_send_message_count < calld->send_messages.size()) {
     return true;
   }
   if (pending->batch->send_trailing_metadata &&
@@ -2152,7 +2225,7 @@ static void add_closures_for_replay_or_pending_send_ops(
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
   call_data* calld = static_cast<call_data*>(elem->call_data);
   bool have_pending_send_message_ops =
-      retry_state->started_send_message_count < calld->send_messages->size();
+      retry_state->started_send_message_count < calld->send_messages.size();
   bool have_pending_send_trailing_metadata_op =
       calld->seen_send_trailing_metadata &&
       !retry_state->started_send_trailing_metadata;
@@ -2344,7 +2417,7 @@ static void add_retriable_send_message_op(
             chand, calld, retry_state->started_send_message_count);
   }
   grpc_core::ByteStreamCache* cache =
-      (*calld->send_messages)[retry_state->started_send_message_count];
+      calld->send_messages[retry_state->started_send_message_count];
   ++retry_state->started_send_message_count;
   retry_state->send_message.Init(cache);
   batch_data->batch.send_message = true;
@@ -2476,7 +2549,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
   }
   // send_message.
   // Note that we can only have one send_message op in flight at a time.
-  if (retry_state->started_send_message_count < calld->send_messages->size() &&
+  if (retry_state->started_send_message_count < calld->send_messages.size() &&
       retry_state->started_send_message_count ==
           retry_state->completed_send_message_count &&
       !calld->pending_send_message) {
@@ -2497,7 +2570,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
   // to start, since we can't send down any more send_message ops after
   // send_trailing_metadata.
   if (calld->seen_send_trailing_metadata &&
-      retry_state->started_send_message_count == calld->send_messages->size() &&
+      retry_state->started_send_message_count == calld->send_messages.size() &&
       !retry_state->started_send_trailing_metadata &&
       !calld->pending_send_trailing_metadata) {
     if (grpc_client_channel_trace.enabled()) {
@@ -2549,7 +2622,7 @@ static void add_subchannel_batches_for_pending_batches(
     // send_message ops after send_trailing_metadata.
     if (batch->send_trailing_metadata &&
         (retry_state->started_send_message_count + batch->send_message <
-             calld->send_messages->size() ||
+             calld->send_messages.size() ||
          retry_state->started_send_trailing_metadata)) {
       continue;
     }
@@ -2716,11 +2789,9 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
     pending_batches_fail(elem, new_error, true /* yield_call_combiner */);
   } else {
     if (parent_data_size > 0) {
-      subchannel_call_retry_state* retry_state =
-          static_cast<subchannel_call_retry_state*>(
-              grpc_connected_subchannel_call_get_parent_data(
-                  calld->subchannel_call));
-      retry_state->batch_payload.context = calld->pick.subchannel_call_context;
+      new (grpc_connected_subchannel_call_get_parent_data(
+          calld->subchannel_call))
+          subchannel_call_retry_state(calld->pick.subchannel_call_context);
     }
     pending_batches_resume(elem);
   }
@@ -3260,21 +3331,8 @@ static void cc_start_transport_stream_op_batch(
 /* Constructor for call_data */
 static grpc_error* cc_init_call_elem(grpc_call_element* elem,
                                      const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  // Initialize data members.
-  calld->path = grpc_slice_ref_internal(args->path);
-  calld->call_start_time = args->start_time;
-  calld->deadline = args->deadline;
-  calld->arena = args->arena;
-  calld->owning_call = args->call_stack;
-  calld->call_combiner = args->call_combiner;
-  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
-    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
-                             calld->deadline);
-  }
-  calld->enable_retries = chand->enable_retries;
-  calld->send_messages.Init();
+  new (elem->call_data) call_data(elem, *chand, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -3283,34 +3341,12 @@ static void cc_destroy_call_elem(grpc_call_element* elem,
                                  const grpc_call_final_info* final_info,
                                  grpc_closure* then_schedule_closure) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  if (GPR_LIKELY(chand->deadline_checking_enabled)) {
-    grpc_deadline_state_destroy(elem);
-  }
-  grpc_slice_unref_internal(calld->path);
-  calld->retry_throttle_data.reset();
-  calld->method_params.reset();
-  GRPC_ERROR_UNREF(calld->cancel_error);
   if (GPR_LIKELY(calld->subchannel_call != nullptr)) {
     grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = nullptr;
-    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
-                               "client_channel_destroy_call");
-  }
-  for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
-    GPR_ASSERT(calld->pending_batches[i].batch == nullptr);
-  }
-  if (GPR_LIKELY(calld->pick.connected_subchannel != nullptr)) {
-    calld->pick.connected_subchannel.reset();
-  }
-  for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
-    if (calld->pick.subchannel_call_context[i].value != nullptr) {
-      calld->pick.subchannel_call_context[i].destroy(
-          calld->pick.subchannel_call_context[i].value);
-    }
   }
-  calld->send_messages.Destroy();
+  calld->~call_data();
   GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
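Reviewer note: the recurring pattern in this file is that call_data and the
retry structs become real C++ types, constructed with placement new into arena
storage (cc_init_call_elem) and torn down with an explicit destructor call
(cc_destroy_call_elem), since the arena releases its memory in bulk and
`delete` would be invalid. The bit-field members (`bool x : 1;`) still have to
be initialized in the constructor's init list because C++11 does not allow
default member initializers on bit-fields, which is why only the plain members
get `= nullptr` / `= false`. A minimal self-contained sketch of the pattern,
with malloc standing in for gpr_arena_alloc (toy types, not gRPC code):

```cpp
#include <cstdlib>
#include <new>

struct call_data {
  // Bit-fields cannot use `= false` in C++11, so they go in the init list.
  call_data() : pending_send_message(false) {}
  ~call_data() { /* unref errors, release refs, assert invariants */ }
  bool pending_send_message : 1;
  void* subchannel_call = nullptr;  // plain members can default inline
};

int main() {
  void* mem = std::malloc(sizeof(call_data));  // stand-in for arena alloc
  call_data* calld = new (mem) call_data();    // as in cc_init_call_elem
  calld->~call_data();                         // as in cc_destroy_call_elem
  std::free(mem);  // the real arena frees its blocks in bulk instead
  return 0;
}
```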
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.cc b/src/core/ext/filters/client_channel/health/health_check_client.cc
index 591637aa868..587919596ff 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.cc
+++ b/src/core/ext/filters/client_channel/health/health_check_client.cc
@@ -286,10 +286,9 @@ HealthCheckClient::CallState::CallState(
       health_check_client_(std::move(health_check_client)),
       pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
       arena_(gpr_arena_create(health_check_client_->connected_subchannel_
-                                  ->GetInitialCallSizeEstimate(0))) {
-  memset(&call_combiner_, 0, sizeof(call_combiner_));
+                                  ->GetInitialCallSizeEstimate(0))),
+      payload_(context_) {
   grpc_call_combiner_init(&call_combiner_);
-  memset(context_, 0, sizeof(context_));
   gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0));
 }
 
diff --git a/src/core/ext/filters/client_channel/health/health_check_client.h b/src/core/ext/filters/client_channel/health/health_check_client.h
index 7f77348f185..f6babef7d62 100644
--- a/src/core/ext/filters/client_channel/health/health_check_client.h
+++ b/src/core/ext/filters/client_channel/health/health_check_client.h
@@ -97,7 +97,7 @@ class HealthCheckClient
 
     gpr_arena* arena_;
     grpc_call_combiner call_combiner_;
-    grpc_call_context_element context_[GRPC_CONTEXT_COUNT];
+    grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};
 
     // The streaming call to the backend. Always non-NULL.
     grpc_subchannel_call* call_;
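
Reviewer note: `= {}` value-initializes the array at construction, which is
exactly what let the .cc hunk above drop `memset(context_, 0, sizeof(context_))`.
A reduced before/after sketch (toy element type, for illustration only):

```cpp
#include <cstring>

struct context_element_like { void* value; void (*destroy)(void*); };

struct before {
  before() { std::memset(context_, 0, sizeof(context_)); }  // manual zeroing
  context_element_like context_[4];
};

struct after {
  context_element_like context_[4] = {};  // value-initialized, no memset
};
```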
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index 21f80b7b947..b0040457a61 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -63,29 +63,29 @@ class LoadBalancingPolicy
   /// State used for an LB pick.
   struct PickState {
     /// Initial metadata associated with the picking call.
-    grpc_metadata_batch* initial_metadata;
+    grpc_metadata_batch* initial_metadata = nullptr;
     /// Bitmask used for selective cancelling. See
     /// \a CancelMatchingPicksLocked() and \a GRPC_INITIAL_METADATA_* in
     /// grpc_types.h.
-    uint32_t initial_metadata_flags;
+    uint32_t initial_metadata_flags = 0;
     /// Storage for LB token in \a initial_metadata, or nullptr if not used.
     grpc_linked_mdelem lb_token_mdelem_storage;
     /// Closure to run when pick is complete, if not completed synchronously.
     /// If null, pick will fail if a result is not available synchronously.
-    grpc_closure* on_complete;
+    grpc_closure* on_complete = nullptr;
     /// Will be set to the selected subchannel, or nullptr on failure or when
     /// the LB policy decides to drop the call.
     RefCountedPtr<ConnectedSubchannel> connected_subchannel;
     /// Will be populated with context to pass to the subchannel call, if
     /// needed.
-    grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
+    grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT] = {};
     /// Upon success, \a *user_data will be set to whatever opaque information
     /// may need to be propagated from the LB policy, or nullptr if not needed.
     // TODO(roth): As part of revamping our metadata APIs, try to find a
     // way to clean this up and C++-ify it.
-    void** user_data;
+    void** user_data = nullptr;
     /// Next pointer.  For internal use by LB policy.
-    PickState* next;
+    PickState* next = nullptr;
   };
 
   // Not copyable nor movable.
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index cc259bcdbff..399bb452f45 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -37,16 +37,27 @@ static void destroy_channel_elem(grpc_channel_element* elem) {}
 namespace {
 
 struct call_data {
+  call_data(const grpc_call_element_args& args) {
+    if (args.context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
+      // Get stats object from context and take a ref.
+      client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
+                         args.context[GRPC_GRPCLB_CLIENT_STATS].value)
+                         ->Ref();
+      // Record call started.
+      client_stats->AddCallStarted();
+    }
+  }
+
   // Stats object to update.
   grpc_core::RefCountedPtr<grpc_core::GrpcLbClientStats> client_stats;
   // State for intercepting send_initial_metadata.
   grpc_closure on_complete_for_send;
   grpc_closure* original_on_complete_for_send;
-  bool send_initial_metadata_succeeded;
+  bool send_initial_metadata_succeeded = false;
   // State for intercepting recv_initial_metadata.
   grpc_closure recv_initial_metadata_ready;
   grpc_closure* original_recv_initial_metadata_ready;
-  bool recv_initial_metadata_succeeded;
+  bool recv_initial_metadata_succeeded = false;
 };
 
 }  // namespace
@@ -70,16 +81,8 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
 
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  // Get stats object from context and take a ref.
   GPR_ASSERT(args->context != nullptr);
-  if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
-    calld->client_stats = static_cast<grpc_core::GrpcLbClientStats*>(
-                              args->context[GRPC_GRPCLB_CLIENT_STATS].value)
-                              ->Ref();
-    // Record call started.
-    calld->client_stats->AddCallStarted();
-  }
+  new (elem->call_data) call_data(*args);
   return GRPC_ERROR_NONE;
 }
 
@@ -97,6 +100,7 @@ static void destroy_call_elem(grpc_call_element* elem,
     // TODO(roth): Eliminate this once filter stack is converted to C++.
     calld->client_stats.reset();
   }
+  calld->~call_data();
 }
 
 static void start_transport_stream_op_batch(
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 340fa0c464b..a56db0201b8 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -162,12 +162,16 @@ struct grpc_subchannel {
 };
 
 struct grpc_subchannel_call {
+  grpc_subchannel_call(grpc_core::ConnectedSubchannel* connection,
+                       const grpc_core::ConnectedSubchannel::CallArgs& args)
+      : connection(connection), deadline(args.deadline) {}
+
   grpc_core::ConnectedSubchannel* connection;
-  grpc_closure* schedule_closure_after_destroy;
+  grpc_closure* schedule_closure_after_destroy = nullptr;
   // state needed to support channelz interception of recv trailing metadata.
   grpc_closure recv_trailing_metadata_ready;
   grpc_closure* original_recv_trailing_metadata;
-  grpc_metadata_batch* recv_trailing_metadata;
+  grpc_metadata_batch* recv_trailing_metadata = nullptr;
   grpc_millis deadline;
 };
 
@@ -905,6 +909,7 @@ static void subchannel_call_destroy(void* call, grpc_error* error) {
   grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
                           c->schedule_closure_after_destroy);
   connection->Unref(DEBUG_LOCATION, "subchannel_call");
+  c->~grpc_subchannel_call();
 }
 
 void grpc_subchannel_call_set_cleanup_closure(grpc_subchannel_call* call,
@@ -1102,14 +1107,12 @@ grpc_error* ConnectedSubchannel::CreateCall(const CallArgs& args,
                                             grpc_subchannel_call** call) {
   const size_t allocation_size =
       GetInitialCallSizeEstimate(args.parent_data_size);
-  *call = static_cast<grpc_subchannel_call*>(
-      gpr_arena_alloc(args.arena, allocation_size));
+  *call = new (gpr_arena_alloc(args.arena, allocation_size))
+      grpc_subchannel_call(this, args);
   grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(*call);
   RefCountedPtr<ConnectedSubchannel> connection =
       Ref(DEBUG_LOCATION, "subchannel_call");
   connection.release();  // Ref is passed to the grpc_subchannel_call object.
-  (*call)->connection = this;
-  (*call)->deadline = args.deadline;
   const grpc_call_element_args call_args = {
       callstk,           /* call_stack */
       nullptr,           /* server_transport_data */
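
Reviewer note: the `parent_data_size` handling relies on a single allocation
holding the call object plus trailing caller-owned storage; the client_channel
hunk above placement-news its subchannel_call_retry_state into that trailing
region. A simplified sketch of the layout (toy types; the real code sizes and
aligns the block via GetInitialCallSizeEstimate):

```cpp
#include <cstdlib>
#include <new>

struct subchannel_call_like { int deadline; };
struct retry_state_like { bool dispatched = false; };

int main() {
  // One block: [call object][call stack ...][parent data]; reduced here to
  // object + parent data. gRPC computes the size (and padding) itself.
  char* block = static_cast<char*>(
      std::malloc(sizeof(subchannel_call_like) + sizeof(retry_state_like)));
  auto* call = new (block) subchannel_call_like{42};
  auto* state = new (block + sizeof(subchannel_call_like)) retry_state_like();
  state->~retry_state_like();
  call->~subchannel_call_like();
  std::free(block);
  return 0;
}
```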
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index d23ad67ad51..b4cb07f0f92 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -27,6 +27,7 @@
 #include <grpc/support/time.h>
 
 #include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/gprpp/memory.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/surface/channel_init.h"
@@ -152,7 +153,11 @@ static void inject_recv_trailing_metadata_ready(
 // Callback and associated state for starting the timer after call stack
 // initialization has been completed.
 struct start_timer_after_init_state {
-  bool in_call_combiner;
+  start_timer_after_init_state(grpc_call_element* elem, grpc_millis deadline)
+      : elem(elem), deadline(deadline) {}
+  ~start_timer_after_init_state() { start_timer_if_needed(elem, deadline); }
+
+  bool in_call_combiner = false;
   grpc_call_element* elem;
   grpc_millis deadline;
   grpc_closure closure;
@@ -171,20 +176,16 @@ static void start_timer_after_init(void* arg, grpc_error* error) {
                              "scheduling deadline timer");
     return;
   }
-  start_timer_if_needed(state->elem, state->deadline);
-  gpr_free(state);
+  grpc_core::Delete(state);
   GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                           "done scheduling deadline timer");
 }
 
-void grpc_deadline_state_init(grpc_call_element* elem,
-                              grpc_call_stack* call_stack,
-                              grpc_call_combiner* call_combiner,
-                              grpc_millis deadline) {
-  grpc_deadline_state* deadline_state =
-      static_cast<grpc_deadline_state*>(elem->call_data);
-  deadline_state->call_stack = call_stack;
-  deadline_state->call_combiner = call_combiner;
+grpc_deadline_state::grpc_deadline_state(grpc_call_element* elem,
+                                         grpc_call_stack* call_stack,
+                                         grpc_call_combiner* call_combiner,
+                                         grpc_millis deadline)
+    : call_stack(call_stack), call_combiner(call_combiner) {
   // Deadline will always be infinite on servers, so the timer will only be
   // set on clients with a finite deadline.
   if (deadline != GRPC_MILLIS_INF_FUTURE) {
@@ -196,21 +197,14 @@ void grpc_deadline_state_init(grpc_call_element* elem,
     // create a closure to start the timer, and we schedule that closure
     // to be run after call stack initialization is done.
     struct start_timer_after_init_state* state =
-        static_cast<struct start_timer_after_init_state*>(
-            gpr_zalloc(sizeof(*state)));
-    state->elem = elem;
-    state->deadline = deadline;
+        grpc_core::New<start_timer_after_init_state>(elem, deadline);
     GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
                       grpc_schedule_on_exec_ctx);
     GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
   }
 }
 
-void grpc_deadline_state_destroy(grpc_call_element* elem) {
-  grpc_deadline_state* deadline_state =
-      static_cast<grpc_deadline_state*>(elem->call_data);
-  cancel_timer_if_needed(deadline_state);
-}
+grpc_deadline_state::~grpc_deadline_state() { cancel_timer_if_needed(this); }
 
 void grpc_deadline_state_reset(grpc_call_element* elem,
                                grpc_millis new_deadline) {
@@ -269,8 +263,8 @@ typedef struct server_call_data {
 // Constructor for call_data.  Used for both client and server filters.
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
-                           args->deadline);
+  new (elem->call_data) grpc_deadline_state(
+      elem, args->call_stack, args->call_combiner, args->deadline);
   return GRPC_ERROR_NONE;
 }
 
@@ -278,7 +272,9 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
 static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
-  grpc_deadline_state_destroy(elem);
+  grpc_deadline_state* deadline_state =
+      static_cast<grpc_deadline_state*>(elem->call_data);
+  deadline_state->~grpc_deadline_state();
 }
 
 // Method for starting a call op for client filter.
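
Reviewer note: two things happen in this file. grpc_deadline_state gains a real
ctor/dtor (replacing the _init/_destroy free functions), and the destructor of
start_timer_after_init_state now performs the deferred start_timer_if_needed
call, so the single `grpc_core::Delete(state)` both frees the object and fires
the timer. A hedged sketch of that dtor-as-completion idiom (toy types; New and
Delete are assumed, per the gprpp/memory.h include added above, to wrap
allocation-plus-placement-new and dtor-plus-free):

```cpp
#include <cstdio>

// Sketch only: an object whose destructor runs the deferred action, so one
// Delete() call in the callback both cleans up and starts the timer.
struct start_timer_state_like {
  explicit start_timer_state_like(int deadline) : deadline(deadline) {}
  ~start_timer_state_like() {
    std::printf("start timer, deadline=%d\n", deadline);
  }
  int deadline;
};

int main() {
  auto* state = new start_timer_state_like(100);  // ~ grpc_core::New<...>()
  delete state;  // ~ grpc_core::Delete(state): dtor fires the deferred work
  return 0;
}
```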
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index 1d797f445a7..e37032999c6 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -22,19 +22,23 @@
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/timer.h"
 
-typedef enum grpc_deadline_timer_state {
+enum grpc_deadline_timer_state {
   GRPC_DEADLINE_STATE_INITIAL,
   GRPC_DEADLINE_STATE_PENDING,
   GRPC_DEADLINE_STATE_FINISHED
-} grpc_deadline_timer_state;
+};
 
 // State used for filters that enforce call deadlines.
 // Must be the first field in the filter's call_data.
-typedef struct grpc_deadline_state {
+struct grpc_deadline_state {
+  grpc_deadline_state(grpc_call_element* elem, grpc_call_stack* call_stack,
+                      grpc_call_combiner* call_combiner, grpc_millis deadline);
+  ~grpc_deadline_state();
+
   // We take a reference to the call stack for the timer callback.
   grpc_call_stack* call_stack;
   grpc_call_combiner* call_combiner;
-  grpc_deadline_timer_state timer_state;
+  grpc_deadline_timer_state timer_state = GRPC_DEADLINE_STATE_INITIAL;
   grpc_timer timer;
   grpc_closure timer_callback;
   // Closure to invoke when we receive trailing metadata.
@@ -43,21 +47,13 @@ typedef struct grpc_deadline_state {
   // The original recv_trailing_metadata_ready closure, which we chain to
   // after our own closure is invoked.
   grpc_closure* original_recv_trailing_metadata_ready;
-} grpc_deadline_state;
+};
 
 //
 // NOTE: All of these functions require that the first field in
 // elem->call_data is a grpc_deadline_state.
 //
 
-// assumes elem->call_data is zero'd
-void grpc_deadline_state_init(grpc_call_element* elem,
-                              grpc_call_stack* call_stack,
-                              grpc_call_combiner* call_combiner,
-                              grpc_millis deadline);
-
-void grpc_deadline_state_destroy(grpc_call_element* elem);
-
 // Cancels the existing timer and starts a new one with new_deadline.
 //
 // Note: It is generally safe to call this with an earlier deadline
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index cd459e47cd2..bf9a01f659b 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -37,10 +37,31 @@
 #define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
 
 /* default maximum size of payload eligible for GET request */
-static const size_t kMaxPayloadSizeForGet = 2048;
+static constexpr size_t kMaxPayloadSizeForGet = 2048;
+
+static void recv_initial_metadata_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+static void send_message_on_complete(void* arg, grpc_error* error);
 
 namespace {
 struct call_data {
+  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+      : call_combiner(args.call_combiner) {
+    GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+                      ::recv_initial_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+                      ::recv_trailing_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+                      elem, grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+                      elem, grpc_schedule_on_exec_ctx);
+  }
+
+  ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
   grpc_call_combiner* call_combiner;
   // State for handling send_initial_metadata ops.
   grpc_linked_mdelem method;
@@ -51,18 +72,18 @@ struct call_data {
   grpc_linked_mdelem user_agent;
   // State for handling recv_initial_metadata ops.
   grpc_metadata_batch* recv_initial_metadata;
-  grpc_error* recv_initial_metadata_error;
-  grpc_closure* original_recv_initial_metadata_ready;
+  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
+  grpc_closure* original_recv_initial_metadata_ready = nullptr;
   grpc_closure recv_initial_metadata_ready;
   // State for handling recv_trailing_metadata ops.
   grpc_metadata_batch* recv_trailing_metadata;
   grpc_closure* original_recv_trailing_metadata_ready;
   grpc_closure recv_trailing_metadata_ready;
-  grpc_error* recv_trailing_metadata_error;
-  bool seen_recv_trailing_metadata_ready;
+  grpc_error* recv_trailing_metadata_error = GRPC_ERROR_NONE;
+  bool seen_recv_trailing_metadata_ready = false;
   // State for handling send_message ops.
   grpc_transport_stream_op_batch* send_message_batch;
-  size_t send_message_bytes_read;
+  size_t send_message_bytes_read = 0;
   grpc_core::ManualConstructor<grpc_core::ByteStreamCache> send_message_cache;
   grpc_core::ManualConstructor<grpc_core::ByteStreamCache::CachingByteStream>
       send_message_caching_stream;
@@ -442,18 +463,7 @@ done:
 /* Constructor for call_data */
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  calld->call_combiner = args->call_combiner;
-  GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
-                    recv_initial_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
-                    recv_trailing_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
-                    elem, grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
-                    on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
+  new (elem->call_data) call_data(elem, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -462,7 +472,7 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+  calld->~call_data();
 }
 
 static grpc_mdelem scheme_from_args(const grpc_channel_args* args) {
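
Reviewer note: a detail worth calling out in this constructor is that the
closure fields and the file-scope callbacks now share names, so the ctor has to
write `::recv_initial_metadata_ready` (etc.) to name the global function;
unqualified lookup inside the class would find the member first. Minimal
illustration (toy signature, not gRPC code):

```cpp
static void ready(void* arg, int error) {}  // file-scope callback

struct call_data {
  call_data() { fn = ::ready; }  // plain `ready` would find the member below
  void (*fn)(void*, int);
  int ready = 0;  // member that shadows the global inside class scope
};
```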
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index 933fe3c77b7..9c8c8d9e188 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -39,6 +39,10 @@
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/transport/static_metadata.h"
 
+static void start_send_message_batch(void* arg, grpc_error* unused);
+static void send_message_on_complete(void* arg, grpc_error* error);
+static void on_send_message_next_done(void* arg, grpc_error* error);
+
 namespace {
 enum initial_metadata_state {
   // Initial metadata not yet seen.
@@ -50,6 +54,23 @@ enum initial_metadata_state {
 };
 
 struct call_data {
+  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+      : call_combiner(args.call_combiner) {
+    GRPC_CLOSURE_INIT(&start_send_message_batch_in_call_combiner,
+                      start_send_message_batch, elem,
+                      grpc_schedule_on_exec_ctx);
+    grpc_slice_buffer_init(&slices);
+    GRPC_CLOSURE_INIT(&send_message_on_complete, ::send_message_on_complete,
+                      elem, grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&on_send_message_next_done, ::on_send_message_next_done,
+                      elem, grpc_schedule_on_exec_ctx);
+  }
+
+  ~call_data() {
+    grpc_slice_buffer_destroy_internal(&slices);
+    GRPC_ERROR_UNREF(cancel_error);
+  }
+
   grpc_call_combiner* call_combiner;
   grpc_linked_mdelem compression_algorithm_storage;
   grpc_linked_mdelem stream_compression_algorithm_storage;
@@ -57,11 +78,12 @@ struct call_data {
   grpc_linked_mdelem accept_stream_encoding_storage;
   /** Compression algorithm we'll try to use. It may be given by incoming
    * metadata, or by the channel's default compression settings. */
-  grpc_message_compression_algorithm message_compression_algorithm;
-  initial_metadata_state send_initial_metadata_state;
-  grpc_error* cancel_error;
+  grpc_message_compression_algorithm message_compression_algorithm =
+      GRPC_MESSAGE_COMPRESS_NONE;
+  initial_metadata_state send_initial_metadata_state = INITIAL_METADATA_UNSEEN;
+  grpc_error* cancel_error = GRPC_ERROR_NONE;
   grpc_closure start_send_message_batch_in_call_combiner;
-  grpc_transport_stream_op_batch* send_message_batch;
+  grpc_transport_stream_op_batch* send_message_batch = nullptr;
   grpc_slice_buffer slices; /**< Buffers up input slices to be compressed */
   grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream>
       replacement_stream;
@@ -424,16 +446,7 @@ static void compress_start_transport_stream_op_batch(
 /* Constructor for call_data */
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  calld->call_combiner = args->call_combiner;
-  calld->cancel_error = GRPC_ERROR_NONE;
-  grpc_slice_buffer_init(&calld->slices);
-  GRPC_CLOSURE_INIT(&calld->start_send_message_batch_in_call_combiner,
-                    start_send_message_batch, elem, grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->on_send_message_next_done,
-                    on_send_message_next_done, elem, grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->send_message_on_complete, send_message_on_complete,
-                    elem, grpc_schedule_on_exec_ctx);
+  new (elem->call_data) call_data(elem, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -442,8 +455,7 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  grpc_slice_buffer_destroy_internal(&calld->slices);
-  GRPC_ERROR_UNREF(calld->cancel_error);
+  calld->~call_data();
 }
 
 /* Constructor for channel_data */
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 436ea09d942..ce1be8370c6 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -35,9 +35,32 @@
 #define EXPECTED_CONTENT_TYPE "application/grpc"
 #define EXPECTED_CONTENT_TYPE_LENGTH sizeof(EXPECTED_CONTENT_TYPE) - 1
 
+static void hs_recv_initial_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_trailing_metadata_ready(void* user_data, grpc_error* err);
+static void hs_recv_message_ready(void* user_data, grpc_error* err);
+
 namespace {
 
 struct call_data {
+  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+      : call_combiner(args.call_combiner) {
+    GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+                      hs_recv_initial_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&recv_message_ready, hs_recv_message_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+                      hs_recv_trailing_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+  }
+
+  ~call_data() {
+    GRPC_ERROR_UNREF(recv_initial_metadata_ready_error);
+    if (have_read_stream) {
+      read_stream->Orphan();
+    }
+  }
+
   grpc_call_combiner* call_combiner;
 
   // Outgoing headers to add to send_initial_metadata.
@@ -47,27 +70,27 @@ struct call_data {
   // If we see the recv_message contents in the GET query string, we
   // store it here.
   grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> read_stream;
-  bool have_read_stream;
+  bool have_read_stream = false;
 
   // State for intercepting recv_initial_metadata.
   grpc_closure recv_initial_metadata_ready;
-  grpc_error* recv_initial_metadata_ready_error;
+  grpc_error* recv_initial_metadata_ready_error = GRPC_ERROR_NONE;
   grpc_closure* original_recv_initial_metadata_ready;
-  grpc_metadata_batch* recv_initial_metadata;
+  grpc_metadata_batch* recv_initial_metadata = nullptr;
   uint32_t* recv_initial_metadata_flags;
-  bool seen_recv_initial_metadata_ready;
+  bool seen_recv_initial_metadata_ready = false;
 
   // State for intercepting recv_message.
   grpc_closure* original_recv_message_ready;
   grpc_closure recv_message_ready;
   grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
-  bool seen_recv_message_ready;
+  bool seen_recv_message_ready = false;
 
   // State for intercepting recv_trailing_metadata
   grpc_closure recv_trailing_metadata_ready;
   grpc_closure* original_recv_trailing_metadata_ready;
   grpc_error* recv_trailing_metadata_ready_error;
-  bool seen_recv_trailing_metadata_ready;
+  bool seen_recv_trailing_metadata_ready = false;
 };
 
 struct channel_data {
@@ -431,16 +454,7 @@ static void hs_start_transport_stream_op_batch(
 /* Constructor for call_data */
 static grpc_error* hs_init_call_elem(grpc_call_element* elem,
                                      const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  calld->call_combiner = args->call_combiner;
-  GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
-                    hs_recv_initial_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_message_ready, hs_recv_message_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
-                    hs_recv_trailing_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
+  new (elem->call_data) call_data(elem, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -449,10 +463,7 @@ static void hs_destroy_call_elem(grpc_call_element* elem,
                                  const grpc_call_final_info* final_info,
                                  grpc_closure* ignored) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  GRPC_ERROR_UNREF(calld->recv_initial_metadata_ready_error);
-  if (calld->have_read_stream) {
-    calld->read_stream->Orphan();
-  }
+  calld->~call_data();
 }
 
 /* Constructor for channel_data */
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 2d3b16d992c..94d6942aa4f 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -90,9 +90,53 @@ RefCountedPtr<MessageSizeLimits> MessageSizeLimits::CreateFromJson(
 }  // namespace
 }  // namespace grpc_core
 
+static void recv_message_ready(void* user_data, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
+
 namespace {
 
+struct channel_data {
+  message_size_limits limits;
+  // Maps path names to refcounted_message_size_limits structs.
+  grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
+      grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
+      method_limit_table;
+};
+
 struct call_data {
+  call_data(grpc_call_element* elem, const channel_data& chand,
+            const grpc_call_element_args& args)
+      : call_combiner(args.call_combiner), limits(chand.limits) {
+    GRPC_CLOSURE_INIT(&recv_message_ready, ::recv_message_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+                      ::recv_trailing_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    // Get max sizes from channel data, then merge in per-method config values.
+    // Note: Per-method config is only available on the client, so we
+    // apply the max request size to the send limit and the max response
+    // size to the receive limit.
+    if (chand.method_limit_table != nullptr) {
+      grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
+          grpc_core::ServiceConfig::MethodConfigTableLookup(
+              *chand.method_limit_table, args.path);
+      if (limits != nullptr) {
+        if (limits->limits().max_send_size >= 0 &&
+            (limits->limits().max_send_size < this->limits.max_send_size ||
+             this->limits.max_send_size < 0)) {
+          this->limits.max_send_size = limits->limits().max_send_size;
+        }
+        if (limits->limits().max_recv_size >= 0 &&
+            (limits->limits().max_recv_size < this->limits.max_recv_size ||
+             this->limits.max_recv_size < 0)) {
+          this->limits.max_recv_size = limits->limits().max_recv_size;
+        }
+      }
+    }
+  }
+
+  ~call_data() { GRPC_ERROR_UNREF(error); }
+
   grpc_call_combiner* call_combiner;
   message_size_limits limits;
   // Receive closures are chained: we inject this closure as the
@@ -101,25 +145,17 @@ struct call_data {
   grpc_closure recv_message_ready;
   grpc_closure recv_trailing_metadata_ready;
   // The error caused by a message that is too large, or GRPC_ERROR_NONE
-  grpc_error* error;
+  grpc_error* error = GRPC_ERROR_NONE;
   // Used by recv_message_ready.
-  grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+  grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
   // Original recv_message_ready callback, invoked after our own.
-  grpc_closure* next_recv_message_ready;
+  grpc_closure* next_recv_message_ready = nullptr;
   // Original recv_trailing_metadata callback, invoked after our own.
   grpc_closure* original_recv_trailing_metadata_ready;
-  bool seen_recv_trailing_metadata;
+  bool seen_recv_trailing_metadata = false;
   grpc_error* recv_trailing_metadata_error;
 };
 
-struct channel_data {
-  message_size_limits limits;
-  // Maps path names to refcounted_message_size_limits structs.
-  grpc_core::RefCountedPtr<grpc_core::SliceHashTable<
-      grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits>>>
-      method_limit_table;
-};
-
 }  // namespace
 
 // Callback invoked when we receive a message.  Here we check the max
@@ -228,38 +264,7 @@ static void start_transport_stream_op_batch(
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  calld->call_combiner = args->call_combiner;
-  calld->next_recv_message_ready = nullptr;
-  calld->original_recv_trailing_metadata_ready = nullptr;
-  calld->error = GRPC_ERROR_NONE;
-  GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
-                    recv_trailing_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  // Get max sizes from channel data, then merge in per-method config values.
-  // Note: Per-method config is only available on the client, so we
-  // apply the max request size to the send limit and the max response
-  // size to the receive limit.
-  calld->limits = chand->limits;
-  if (chand->method_limit_table != nullptr) {
-    grpc_core::RefCountedPtr<grpc_core::MessageSizeLimits> limits =
-        grpc_core::ServiceConfig::MethodConfigTableLookup(
-            *chand->method_limit_table, args->path);
-    if (limits != nullptr) {
-      if (limits->limits().max_send_size >= 0 &&
-          (limits->limits().max_send_size < calld->limits.max_send_size ||
-           calld->limits.max_send_size < 0)) {
-        calld->limits.max_send_size = limits->limits().max_send_size;
-      }
-      if (limits->limits().max_recv_size >= 0 &&
-          (limits->limits().max_recv_size < calld->limits.max_recv_size ||
-           calld->limits.max_recv_size < 0)) {
-        calld->limits.max_recv_size = limits->limits().max_recv_size;
-      }
-    }
-  }
+  new (elem->call_data) call_data(elem, *chand, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -268,7 +273,7 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
-  GRPC_ERROR_UNREF(calld->error);
+  calld->~call_data();
 }
 
 static int default_size(const grpc_channel_args* args,
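
Reviewer note: channel_data moves above call_data here for a purely C++
reason: the new call_data constructor reads `chand.limits` and
`chand.method_limit_table`, and channel_data must be a complete type at that
point. Reduced example (toy fields):

```cpp
struct channel_data {  // must be complete before call_data's ctor uses it
  int max_send_size;
};

struct call_data {
  explicit call_data(const channel_data& chand)
      : max_send_size(chand.max_send_size) {}
  int max_send_size;
};
```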
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 7c2dc0bcbea..978ecd59e4c 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -154,53 +154,52 @@ bool g_flow_control_enabled = true;
 /*******************************************************************************
  * CONSTRUCTION/DESTRUCTION/REFCOUNTING
  */
-
-static void destruct_transport(grpc_chttp2_transport* t) {
+grpc_chttp2_transport::~grpc_chttp2_transport() {
   size_t i;
 
-  if (t->channelz_socket != nullptr) {
-    t->channelz_socket.reset();
+  if (channelz_socket != nullptr) {
+    channelz_socket.reset();
   }
 
-  grpc_endpoint_destroy(t->ep);
+  grpc_endpoint_destroy(ep);
 
-  grpc_slice_buffer_destroy_internal(&t->qbuf);
+  grpc_slice_buffer_destroy_internal(&qbuf);
 
-  grpc_slice_buffer_destroy_internal(&t->outbuf);
-  grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
+  grpc_slice_buffer_destroy_internal(&outbuf);
+  grpc_chttp2_hpack_compressor_destroy(&hpack_compressor);
 
-  grpc_slice_buffer_destroy_internal(&t->read_buffer);
-  grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
-  grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
+  grpc_slice_buffer_destroy_internal(&read_buffer);
+  grpc_chttp2_hpack_parser_destroy(&hpack_parser);
+  grpc_chttp2_goaway_parser_destroy(&goaway_parser);
 
   for (i = 0; i < STREAM_LIST_COUNT; i++) {
-    GPR_ASSERT(t->lists[i].head == nullptr);
-    GPR_ASSERT(t->lists[i].tail == nullptr);
+    GPR_ASSERT(lists[i].head == nullptr);
+    GPR_ASSERT(lists[i].tail == nullptr);
   }
 
-  GRPC_ERROR_UNREF(t->goaway_error);
+  GRPC_ERROR_UNREF(goaway_error);
 
-  GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
+  GPR_ASSERT(grpc_chttp2_stream_map_size(&stream_map) == 0);
 
-  grpc_chttp2_stream_map_destroy(&t->stream_map);
-  grpc_connectivity_state_destroy(&t->channel_callback.state_tracker);
+  grpc_chttp2_stream_map_destroy(&stream_map);
+  grpc_connectivity_state_destroy(&channel_callback.state_tracker);
 
-  GRPC_COMBINER_UNREF(t->combiner, "chttp2_transport");
+  GRPC_COMBINER_UNREF(combiner, "chttp2_transport");
 
-  cancel_pings(t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
+  cancel_pings(this,
+               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
 
-  while (t->write_cb_pool) {
-    grpc_chttp2_write_cb* next = t->write_cb_pool->next;
-    gpr_free(t->write_cb_pool);
-    t->write_cb_pool = next;
+  while (write_cb_pool) {
+    grpc_chttp2_write_cb* next = write_cb_pool->next;
+    gpr_free(write_cb_pool);
+    write_cb_pool = next;
   }
 
-  t->flow_control.Destroy();
+  flow_control.Destroy();
 
-  GRPC_ERROR_UNREF(t->closed_with_error);
-  gpr_free(t->ping_acks);
-  gpr_free(t->peer_string);
-  gpr_free(t);
+  GRPC_ERROR_UNREF(closed_with_error);
+  gpr_free(ping_acks);
+  gpr_free(peer_string);
 }
 
 #ifndef NDEBUG
@@ -212,7 +211,8 @@ void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason,
             t, val, val - 1, reason, file, line);
   }
   if (!gpr_unref(&t->refs)) return;
-  destruct_transport(t);
+  t->~grpc_chttp2_transport();
+  gpr_free(t);
 }
 
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
@@ -227,7 +227,8 @@ void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
 #else
 void grpc_chttp2_unref_transport(grpc_chttp2_transport* t) {
   if (!gpr_unref(&t->refs)) return;
-  destruct_transport(t);
+  t->~grpc_chttp2_transport();
+  gpr_free(t);
 }
 
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
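Because the transport's storage comes from gpr_malloc rather than operator new, a plain delete would be undefined behavior; the explicit destructor call and gpr_free must stay paired by hand, as both unref paths above now do. A hedged sketch of the idiom in isolation, with std::malloc standing in for gpr_malloc:

    #include <cstdlib>
    #include <new>
    #include <utility>

    template <typename T, typename... Args>
    T* manual_new(Args&&... args) {
      void* p = std::malloc(sizeof(T));
      return new (p) T(std::forward<Args>(args)...);
    }

    template <typename T>
    void manual_delete(T* t) {
      t->~T();       // run the destructor first...
      std::free(t);  // ...then release the raw storage it occupied
    }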
@@ -481,115 +482,97 @@ static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
   }
 }
 
-static void init_transport(grpc_chttp2_transport* t,
-                           const grpc_channel_args* channel_args,
-                           grpc_endpoint* ep, bool is_client,
-                           grpc_resource_user* resource_user) {
+grpc_chttp2_transport::grpc_chttp2_transport(
+    const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
+    grpc_resource_user* resource_user)
+    : ep(ep),
+      peer_string(grpc_endpoint_get_peer(ep)),
+      resource_user(resource_user),
+      combiner(grpc_combiner_create()),
+      is_client(is_client),
+      next_stream_id(is_client ? 1 : 2),
+      deframe_state(is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0) {
   GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
              GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
-
-  t->base.vtable = get_vtable();
-  t->ep = ep;
+  base.vtable = get_vtable();
   /* one ref is for destroy */
-  gpr_ref_init(&t->refs, 1);
-  t->combiner = grpc_combiner_create();
-  t->peer_string = grpc_endpoint_get_peer(ep);
-  t->endpoint_reading = 1;
-  t->next_stream_id = is_client ? 1 : 2;
-  t->is_client = is_client;
-  t->resource_user = resource_user;
-  t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
-  t->is_first_frame = true;
-  grpc_connectivity_state_init(
-      &t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
-      is_client ? "client_transport" : "server_transport");
-
-  grpc_slice_buffer_init(&t->qbuf);
-  grpc_slice_buffer_init(&t->outbuf);
-  grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
-
-  init_transport_closures(t);
-
-  t->goaway_error = GRPC_ERROR_NONE;
-  grpc_chttp2_goaway_parser_init(&t->goaway_parser);
-  grpc_chttp2_hpack_parser_init(&t->hpack_parser);
-
-  grpc_slice_buffer_init(&t->read_buffer);
-
+  gpr_ref_init(&refs, 1);
   /* 8 is a random stab in the dark as to a good initial size: it's small enough
      that it shouldn't waste memory for infrequently used connections, yet
      large enough that the exponential growth should happen nicely when it's
      needed.
      TODO(ctiller): tune this */
-  grpc_chttp2_stream_map_init(&t->stream_map, 8);
+  grpc_chttp2_stream_map_init(&stream_map, 8);
 
+  grpc_slice_buffer_init(&read_buffer);
+  grpc_connectivity_state_init(
+      &channel_callback.state_tracker, GRPC_CHANNEL_READY,
+      is_client ? "client_transport" : "server_transport");
+  grpc_slice_buffer_init(&outbuf);
+  if (is_client) {
+    grpc_slice_buffer_add(&outbuf, grpc_slice_from_copied_string(
+                                       GRPC_CHTTP2_CLIENT_CONNECT_STRING));
+  }
+  grpc_chttp2_hpack_compressor_init(&hpack_compressor);
+  grpc_slice_buffer_init(&qbuf);
   /* copy in initial settings to all setting sets */
   size_t i;
   int j;
   for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
     for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
-      t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
+      settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
     }
   }
-  t->dirtied_local_settings = 1;
-  /* Hack: it's common for implementations to assume 65536 bytes initial send
-     window -- this should by rights be 0 */
-  t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
-  t->sent_local_settings = 0;
-  t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
+  grpc_chttp2_hpack_parser_init(&hpack_parser);
+  grpc_chttp2_goaway_parser_init(&goaway_parser);
 
-  if (is_client) {
-    grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
-                                          GRPC_CHTTP2_CLIENT_CONNECT_STRING));
-  }
+  init_transport_closures(this);
 
   /* configure http2 the way we like it */
   if (is_client) {
-    queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
-    queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+    queue_setting_update(this, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+    queue_setting_update(this, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
   }
-  queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+  queue_setting_update(this, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
                        DEFAULT_MAX_HEADER_LIST_SIZE);
-  queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
-                       1);
-
-  configure_transport_ping_policy(t);
-  init_transport_keepalive_settings(t);
+  queue_setting_update(this,
+                       GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
 
-  t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
+  configure_transport_ping_policy(this);
+  init_transport_keepalive_settings(this);
 
   bool enable_bdp = true;
   if (channel_args) {
-    enable_bdp = read_channel_args(t, channel_args, is_client);
+    enable_bdp = read_channel_args(this, channel_args, is_client);
   }
 
   if (g_flow_control_enabled) {
-    t->flow_control.Init<grpc_core::chttp2::TransportFlowControl>(t,
-                                                                  enable_bdp);
+    flow_control.Init<grpc_core::chttp2::TransportFlowControl>(this,
+                                                               enable_bdp);
   } else {
-    t->flow_control.Init<grpc_core::chttp2::TransportFlowControlDisabled>(t);
+    flow_control.Init<grpc_core::chttp2::TransportFlowControlDisabled>(this);
     enable_bdp = false;
   }
 
   /* No pings allowed before receiving a header or data frame. */
-  t->ping_state.pings_before_data_required = 0;
-  t->ping_state.is_delayed_ping_timer_set = false;
-  t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
+  ping_state.pings_before_data_required = 0;
+  ping_state.is_delayed_ping_timer_set = false;
+  ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
 
-  t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
-  t->ping_recv_state.ping_strikes = 0;
+  ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
+  ping_recv_state.ping_strikes = 0;
 
-  init_keepalive_pings_if_enabled(t);
+  init_keepalive_pings_if_enabled(this);
 
   if (enable_bdp) {
-    GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
-    schedule_bdp_ping_locked(t);
-    grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
+    GRPC_CHTTP2_REF_TRANSPORT(this, "bdp_ping");
+    schedule_bdp_ping_locked(this);
+    grpc_chttp2_act_on_flowctl_action(flow_control->PeriodicUpdate(), this,
                                       nullptr);
   }
 
-  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
-  post_benign_reclaimer(t);
+  grpc_chttp2_initiate_write(this, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
+  post_benign_reclaimer(this);
 }
 
 static void destroy_transport_locked(void* tp, grpc_error* error) {
@@ -599,6 +582,7 @@ static void destroy_transport_locked(void* tp, grpc_error* error) {
       t, grpc_error_set_int(
              GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
              GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+  // Must be the last line: this unref may destroy the transport.
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "destroy");
 }
 
@@ -683,115 +667,108 @@ void grpc_chttp2_stream_unref(grpc_chttp2_stream* s) {
 }
 #endif
 
-static int init_stream(grpc_transport* gt, grpc_stream* gs,
-                       grpc_stream_refcount* refcount, const void* server_data,
-                       gpr_arena* arena) {
-  GPR_TIMER_SCOPE("init_stream", 0);
-  grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
-  grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
-
-  s->t = t;
-  s->refcount = refcount;
+grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
+                                       grpc_stream_refcount* refcount,
+                                       const void* server_data,
+                                       gpr_arena* arena)
+    : t(t), refcount(refcount), metadata_buffer{{arena}, {arena}} {
   /* We reserve one 'active stream' that's dropped when the stream is
      read-closed. The others are for Chttp2IncomingByteStreams that are
      actively reading */
-  GRPC_CHTTP2_STREAM_REF(s, "chttp2");
-
-  grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[0], arena);
-  grpc_chttp2_incoming_metadata_buffer_init(&s->metadata_buffer[1], arena);
-  grpc_chttp2_data_parser_init(&s->data_parser);
-  grpc_slice_buffer_init(&s->flow_controlled_buffer);
-  s->deadline = GRPC_MILLIS_INF_FUTURE;
-  GRPC_CLOSURE_INIT(&s->complete_fetch_locked, complete_fetch_locked, s,
-                    grpc_schedule_on_exec_ctx);
-  grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer);
-  s->unprocessed_incoming_frames_buffer_cached_length = 0;
-  grpc_slice_buffer_init(&s->frame_storage);
-  grpc_slice_buffer_init(&s->compressed_data_buffer);
-  grpc_slice_buffer_init(&s->decompressed_data_buffer);
-  s->pending_byte_stream = false;
-  s->decompressed_header_bytes = 0;
-  GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s,
-                    grpc_combiner_scheduler(t->combiner));
-
+  GRPC_CHTTP2_STREAM_REF(this, "chttp2");
   GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
 
   if (server_data) {
-    s->id = static_cast<uint32_t>((uintptr_t)server_data);
-    *t->accepting_stream = s;
-    grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
+    id = static_cast<uint32_t>((uintptr_t)server_data);
+    *t->accepting_stream = this;
+    grpc_chttp2_stream_map_add(&t->stream_map, id, this);
     post_destructive_reclaimer(t);
   }
-
   if (t->flow_control->flow_control_enabled()) {
-    s->flow_control.Init<grpc_core::chttp2::StreamFlowControl>(
+    flow_control.Init<grpc_core::chttp2::StreamFlowControl>(
         static_cast<grpc_core::chttp2::TransportFlowControl*>(
             t->flow_control.get()),
-        s);
+        this);
   } else {
-    s->flow_control.Init<grpc_core::chttp2::StreamFlowControlDisabled>();
+    flow_control.Init<grpc_core::chttp2::StreamFlowControlDisabled>();
   }
 
-  return 0;
-}
+  grpc_slice_buffer_init(&frame_storage);
+  grpc_slice_buffer_init(&unprocessed_incoming_frames_buffer);
+  grpc_slice_buffer_init(&flow_controlled_buffer);
+  grpc_slice_buffer_init(&compressed_data_buffer);
+  grpc_slice_buffer_init(&decompressed_data_buffer);
 
-static void destroy_stream_locked(void* sp, grpc_error* error) {
-  GPR_TIMER_SCOPE("destroy_stream", 0);
-  grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
-  grpc_chttp2_transport* t = s->t;
+  GRPC_CLOSURE_INIT(&complete_fetch_locked, ::complete_fetch_locked, this,
+                    grpc_schedule_on_exec_ctx);
+  GRPC_CLOSURE_INIT(&reset_byte_stream, ::reset_byte_stream, this,
+                    grpc_combiner_scheduler(t->combiner));
+}
 
+grpc_chttp2_stream::~grpc_chttp2_stream() {
   if (t->channelz_socket != nullptr) {
-    if ((t->is_client && s->eos_received) || (!t->is_client && s->eos_sent)) {
+    if ((t->is_client && eos_received) || (!t->is_client && eos_sent)) {
       t->channelz_socket->RecordStreamSucceeded();
     } else {
       t->channelz_socket->RecordStreamFailed();
     }
   }
 
-  GPR_ASSERT((s->write_closed && s->read_closed) || s->id == 0);
-  if (s->id != 0) {
-    GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == nullptr);
+  GPR_ASSERT((write_closed && read_closed) || id == 0);
+  if (id != 0) {
+    GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, id) == nullptr);
   }
 
-  grpc_slice_buffer_destroy_internal(&s->unprocessed_incoming_frames_buffer);
-  grpc_slice_buffer_destroy_internal(&s->frame_storage);
-  grpc_slice_buffer_destroy_internal(&s->compressed_data_buffer);
-  grpc_slice_buffer_destroy_internal(&s->decompressed_data_buffer);
+  grpc_slice_buffer_destroy_internal(&unprocessed_incoming_frames_buffer);
+  grpc_slice_buffer_destroy_internal(&frame_storage);
+  grpc_slice_buffer_destroy_internal(&compressed_data_buffer);
+  grpc_slice_buffer_destroy_internal(&decompressed_data_buffer);
 
-  grpc_chttp2_list_remove_stalled_by_transport(t, s);
-  grpc_chttp2_list_remove_stalled_by_stream(t, s);
+  grpc_chttp2_list_remove_stalled_by_transport(t, this);
+  grpc_chttp2_list_remove_stalled_by_stream(t, this);
 
   for (int i = 0; i < STREAM_LIST_COUNT; i++) {
-    if (GPR_UNLIKELY(s->included[i])) {
+    if (GPR_UNLIKELY(included[i])) {
       gpr_log(GPR_ERROR, "%s stream %d still included in list %d",
-              t->is_client ? "client" : "server", s->id, i);
+              t->is_client ? "client" : "server", id, i);
       abort();
     }
   }
 
-  GPR_ASSERT(s->send_initial_metadata_finished == nullptr);
-  GPR_ASSERT(s->fetching_send_message == nullptr);
-  GPR_ASSERT(s->send_trailing_metadata_finished == nullptr);
-  GPR_ASSERT(s->recv_initial_metadata_ready == nullptr);
-  GPR_ASSERT(s->recv_message_ready == nullptr);
-  GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);
-  grpc_chttp2_data_parser_destroy(&s->data_parser);
-  grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
-  grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
-  grpc_slice_buffer_destroy_internal(&s->flow_controlled_buffer);
-  GRPC_ERROR_UNREF(s->read_closed_error);
-  GRPC_ERROR_UNREF(s->write_closed_error);
-  GRPC_ERROR_UNREF(s->byte_stream_error);
+  GPR_ASSERT(send_initial_metadata_finished == nullptr);
+  GPR_ASSERT(fetching_send_message == nullptr);
+  GPR_ASSERT(send_trailing_metadata_finished == nullptr);
+  GPR_ASSERT(recv_initial_metadata_ready == nullptr);
+  GPR_ASSERT(recv_message_ready == nullptr);
+  GPR_ASSERT(recv_trailing_metadata_finished == nullptr);
+  grpc_slice_buffer_destroy_internal(&flow_controlled_buffer);
+  GRPC_ERROR_UNREF(read_closed_error);
+  GRPC_ERROR_UNREF(write_closed_error);
+  GRPC_ERROR_UNREF(byte_stream_error);
 
-  s->flow_control.Destroy();
+  flow_control.Destroy();
 
   if (t->resource_user != nullptr) {
     grpc_resource_user_free(t->resource_user, GRPC_RESOURCE_QUOTA_CALL_SIZE);
   }
 
   GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream");
+  GRPC_CLOSURE_SCHED(destroy_stream_arg, GRPC_ERROR_NONE);
+}
+
+static int init_stream(grpc_transport* gt, grpc_stream* gs,
+                       grpc_stream_refcount* refcount, const void* server_data,
+                       gpr_arena* arena) {
+  GPR_TIMER_SCOPE("init_stream", 0);
+  grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
+  new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);
+  return 0;
+}
 
-  GRPC_CLOSURE_SCHED(s->destroy_stream_arg, GRPC_ERROR_NONE);
+static void destroy_stream_locked(void* sp, grpc_error* error) {
+  GPR_TIMER_SCOPE("destroy_stream", 0);
+  grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(sp);
+  s->~grpc_chttp2_stream();
 }
 
 static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
@@ -1726,8 +1703,8 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
     gpr_free(str);
   }
 
-  op->handler_private.extra_arg = gs;
   GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
+  op->handler_private.extra_arg = gs;
   GRPC_CLOSURE_SCHED(
       GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
                         op, grpc_combiner_scheduler(t->combiner)),
@@ -2733,6 +2710,7 @@ static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
         grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
       t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
       GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
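+      // The transport is no longer zero-allocated, so mark this timer as
+      // unset explicitly; it may be canceled before it is ever started.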
+      grpc_timer_init_unset(&t->keepalive_watchdog_timer);
       send_keepalive_ping_locked(t);
       grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
     } else {
@@ -3212,9 +3190,8 @@ intptr_t grpc_chttp2_transport_get_socket_uuid(grpc_transport* transport) {
 grpc_transport* grpc_create_chttp2_transport(
     const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
     grpc_resource_user* resource_user) {
-  grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(
-      gpr_zalloc(sizeof(grpc_chttp2_transport)));
-  init_transport(t, channel_args, ep, is_client, resource_user);
+  auto t = new (gpr_malloc(sizeof(grpc_chttp2_transport)))
+      grpc_chttp2_transport(channel_args, ep, is_client, resource_user);
   return &t->base;
 }
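Note the switch from gpr_zalloc to gpr_malloc here: it is what forces the long tail of "= nullptr" and "= 0" default member initializers in internal.h below, since the constructor now runs over uninitialized memory and any field it does not set must carry its own initializer. The trap in miniature (hypothetical type, not gRPC API):

    #include <cstdlib>
    #include <new>

    struct widget {
      widget() {}     // note: does not touch count
      int count = 0;  // without this initializer, count would be garbage
    };

    int main() {
      void* raw = std::malloc(sizeof(widget));  // no zeroing, like gpr_malloc
      widget* w = new (raw) widget();
      // w->count is 0 only because of the default member initializer.
      w->~widget();
      std::free(raw);
    }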
 
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.cc b/src/core/ext/transport/chttp2/transport/frame_data.cc
index 933b32c03c0..1de00735cf3 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_data.cc
@@ -32,18 +32,12 @@
 #include "src/core/lib/slice/slice_string_helpers.h"
 #include "src/core/lib/transport/transport.h"
 
-grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser) {
-  parser->state = GRPC_CHTTP2_DATA_FH_0;
-  parser->parsing_frame = nullptr;
-  return GRPC_ERROR_NONE;
-}
-
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser) {
-  if (parser->parsing_frame != nullptr) {
-    GRPC_ERROR_UNREF(parser->parsing_frame->Finished(
+grpc_chttp2_data_parser::~grpc_chttp2_data_parser() {
+  if (parsing_frame != nullptr) {
+    GRPC_ERROR_UNREF(parsing_frame->Finished(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"), false));
   }
-  GRPC_ERROR_UNREF(parser->error);
+  GRPC_ERROR_UNREF(error);
 }
 
 grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index e5d01f764e7..2c5da99fa68 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -43,20 +43,18 @@ namespace grpc_core {
 class Chttp2IncomingByteStream;
 }  // namespace grpc_core
 
-typedef struct {
-  grpc_chttp2_stream_state state;
-  uint8_t frame_type;
-  uint32_t frame_size;
-  grpc_error* error;
+struct grpc_chttp2_data_parser {
+  grpc_chttp2_data_parser() = default;
+  ~grpc_chttp2_data_parser();
 
-  bool is_frame_compressed;
-  grpc_core::Chttp2IncomingByteStream* parsing_frame;
-} grpc_chttp2_data_parser;
+  grpc_chttp2_stream_state state = GRPC_CHTTP2_DATA_FH_0;
+  uint8_t frame_type = 0;
+  uint32_t frame_size = 0;
+  grpc_error* error = GRPC_ERROR_NONE;
 
-/* initialize per-stream state for data frame parsing */
-grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser);
-
-void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser);
+  bool is_frame_compressed = false;
+  grpc_core::Chttp2IncomingByteStream* parsing_frame = nullptr;
+};
 
 /* start processing a new data frame */
 grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
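This hunk shows the recurring shape of the conversion: a typedef struct plus _init/_destroy free functions becomes a C++ struct whose default member initializers take over for _init and whose destructor takes over for _destroy, so a forgotten _destroy can no longer leak the parser's error ref. Schematically (a sketch, not the exact gRPC types):

    struct parser_sketch {
      parser_sketch() = default;               // replaces the _init function
      ~parser_sketch() { /* release refs */ }  // replaces _destroy
      int state = 0;                           // was set by hand in _init
      void* parsing_frame = nullptr;
    };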
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index 4d7dfd900fb..dca15e76803 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -27,18 +27,6 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
-void grpc_chttp2_incoming_metadata_buffer_init(
-    grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena) {
-  buffer->arena = arena;
-  grpc_metadata_batch_init(&buffer->batch);
-  buffer->batch.deadline = GRPC_MILLIS_INF_FUTURE;
-}
-
-void grpc_chttp2_incoming_metadata_buffer_destroy(
-    grpc_chttp2_incoming_metadata_buffer* buffer) {
-  grpc_metadata_batch_destroy(&buffer->batch);
-}
-
 grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
     grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) {
   buffer->size += GRPC_MDELEM_LENGTH(elem);
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index d029cf00d46..c551b3cc8be 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -23,17 +23,20 @@
 
 #include "src/core/lib/transport/transport.h"
 
-typedef struct {
+struct grpc_chttp2_incoming_metadata_buffer {
+  grpc_chttp2_incoming_metadata_buffer(gpr_arena* arena) : arena(arena) {
+    grpc_metadata_batch_init(&batch);
+    batch.deadline = GRPC_MILLIS_INF_FUTURE;
+  }
+  ~grpc_chttp2_incoming_metadata_buffer() {
+    grpc_metadata_batch_destroy(&batch);
+  }
+
   gpr_arena* arena;
   grpc_metadata_batch batch;
-  size_t size;  // total size of metadata
-} grpc_chttp2_incoming_metadata_buffer;
-
-/** assumes everything initially zeroed */
-void grpc_chttp2_incoming_metadata_buffer_init(
-    grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena);
-void grpc_chttp2_incoming_metadata_buffer_destroy(
-    grpc_chttp2_incoming_metadata_buffer* buffer);
+  size_t size = 0;  // total size of metadata
+};
+
 void grpc_chttp2_incoming_metadata_buffer_publish(
     grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch);
 
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index 202017641b0..3ee408c103e 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -103,8 +103,8 @@ const char* grpc_chttp2_initiate_write_reason_string(
     grpc_chttp2_initiate_write_reason reason);
 
 typedef struct {
-  grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT];
-  uint64_t inflight_id;
+  grpc_closure_list lists[GRPC_CHTTP2_PCL_COUNT] = {};
+  uint64_t inflight_id = 0;
 } grpc_chttp2_ping_queue;
 
 typedef struct {
@@ -280,6 +280,11 @@ typedef enum {
 } grpc_chttp2_keepalive_state;
 
 struct grpc_chttp2_transport {
+  grpc_chttp2_transport(const grpc_channel_args* channel_args,
+                        grpc_endpoint* ep, bool is_client,
+                        grpc_resource_user* resource_user);
+  ~grpc_chttp2_transport();
+
   grpc_transport base; /* must be first */
   gpr_refcount refs;
   grpc_endpoint* ep;
@@ -289,27 +294,27 @@ struct grpc_chttp2_transport {
 
   grpc_combiner* combiner;
 
-  grpc_closure* notify_on_receive_settings;
+  grpc_closure* notify_on_receive_settings = nullptr;
 
   /** write execution state of the transport */
-  grpc_chttp2_write_state write_state;
+  grpc_chttp2_write_state write_state = GRPC_CHTTP2_WRITE_STATE_IDLE;
   /** is this the first write in a series of writes?
       set when we initiate writing from idle, cleared when we
       initiate writing from writing+more */
-  bool is_first_write_in_batch;
+  bool is_first_write_in_batch = false;
 
   /** is the transport destroying itself? */
-  uint8_t destroying;
+  uint8_t destroying = 0;
   /** has the upper layer closed the transport? */
-  grpc_error* closed_with_error;
+  grpc_error* closed_with_error = GRPC_ERROR_NONE;
 
   /** is there a read request to the endpoint outstanding? */
-  uint8_t endpoint_reading;
+  uint8_t endpoint_reading = 1;
 
-  grpc_chttp2_optimization_target opt_target;
+  grpc_chttp2_optimization_target opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
 
   /** various lists of streams */
-  grpc_chttp2_stream_list lists[STREAM_LIST_COUNT];
+  grpc_chttp2_stream_list lists[STREAM_LIST_COUNT] = {};
 
   /** maps stream id to grpc_chttp2_stream objects */
   grpc_chttp2_stream_map stream_map;
@@ -326,7 +331,7 @@ struct grpc_chttp2_transport {
   /** address to place a newly accepted stream - set and unset by
       grpc_chttp2_parsing_accept_stream; used by init_stream to
       publish the accepted server stream */
-  grpc_chttp2_stream** accepting_stream;
+  grpc_chttp2_stream** accepting_stream = nullptr;
 
   struct {
     /* accept stream callback */
@@ -350,41 +355,43 @@ struct grpc_chttp2_transport {
 
   /** how much data are we willing to buffer when the WRITE_BUFFER_HINT is set?
    */
-  uint32_t write_buffer_size;
+  uint32_t write_buffer_size = grpc_core::chttp2::kDefaultWindow;
 
   /** Set to a grpc_error object if a goaway frame is received. By default, set
    * to GRPC_ERROR_NONE */
-  grpc_error* goaway_error;
+  grpc_error* goaway_error = GRPC_ERROR_NONE;
 
-  grpc_chttp2_sent_goaway_state sent_goaway_state;
+  grpc_chttp2_sent_goaway_state sent_goaway_state = GRPC_CHTTP2_NO_GOAWAY_SEND;
 
   /** are the local settings dirty and need to be sent? */
-  bool dirtied_local_settings;
+  bool dirtied_local_settings = true;
   /** have local settings been sent? */
-  bool sent_local_settings;
-  /** bitmask of setting indexes to send out */
-  uint32_t force_send_settings;
+  bool sent_local_settings = false;
+  /** bitmask of setting indexes to send out.
+      Hack: it's common for implementations to assume 65536 bytes initial send
+      window -- this should by rights be 0 */
+  uint32_t force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
   /** settings values */
   uint32_t settings[GRPC_NUM_SETTING_SETS][GRPC_CHTTP2_NUM_SETTINGS];
 
   /** what is the next stream id to be allocated by this peer?
       copied to next_stream_id in parsing when parsing commences */
-  uint32_t next_stream_id;
+  uint32_t next_stream_id = 0;
 
   /** last new stream id */
-  uint32_t last_new_stream_id;
+  uint32_t last_new_stream_id = 0;
 
   /** ping queues for various ping insertion points */
-  grpc_chttp2_ping_queue ping_queue;
+  grpc_chttp2_ping_queue ping_queue = grpc_chttp2_ping_queue();
   grpc_chttp2_repeated_ping_policy ping_policy;
   grpc_chttp2_repeated_ping_state ping_state;
-  uint64_t ping_ctr; /* unique id for pings */
+  uint64_t ping_ctr = 0; /* unique id for pings */
   grpc_closure retry_initiate_ping_locked;
 
   /** ping acks */
-  size_t ping_ack_count;
-  size_t ping_ack_capacity;
-  uint64_t* ping_acks;
+  size_t ping_ack_count = 0;
+  size_t ping_ack_capacity = 0;
+  uint64_t* ping_acks = nullptr;
   grpc_chttp2_server_ping_recv_state ping_recv_state;
 
   /** parser for headers */
@@ -410,22 +417,22 @@ struct grpc_chttp2_transport {
   int64_t initial_window_update = 0;
 
   /* deframing */
-  grpc_chttp2_deframe_transport_state deframe_state;
-  uint8_t incoming_frame_type;
-  uint8_t incoming_frame_flags;
-  uint8_t header_eof;
-  bool is_first_frame;
-  uint32_t expect_continuation_stream_id;
-  uint32_t incoming_frame_size;
-  uint32_t incoming_stream_id;
+  grpc_chttp2_deframe_transport_state deframe_state = GRPC_DTS_CLIENT_PREFIX_0;
+  uint8_t incoming_frame_type = 0;
+  uint8_t incoming_frame_flags = 0;
+  uint8_t header_eof = 0;
+  bool is_first_frame = true;
+  uint32_t expect_continuation_stream_id = 0;
+  uint32_t incoming_frame_size = 0;
+  uint32_t incoming_stream_id = 0;
 
   /* active parser */
-  void* parser_data;
-  grpc_chttp2_stream* incoming_stream;
+  void* parser_data = nullptr;
+  grpc_chttp2_stream* incoming_stream = nullptr;
   grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t,
                         grpc_chttp2_stream* s, grpc_slice slice, int is_last);
 
-  grpc_chttp2_write_cb* write_cb_pool;
+  grpc_chttp2_write_cb* write_cb_pool = nullptr;
 
   /* bdp estimator */
   grpc_closure next_bdp_ping_timer_expired_locked;
@@ -434,23 +441,23 @@ struct grpc_chttp2_transport {
 
   /* if non-NULL, close the transport with this error when writes are finished
    */
-  grpc_error* close_transport_on_writes_finished;
+  grpc_error* close_transport_on_writes_finished = GRPC_ERROR_NONE;
 
   /* a list of closures to run after writes are finished */
-  grpc_closure_list run_after_write;
+  grpc_closure_list run_after_write = GRPC_CLOSURE_LIST_INIT;
 
   /* buffer pool state */
   /** have we scheduled a benign cleanup? */
-  bool benign_reclaimer_registered;
+  bool benign_reclaimer_registered = false;
   /** have we scheduled a destructive cleanup? */
-  bool destructive_reclaimer_registered;
+  bool destructive_reclaimer_registered = false;
   /** benign cleanup closure */
   grpc_closure benign_reclaimer_locked;
   /** destructive cleanup closure */
   grpc_closure destructive_reclaimer_locked;
 
   /* next bdp ping timer */
-  bool have_next_bdp_ping_timer;
+  bool have_next_bdp_ping_timer = false;
   grpc_timer next_bdp_ping_timer;
 
   /* keep-alive ping support */
@@ -471,12 +478,12 @@ struct grpc_chttp2_transport {
   /** grace period for a ping to complete before watchdog kicks in */
   grpc_millis keepalive_timeout;
   /** if keepalive pings are allowed when there's no outstanding streams */
-  bool keepalive_permit_without_calls;
+  bool keepalive_permit_without_calls = false;
   /** keep-alive state machine state */
   grpc_chttp2_keepalive_state keepalive_state;
 
   grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode> channelz_socket;
-  uint32_t num_messages_in_next_write;
+  uint32_t num_messages_in_next_write = 0;
 };
 
 typedef enum {
@@ -487,6 +494,10 @@ typedef enum {
 } grpc_published_metadata_method;
 
 struct grpc_chttp2_stream {
+  grpc_chttp2_stream(grpc_chttp2_transport* t, grpc_stream_refcount* refcount,
+                     const void* server_data, gpr_arena* arena);
+  ~grpc_chttp2_stream();
+
   grpc_chttp2_transport* t;
   grpc_stream_refcount* refcount;
 
@@ -494,63 +505,63 @@ struct grpc_chttp2_stream {
   grpc_closure* destroy_stream_arg;
 
   grpc_chttp2_stream_link links[STREAM_LIST_COUNT];
-  uint8_t included[STREAM_LIST_COUNT];
+  uint8_t included[STREAM_LIST_COUNT] = {};
 
   /** HTTP2 stream id for this stream, or zero if one has not been assigned */
-  uint32_t id;
+  uint32_t id = 0;
 
   /** things the upper layers would like to send */
-  grpc_metadata_batch* send_initial_metadata;
-  grpc_closure* send_initial_metadata_finished;
-  grpc_metadata_batch* send_trailing_metadata;
-  grpc_closure* send_trailing_metadata_finished;
+  grpc_metadata_batch* send_initial_metadata = nullptr;
+  grpc_closure* send_initial_metadata_finished = nullptr;
+  grpc_metadata_batch* send_trailing_metadata = nullptr;
+  grpc_closure* send_trailing_metadata_finished = nullptr;
 
   grpc_core::OrphanablePtr<grpc_core::ByteStream> fetching_send_message;
-  uint32_t fetched_send_message_length;
-  grpc_slice fetching_slice;
+  uint32_t fetched_send_message_length = 0;
+  grpc_slice fetching_slice = grpc_empty_slice();
   int64_t next_message_end_offset;
-  int64_t flow_controlled_bytes_written;
-  int64_t flow_controlled_bytes_flowed;
+  int64_t flow_controlled_bytes_written = 0;
+  int64_t flow_controlled_bytes_flowed = 0;
   grpc_closure complete_fetch_locked;
-  grpc_closure* fetching_send_message_finished;
+  grpc_closure* fetching_send_message_finished = nullptr;
 
   grpc_metadata_batch* recv_initial_metadata;
-  grpc_closure* recv_initial_metadata_ready;
-  bool* trailing_metadata_available;
+  grpc_closure* recv_initial_metadata_ready = nullptr;
+  bool* trailing_metadata_available = nullptr;
   grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
-  grpc_closure* recv_message_ready;
+  grpc_closure* recv_message_ready = nullptr;
   grpc_metadata_batch* recv_trailing_metadata;
-  grpc_closure* recv_trailing_metadata_finished;
+  grpc_closure* recv_trailing_metadata_finished = nullptr;
 
-  grpc_transport_stream_stats* collecting_stats;
-  grpc_transport_stream_stats stats;
+  grpc_transport_stream_stats* collecting_stats = nullptr;
+  grpc_transport_stream_stats stats = grpc_transport_stream_stats();
 
   /** Is this stream closed for writing. */
-  bool write_closed;
+  bool write_closed = false;
   /** Is this stream reading half-closed. */
-  bool read_closed;
+  bool read_closed = false;
   /** Are all published incoming byte streams closed. */
-  bool all_incoming_byte_streams_finished;
+  bool all_incoming_byte_streams_finished = false;
   /** Has this stream seen an error.
       If true, then pending incoming frames can be thrown away. */
-  bool seen_error;
+  bool seen_error = false;
   /** Are we buffering writes on this stream? If yes, we won't become writable
       until there's enough queued up in the flow_controlled_buffer */
-  bool write_buffering;
+  bool write_buffering = false;
   /** Has trailing metadata been received. */
-  bool received_trailing_metadata;
+  bool received_trailing_metadata = false;
 
   /* have we sent or received the EOS bit? */
-  bool eos_received;
-  bool eos_sent;
+  bool eos_received = false;
+  bool eos_sent = false;
 
   /** the error that resulted in this stream being read-closed */
-  grpc_error* read_closed_error;
+  grpc_error* read_closed_error = GRPC_ERROR_NONE;
   /** the error that resulted in this stream being write-closed */
-  grpc_error* write_closed_error;
+  grpc_error* write_closed_error = GRPC_ERROR_NONE;
 
-  grpc_published_metadata_method published_metadata[2];
-  bool final_metadata_requested;
+  grpc_published_metadata_method published_metadata[2] = {};
+  bool final_metadata_requested = false;
 
   grpc_chttp2_incoming_metadata_buffer metadata_buffer[2];
 
@@ -560,33 +571,33 @@ struct grpc_chttp2_stream {
    * Accessed only by application thread when stream->pending_byte_stream ==
    * true */
   grpc_slice_buffer unprocessed_incoming_frames_buffer;
-  grpc_closure* on_next;    /* protected by t combiner */
-  bool pending_byte_stream; /* protected by t combiner */
+  grpc_closure* on_next = nullptr;  /* protected by t combiner */
+  bool pending_byte_stream = false; /* protected by t combiner */
   // cached length of buffer to be used by the transport thread in cases where
   // stream->pending_byte_stream == true. The value is saved before
   // application threads are allowed to modify
   // unprocessed_incoming_frames_buffer
-  size_t unprocessed_incoming_frames_buffer_cached_length;
+  size_t unprocessed_incoming_frames_buffer_cached_length = 0;
   grpc_closure reset_byte_stream;
-  grpc_error* byte_stream_error; /* protected by t combiner */
-  bool received_last_frame;      /* protected by t combiner */
+  grpc_error* byte_stream_error = GRPC_ERROR_NONE; /* protected by t combiner */
+  bool received_last_frame = false;                /* protected by t combiner */
 
-  grpc_millis deadline;
+  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
 
   /** saw some stream level error */
-  grpc_error* forced_close_error;
+  grpc_error* forced_close_error = GRPC_ERROR_NONE;
   /** how many header frames have we received? */
-  uint8_t header_frames_received;
+  uint8_t header_frames_received = 0;
   /** parsing state for data frames */
   /* Accessed only by transport thread when stream->pending_byte_stream == false
    * Accessed only by application thread when stream->pending_byte_stream ==
    * true */
   grpc_chttp2_data_parser data_parser;
   /** number of bytes received - reset at end of parse thread execution */
-  int64_t received_bytes;
+  int64_t received_bytes = 0;
 
-  bool sent_initial_metadata;
-  bool sent_trailing_metadata;
+  bool sent_initial_metadata = false;
+  bool sent_trailing_metadata = false;
 
   grpc_core::PolymorphicManualConstructor<
       grpc_core::chttp2::StreamFlowControlBase,
@@ -596,32 +607,34 @@ struct grpc_chttp2_stream {
 
   grpc_slice_buffer flow_controlled_buffer;
 
-  grpc_chttp2_write_cb* on_flow_controlled_cbs;
-  grpc_chttp2_write_cb* on_write_finished_cbs;
-  grpc_chttp2_write_cb* finish_after_write;
-  size_t sending_bytes;
+  grpc_chttp2_write_cb* on_flow_controlled_cbs = nullptr;
+  grpc_chttp2_write_cb* on_write_finished_cbs = nullptr;
+  grpc_chttp2_write_cb* finish_after_write = nullptr;
+  size_t sending_bytes = 0;
 
   /* Stream compression method to be used. */
-  grpc_stream_compression_method stream_compression_method;
+  grpc_stream_compression_method stream_compression_method =
+      GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
   /* Stream decompression method to be used. */
-  grpc_stream_compression_method stream_decompression_method;
+  grpc_stream_compression_method stream_decompression_method =
+      GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS;
   /** Stream compression decompress context */
-  grpc_stream_compression_context* stream_decompression_ctx;
+  grpc_stream_compression_context* stream_decompression_ctx = nullptr;
   /** Stream compression compress context */
-  grpc_stream_compression_context* stream_compression_ctx;
+  grpc_stream_compression_context* stream_compression_ctx = nullptr;
 
   /** Buffer storing data that is compressed but not sent */
   grpc_slice_buffer compressed_data_buffer;
   /** Amount of uncompressed bytes sent out when compressed_data_buffer is
    * emptied */
-  size_t uncompressed_data_size;
+  size_t uncompressed_data_size = 0;
   /** Temporary buffer storing decompressed data */
   grpc_slice_buffer decompressed_data_buffer;
   /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed
    */
-  bool unprocessed_incoming_frames_decompressed;
+  bool unprocessed_incoming_frames_decompressed = false;
   /** gRPC header bytes that are already decompressed */
-  size_t decompressed_header_bytes;
+  size_t decompressed_header_bytes = 0;
 };
 
 /** Transport writing call flow:
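Not every member can take a default initializer: plain C aggregates such as grpc_slice_buffer and grpc_closure have no constructors, so the grpc_chttp2_stream constructor still initializes them imperatively (grpc_slice_buffer_init, GRPC_CLOSURE_INIT) in its body. The split in miniature (hypothetical types):

    struct c_buf { void* data; };                     // opaque C-style struct
    void c_buf_init(c_buf* b) { b->data = nullptr; }  // its C init function

    struct stream_sketch {
      stream_sketch() { c_buf_init(&buf); }  // C member: init in ctor body
      bool closed = false;                   // C++ member: initializer suffices
      c_buf buf;
    };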
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index 81e2634e3a7..349d8681d55 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -111,16 +111,21 @@ typedef struct grpc_cronet_transport grpc_cronet_transport;
 /* TODO (makdharma): reorder structure for memory efficiency per
    http://www.catb.org/esr/structure-packing/#_structure_reordering: */
 struct read_state {
+  explicit read_state(gpr_arena* arena)
+      : trailing_metadata(arena), initial_metadata(arena) {
+    grpc_slice_buffer_init(&read_slice_buffer);
+  }
+
   /* vars to store data coming from server */
-  char* read_buffer;
-  bool length_field_received;
-  int received_bytes;
-  int remaining_bytes;
-  int length_field;
-  bool compressed;
-  char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES];
-  char* payload_field;
-  bool read_stream_closed;
+  char* read_buffer = nullptr;
+  bool length_field_received = false;
+  int received_bytes = 0;
+  int remaining_bytes = 0;
+  int length_field = 0;
+  bool compressed = false;
+  char grpc_header_bytes[GRPC_HEADER_SIZE_IN_BYTES] = {};
+  char* payload_field = nullptr;
+  bool read_stream_closed = false;
 
   /* vars for holding data destined for the application */
   grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sbs;
@@ -128,59 +133,71 @@ struct read_state {
 
   /* vars for trailing metadata */
   grpc_chttp2_incoming_metadata_buffer trailing_metadata;
-  bool trailing_metadata_valid;
+  bool trailing_metadata_valid = false;
 
   /* vars for initial metadata */
   grpc_chttp2_incoming_metadata_buffer initial_metadata;
 };
 
 struct write_state {
-  char* write_buffer;
+  char* write_buffer = nullptr;
 };
 
 /* track state of one stream op */
 struct op_state {
-  bool state_op_done[OP_NUM_OPS];
-  bool state_callback_received[OP_NUM_OPS];
+  explicit op_state(gpr_arena* arena) : rs(arena) {}
+
+  bool state_op_done[OP_NUM_OPS] = {};
+  bool state_callback_received[OP_NUM_OPS] = {};
   /* A non-zero gRPC status code has been seen */
-  bool fail_state;
+  bool fail_state = false;
   /* Transport is discarding all buffered messages */
-  bool flush_read;
-  bool flush_cronet_when_ready;
-  bool pending_write_for_trailer;
-  bool pending_send_message;
+  bool flush_read = false;
+  bool flush_cronet_when_ready = false;
+  bool pending_write_for_trailer = false;
+  bool pending_send_message = false;
   /* User requested RECV_TRAILING_METADATA */
-  bool pending_recv_trailing_metadata;
+  bool pending_recv_trailing_metadata = false;
   /* Cronet has not issued a callback of a bidirectional read */
-  bool pending_read_from_cronet;
-  grpc_error* cancel_error;
+  bool pending_read_from_cronet = false;
+  grpc_error* cancel_error = GRPC_ERROR_NONE;
   /* data structure for storing data coming from server */
   struct read_state rs;
   /* data structure for storing data going to the server */
   struct write_state ws;
 };
 
+struct stream_obj;
+
 struct op_and_state {
+  op_and_state(stream_obj* s, const grpc_transport_stream_op_batch& op);
+
   grpc_transport_stream_op_batch op;
   struct op_state state;
-  bool done;
-  struct stream_obj* s;      /* Pointer back to the stream object */
-  struct op_and_state* next; /* next op_and_state in the linked list */
+  bool done = false;
+  struct stream_obj* s; /* Pointer back to the stream object */
+  /* next op_and_state in the linked list */
+  struct op_and_state* next;
 };
 
 struct op_storage {
-  int num_pending_ops;
-  struct op_and_state* head;
+  int num_pending_ops = 0;
+  struct op_and_state* head = nullptr;
 };
 
 struct stream_obj {
+  stream_obj(grpc_transport* gt, grpc_stream* gs,
+             grpc_stream_refcount* refcount, gpr_arena* arena);
+  ~stream_obj();
+
   gpr_arena* arena;
-  struct op_and_state* oas;
-  grpc_transport_stream_op_batch* curr_op;
+  struct op_and_state* oas = nullptr;
+  grpc_transport_stream_op_batch* curr_op = nullptr;
   grpc_cronet_transport* curr_ct;
   grpc_stream* curr_gs;
-  bidirectional_stream* cbs;
-  bidirectional_stream_header_array header_array;
+  bidirectional_stream* cbs = nullptr;
+  bidirectional_stream_header_array header_array =
+      bidirectional_stream_header_array();  // Zero-initialize the structure.
 
   /* Stream level state. Some state will be tracked both at stream and stream_op
    * level */
@@ -195,7 +212,6 @@ struct stream_obj {
   /* Refcount object of the stream */
   grpc_stream_refcount* refcount;
 };
-typedef struct stream_obj stream_obj;
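+// NOTE: in C++ the struct name is already a type, so the typedef removed
+// above is redundant; the forward declaration "struct stream_obj;" added
+// earlier in this file covers all remaining uses.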
 
 #ifndef NDEBUG
 #define GRPC_CRONET_STREAM_REF(stream, reason) \
@@ -306,6 +322,10 @@ static grpc_error* make_error_with_desc(int error_code, const char* desc) {
   return error;
 }
 
+inline op_and_state::op_and_state(stream_obj* s,
+                                  const grpc_transport_stream_op_batch& op)
+    : op(op), state(s->arena), s(s), next(s->storage.head) {}
+
 /*
   Add a new stream op to op storage.
 */
@@ -314,14 +334,8 @@ static void add_to_storage(struct stream_obj* s,
   struct op_storage* storage = &s->storage;
   /* add new op at the beginning of the linked list. The memory is freed
   in remove_from_storage */
-  struct op_and_state* new_op = static_cast<struct op_and_state*>(
-      gpr_malloc(sizeof(struct op_and_state)));
-  memcpy(&new_op->op, op, sizeof(grpc_transport_stream_op_batch));
-  memset(&new_op->state, 0, sizeof(new_op->state));
-  new_op->s = s;
-  new_op->done = false;
+  op_and_state* new_op = grpc_core::New<op_and_state>(s, *op);
   gpr_mu_lock(&s->mu);
-  new_op->next = storage->head;
   storage->head = new_op;
   storage->num_pending_ops++;
   if (op->send_message) {
@@ -347,7 +361,7 @@ static void remove_from_storage(struct stream_obj* s,
   }
   if (s->storage.head == oas) {
     s->storage.head = oas->next;
-    gpr_free(oas);
+    grpc_core::Delete(oas);
     s->storage.num_pending_ops--;
     CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
                s->storage.num_pending_ops);
@@ -358,7 +372,7 @@ static void remove_from_storage(struct stream_obj* s,
         s->storage.num_pending_ops--;
         CRONET_LOG(GPR_DEBUG, "Freed %p. Now %d in the queue", oas,
                    s->storage.num_pending_ops);
-        gpr_free(oas);
+        grpc_core::Delete(oas);
         break;
       } else if (GPR_UNLIKELY(curr->next == nullptr)) {
         CRONET_LOG(GPR_ERROR, "Reached end of LL and did not find op to free");
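The hand-rolled gpr_malloc/memcpy/memset construction of op_and_state gives way to grpc_core::New, which runs the constructor, and gpr_free to grpc_core::Delete, which runs the destructor. Since the constructor initializes next(s->storage.head), the list link is now captured at construction time, before gpr_mu_lock(&s->mu) is taken in add_to_storage; the original code read the head under the lock, so this ordering implicitly assumes that additions to a stream's op storage are serialized. The linking pattern in isolation:

    // Sketch: the node captures the current head when constructed, so the
    // caller only has to swing the head pointer afterwards.
    struct node_sketch {
      explicit node_sketch(node_sketch** head) : next(*head) {}
      node_sketch* next;
    };

    void push(node_sketch** head) { *head = new node_sketch(head); }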
@@ -540,10 +554,6 @@ static void on_response_headers_received(
   }
 
   gpr_mu_lock(&s->mu);
-  memset(&s->state.rs.initial_metadata, 0,
-         sizeof(s->state.rs.initial_metadata));
-  grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
-                                            s->arena);
   convert_cronet_array_to_metadata(headers, &s->state.rs.initial_metadata);
   s->state.state_callback_received[OP_RECV_INITIAL_METADATA] = true;
   if (!(s->state.state_op_done[OP_CANCEL_ERROR] ||
@@ -634,11 +644,7 @@ static void on_response_trailers_received(
   stream_obj* s = static_cast<stream_obj*>(stream->annotation);
   grpc_cronet_transport* t = s->curr_ct;
   gpr_mu_lock(&s->mu);
-  memset(&s->state.rs.trailing_metadata, 0,
-         sizeof(s->state.rs.trailing_metadata));
   s->state.rs.trailing_metadata_valid = false;
-  grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.trailing_metadata,
-                                            s->arena);
   convert_cronet_array_to_metadata(trailers, &s->state.rs.trailing_metadata);
   if (trailers->count > 0) {
     s->state.rs.trailing_metadata_valid = true;
@@ -1354,36 +1360,28 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
   Functions used by upper layers to access transport functionality.
 */
 
+inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
+                              grpc_stream_refcount* refcount, gpr_arena* arena)
+    : arena(arena),
+      curr_ct(reinterpret_cast<grpc_cronet_transport*>(gt)),
+      curr_gs(gs),
+      state(arena),
+      refcount(refcount) {
+  GRPC_CRONET_STREAM_REF(this, "cronet transport");
+  gpr_mu_init(&mu);
+}
+
+inline stream_obj::~stream_obj() {
+  null_and_maybe_free_read_buffer(this);
+  /* Clean up read_slice_buffer in case there is unread data. */
+  grpc_slice_buffer_destroy_internal(&state.rs.read_slice_buffer);
+  GRPC_ERROR_UNREF(state.cancel_error);
+}
+
 static int init_stream(grpc_transport* gt, grpc_stream* gs,
                        grpc_stream_refcount* refcount, const void* server_data,
                        gpr_arena* arena) {
-  stream_obj* s = reinterpret_cast<stream_obj*>(gs);
-
-  s->refcount = refcount;
-  GRPC_CRONET_STREAM_REF(s, "cronet transport");
-  memset(&s->storage, 0, sizeof(s->storage));
-  s->storage.head = nullptr;
-  memset(&s->state, 0, sizeof(s->state));
-  s->curr_op = nullptr;
-  s->cbs = nullptr;
-  memset(&s->header_array, 0, sizeof(s->header_array));
-  memset(&s->state.rs, 0, sizeof(s->state.rs));
-  memset(&s->state.ws, 0, sizeof(s->state.ws));
-  memset(s->state.state_op_done, 0, sizeof(s->state.state_op_done));
-  memset(s->state.state_callback_received, 0,
-         sizeof(s->state.state_callback_received));
-  s->state.fail_state = s->state.flush_read = false;
-  s->state.cancel_error = nullptr;
-  s->state.flush_cronet_when_ready = s->state.pending_write_for_trailer = false;
-  s->state.pending_send_message = false;
-  s->state.pending_recv_trailing_metadata = false;
-  s->state.pending_read_from_cronet = false;
-
-  s->curr_gs = gs;
-  s->curr_ct = reinterpret_cast<grpc_cronet_transport*>(gt);
-  s->arena = arena;
-
-  gpr_mu_init(&s->mu);
+  new (gs) stream_obj(gt, gs, refcount, arena);
   return 0;
 }
 
@@ -1426,10 +1424,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
 static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
                            grpc_closure* then_schedule_closure) {
   stream_obj* s = reinterpret_cast<stream_obj*>(gs);
-  null_and_maybe_free_read_buffer(s);
-  /* Clean up read_slice_buffer in case there is unread data. */
-  grpc_slice_buffer_destroy_internal(&s->state.rs.read_slice_buffer);
-  GRPC_ERROR_UNREF(s->state.cancel_error);
+  s->~stream_obj();
   GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 9dbd0958436..1c4e2e79fe6 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -40,18 +40,68 @@
     if (grpc_inproc_trace.enabled()) gpr_log(__VA_ARGS__); \
   } while (0)
 
-static grpc_slice g_empty_slice;
-static grpc_slice g_fake_path_key;
-static grpc_slice g_fake_path_value;
-static grpc_slice g_fake_auth_key;
-static grpc_slice g_fake_auth_value;
+namespace {
+grpc_slice g_empty_slice;
+grpc_slice g_fake_path_key;
+grpc_slice g_fake_path_value;
+grpc_slice g_fake_auth_key;
+grpc_slice g_fake_auth_value;
+
+struct inproc_stream;
+bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
+void op_state_machine(void* arg, grpc_error* error);
+void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
+                  bool is_initial);
+grpc_error* fill_in_metadata(inproc_stream* s,
+                             const grpc_metadata_batch* metadata,
+                             uint32_t flags, grpc_metadata_batch* out_md,
+                             uint32_t* outflags, bool* markfilled);
+
+struct shared_mu {
+  shared_mu() {
+    // Share one lock between both sides since both sides get affected
+    gpr_mu_init(&mu);
+    gpr_ref_init(&refs, 2);
+  }
 
-typedef struct {
   gpr_mu mu;
   gpr_refcount refs;
-} shared_mu;
+};
+
+struct inproc_transport {
+  inproc_transport(const grpc_transport_vtable* vtable, shared_mu* mu,
+                   bool is_client)
+      : mu(mu), is_client(is_client) {
+    base.vtable = vtable;
+    // Start each side of the transport with 2 refs, since each side holds
+    // a ref to the other.
+    gpr_ref_init(&refs, 2);
+    grpc_connectivity_state_init(&connectivity, GRPC_CHANNEL_READY,
+                                 is_client ? "inproc_client" : "inproc_server");
+  }
+
+  ~inproc_transport() {
+    grpc_connectivity_state_destroy(&connectivity);
+    if (gpr_unref(&mu->refs)) {
+      gpr_free(mu);
+    }
+  }
+
+  void ref() {
+    INPROC_LOG(GPR_INFO, "ref_transport %p", this);
+    gpr_ref(&refs);
+  }
+
+  void unref() {
+    INPROC_LOG(GPR_INFO, "unref_transport %p", this);
+    if (!gpr_unref(&refs)) {
+      return;
+    }
+    INPROC_LOG(GPR_INFO, "really_destroy_transport %p", this);
+    this->~inproc_transport();
+    gpr_free(this);
+  }
 
-typedef struct inproc_transport {
   grpc_transport base;
   shared_mu* mu;
   gpr_refcount refs;
@@ -60,128 +110,174 @@ typedef struct inproc_transport {
   void (*accept_stream_cb)(void* user_data, grpc_transport* transport,
                            const void* server_data);
   void* accept_stream_data;
-  bool is_closed;
+  bool is_closed = false;
   struct inproc_transport* other_side;
-  struct inproc_stream* stream_list;
-} inproc_transport;
+  struct inproc_stream* stream_list = nullptr;
+};
+
+struct inproc_stream {
+  inproc_stream(inproc_transport* t, grpc_stream_refcount* refcount,
+                const void* server_data, gpr_arena* arena)
+      : t(t), refs(refcount), arena(arena) {
+    // Ref this stream right now for ctor and list.
+    ref("inproc_init_stream:init");
+    ref("inproc_init_stream:list");
+
+    grpc_metadata_batch_init(&to_read_initial_md);
+    grpc_metadata_batch_init(&to_read_trailing_md);
+    GRPC_CLOSURE_INIT(&op_closure, op_state_machine, this,
+                      grpc_schedule_on_exec_ctx);
+    grpc_metadata_batch_init(&write_buffer_initial_md);
+    grpc_metadata_batch_init(&write_buffer_trailing_md);
+
+    stream_list_prev = nullptr;
+    gpr_mu_lock(&t->mu->mu);
+    stream_list_next = t->stream_list;
+    if (t->stream_list) {
+      t->stream_list->stream_list_prev = this;
+    }
+    t->stream_list = this;
+    gpr_mu_unlock(&t->mu->mu);
+
+    if (!server_data) {
+      t->ref();
+      inproc_transport* st = t->other_side;
+      st->ref();
+      other_side = nullptr;  // will get filled in soon
+      // Pass the client-side stream address to the server-side for a ref
+      ref("inproc_init_stream:clt");  // ref it now on behalf of server
+                                      // side to avoid destruction
+      INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p",
+                 st->accept_stream_cb, st->accept_stream_data);
+      (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)this);
+    } else {
+      // This is the server-side and is being called through accept_stream_cb
+      inproc_stream* cs = (inproc_stream*)server_data;
+      other_side = cs;
+      // Ref the server-side stream on behalf of the client now
+      ref("inproc_init_stream:srv");
+
+      // Now we are about to affect the other side, so lock the transport
+      // to make sure that it doesn't get destroyed
+      gpr_mu_lock(&t->mu->mu);
+      cs->other_side = this;
+      // Now transfer from the other side's write_buffer if any to the to_read
+      // buffer
+      if (cs->write_buffer_initial_md_filled) {
+        fill_in_metadata(this, &cs->write_buffer_initial_md,
+                         cs->write_buffer_initial_md_flags, &to_read_initial_md,
+                         &to_read_initial_md_flags, &to_read_initial_md_filled);
+        deadline = GPR_MIN(deadline, cs->write_buffer_deadline);
+        grpc_metadata_batch_clear(&cs->write_buffer_initial_md);
+        cs->write_buffer_initial_md_filled = false;
+      }
+      if (cs->write_buffer_trailing_md_filled) {
+        fill_in_metadata(this, &cs->write_buffer_trailing_md, 0,
+                         &to_read_trailing_md, nullptr,
+                         &to_read_trailing_md_filled);
+        grpc_metadata_batch_clear(&cs->write_buffer_trailing_md);
+        cs->write_buffer_trailing_md_filled = false;
+      }
+      if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
+        cancel_other_error = cs->write_buffer_cancel_error;
+        cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
+      }
+
+      gpr_mu_unlock(&t->mu->mu);
+    }
+  }
+
+  ~inproc_stream() {
+    GRPC_ERROR_UNREF(write_buffer_cancel_error);
+    GRPC_ERROR_UNREF(cancel_self_error);
+    GRPC_ERROR_UNREF(cancel_other_error);
+
+    if (recv_inited) {
+      grpc_slice_buffer_destroy_internal(&recv_message);
+    }
+
+    t->unref();
+
+    if (closure_at_destroy) {
+      GRPC_CLOSURE_SCHED(closure_at_destroy, GRPC_ERROR_NONE);
+    }
+  }
+
+#ifndef NDEBUG
+#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason)
+#else
+#define STREAM_REF(refs, reason) grpc_stream_ref(refs)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
+#endif
+  void ref(const char* reason) {
+    INPROC_LOG(GPR_INFO, "ref_stream %p %s", this, reason);
+    STREAM_REF(refs, reason);
+  }
+
+  void unref(const char* reason) {
+    INPROC_LOG(GPR_INFO, "unref_stream %p %s", this, reason);
+    STREAM_UNREF(refs, reason);
+  }
+#undef STREAM_REF
+#undef STREAM_UNREF
 
-typedef struct inproc_stream {
   inproc_transport* t;
   grpc_metadata_batch to_read_initial_md;
-  uint32_t to_read_initial_md_flags;
-  bool to_read_initial_md_filled;
+  uint32_t to_read_initial_md_flags = 0;
+  bool to_read_initial_md_filled = false;
   grpc_metadata_batch to_read_trailing_md;
-  bool to_read_trailing_md_filled;
-  bool ops_needed;
-  bool op_closure_scheduled;
+  bool to_read_trailing_md_filled = false;
+  bool ops_needed = false;
+  bool op_closure_scheduled = false;
   grpc_closure op_closure;
   // Write buffer used only during gap at init time when client-side
   // stream is set up but server side stream is not yet set up
   grpc_metadata_batch write_buffer_initial_md;
-  bool write_buffer_initial_md_filled;
-  uint32_t write_buffer_initial_md_flags;
-  grpc_millis write_buffer_deadline;
+  bool write_buffer_initial_md_filled = false;
+  uint32_t write_buffer_initial_md_flags = 0;
+  grpc_millis write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
   grpc_metadata_batch write_buffer_trailing_md;
-  bool write_buffer_trailing_md_filled;
-  grpc_error* write_buffer_cancel_error;
+  bool write_buffer_trailing_md_filled = false;
+  grpc_error* write_buffer_cancel_error = GRPC_ERROR_NONE;
 
   struct inproc_stream* other_side;
-  bool other_side_closed;               // won't talk anymore
-  bool write_buffer_other_side_closed;  // on hold
+  bool other_side_closed = false;               // won't talk anymore
+  bool write_buffer_other_side_closed = false;  // on hold
   grpc_stream_refcount* refs;
-  grpc_closure* closure_at_destroy;
+  grpc_closure* closure_at_destroy = nullptr;
 
   gpr_arena* arena;
 
-  grpc_transport_stream_op_batch* send_message_op;
-  grpc_transport_stream_op_batch* send_trailing_md_op;
-  grpc_transport_stream_op_batch* recv_initial_md_op;
-  grpc_transport_stream_op_batch* recv_message_op;
-  grpc_transport_stream_op_batch* recv_trailing_md_op;
+  grpc_transport_stream_op_batch* send_message_op = nullptr;
+  grpc_transport_stream_op_batch* send_trailing_md_op = nullptr;
+  grpc_transport_stream_op_batch* recv_initial_md_op = nullptr;
+  grpc_transport_stream_op_batch* recv_message_op = nullptr;
+  grpc_transport_stream_op_batch* recv_trailing_md_op = nullptr;
 
   grpc_slice_buffer recv_message;
   grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> recv_stream;
-  bool recv_inited;
+  bool recv_inited = false;
 
-  bool initial_md_sent;
-  bool trailing_md_sent;
-  bool initial_md_recvd;
-  bool trailing_md_recvd;
+  bool initial_md_sent = false;
+  bool trailing_md_sent = false;
+  bool initial_md_recvd = false;
+  bool trailing_md_recvd = false;
 
-  bool closed;
+  bool closed = false;
 
-  grpc_error* cancel_self_error;
-  grpc_error* cancel_other_error;
+  grpc_error* cancel_self_error = GRPC_ERROR_NONE;
+  grpc_error* cancel_other_error = GRPC_ERROR_NONE;
 
-  grpc_millis deadline;
+  grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
 
-  bool listed;
+  bool listed = true;
   struct inproc_stream* stream_list_prev;
   struct inproc_stream* stream_list_next;
-} inproc_stream;
-
-static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
-static void op_state_machine(void* arg, grpc_error* error);
-
-static void ref_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_INFO, "ref_transport %p", t);
-  gpr_ref(&t->refs);
-}
-
-static void really_destroy_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
-  grpc_connectivity_state_destroy(&t->connectivity);
-  if (gpr_unref(&t->mu->refs)) {
-    gpr_free(t->mu);
-  }
-  gpr_free(t);
-}
+};
 
-static void unref_transport(inproc_transport* t) {
-  INPROC_LOG(GPR_INFO, "unref_transport %p", t);
-  if (gpr_unref(&t->refs)) {
-    really_destroy_transport(t);
-  }
-}
-
-#ifndef NDEBUG
-#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
-#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason)
-#else
-#define STREAM_REF(refs, reason) grpc_stream_ref(refs)
-#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
-#endif
-
-static void ref_stream(inproc_stream* s, const char* reason) {
-  INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
-  STREAM_REF(s->refs, reason);
-}
-
-static void unref_stream(inproc_stream* s, const char* reason) {
-  INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
-  STREAM_UNREF(s->refs, reason);
-}
-
-static void really_destroy_stream(inproc_stream* s) {
-  INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
-
-  GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
-  GRPC_ERROR_UNREF(s->cancel_self_error);
-  GRPC_ERROR_UNREF(s->cancel_other_error);
-
-  if (s->recv_inited) {
-    grpc_slice_buffer_destroy_internal(&s->recv_message);
-  }
-
-  unref_transport(s->t);
-
-  if (s->closure_at_destroy) {
-    GRPC_CLOSURE_SCHED(s->closure_at_destroy, GRPC_ERROR_NONE);
-  }
-}
-
-static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
-                         bool is_initial) {
+void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
+                  bool is_initial) {
   for (grpc_linked_mdelem* md = md_batch->list.head; md != nullptr;
        md = md->next) {
     char* key = grpc_slice_to_c_string(GRPC_MDKEY(md->md));
@@ -193,10 +289,10 @@ static void log_metadata(const grpc_metadata_batch* md_batch, bool is_client,
   }
 }
 
-static grpc_error* fill_in_metadata(inproc_stream* s,
-                                    const grpc_metadata_batch* metadata,
-                                    uint32_t flags, grpc_metadata_batch* out_md,
-                                    uint32_t* outflags, bool* markfilled) {
+grpc_error* fill_in_metadata(inproc_stream* s,
+                             const grpc_metadata_batch* metadata,
+                             uint32_t flags, grpc_metadata_batch* out_md,
+                             uint32_t* outflags, bool* markfilled) {
   if (grpc_inproc_trace.enabled()) {
     log_metadata(metadata, s->t->is_client, outflags != nullptr);
   }
@@ -221,109 +317,16 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
   return error;
 }
 
-static int init_stream(grpc_transport* gt, grpc_stream* gs,
-                       grpc_stream_refcount* refcount, const void* server_data,
-                       gpr_arena* arena) {
+int init_stream(grpc_transport* gt, grpc_stream* gs,
+                grpc_stream_refcount* refcount, const void* server_data,
+                gpr_arena* arena) {
   INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
-  inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
-  s->arena = arena;
-
-  s->refs = refcount;
-  // Ref this stream right now
-  ref_stream(s, "inproc_init_stream:init");
-
-  grpc_metadata_batch_init(&s->to_read_initial_md);
-  s->to_read_initial_md_flags = 0;
-  s->to_read_initial_md_filled = false;
-  grpc_metadata_batch_init(&s->to_read_trailing_md);
-  s->to_read_trailing_md_filled = false;
-  grpc_metadata_batch_init(&s->write_buffer_initial_md);
-  s->write_buffer_initial_md_flags = 0;
-  s->write_buffer_initial_md_filled = false;
-  grpc_metadata_batch_init(&s->write_buffer_trailing_md);
-  s->write_buffer_trailing_md_filled = false;
-  s->ops_needed = false;
-  s->op_closure_scheduled = false;
-  GRPC_CLOSURE_INIT(&s->op_closure, op_state_machine, s,
-                    grpc_schedule_on_exec_ctx);
-  s->t = t;
-  s->closure_at_destroy = nullptr;
-  s->other_side_closed = false;
-
-  s->initial_md_sent = s->trailing_md_sent = s->initial_md_recvd =
-      s->trailing_md_recvd = false;
-
-  s->closed = false;
-
-  s->cancel_self_error = GRPC_ERROR_NONE;
-  s->cancel_other_error = GRPC_ERROR_NONE;
-  s->write_buffer_cancel_error = GRPC_ERROR_NONE;
-  s->deadline = GRPC_MILLIS_INF_FUTURE;
-  s->write_buffer_deadline = GRPC_MILLIS_INF_FUTURE;
-
-  s->stream_list_prev = nullptr;
-  gpr_mu_lock(&t->mu->mu);
-  s->listed = true;
-  ref_stream(s, "inproc_init_stream:list");
-  s->stream_list_next = t->stream_list;
-  if (t->stream_list) {
-    t->stream_list->stream_list_prev = s;
-  }
-  t->stream_list = s;
-  gpr_mu_unlock(&t->mu->mu);
-
-  if (!server_data) {
-    ref_transport(t);
-    inproc_transport* st = t->other_side;
-    ref_transport(st);
-    s->other_side = nullptr;  // will get filled in soon
-    // Pass the client-side stream address to the server-side for a ref
-    ref_stream(s, "inproc_init_stream:clt");  // ref it now on behalf of server
-                                              // side to avoid destruction
-    INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
-               st->accept_stream_data);
-    (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
-  } else {
-    // This is the server-side and is being called through accept_stream_cb
-    inproc_stream* cs = (inproc_stream*)server_data;
-    s->other_side = cs;
-    // Ref the server-side stream on behalf of the client now
-    ref_stream(s, "inproc_init_stream:srv");
-
-    // Now we are about to affect the other side, so lock the transport
-    // to make sure that it doesn't get destroyed
-    gpr_mu_lock(&s->t->mu->mu);
-    cs->other_side = s;
-    // Now transfer from the other side's write_buffer if any to the to_read
-    // buffer
-    if (cs->write_buffer_initial_md_filled) {
-      fill_in_metadata(s, &cs->write_buffer_initial_md,
-                       cs->write_buffer_initial_md_flags,
-                       &s->to_read_initial_md, &s->to_read_initial_md_flags,
-                       &s->to_read_initial_md_filled);
-      s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
-      grpc_metadata_batch_clear(&cs->write_buffer_initial_md);
-      cs->write_buffer_initial_md_filled = false;
-    }
-    if (cs->write_buffer_trailing_md_filled) {
-      fill_in_metadata(s, &cs->write_buffer_trailing_md, 0,
-                       &s->to_read_trailing_md, nullptr,
-                       &s->to_read_trailing_md_filled);
-      grpc_metadata_batch_clear(&cs->write_buffer_trailing_md);
-      cs->write_buffer_trailing_md_filled = false;
-    }
-    if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
-      s->cancel_other_error = cs->write_buffer_cancel_error;
-      cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
-    }
-
-    gpr_mu_unlock(&s->t->mu->mu);
-  }
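+  // gs points to caller-provided storage; construct the stream in place.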
+  new (gs) inproc_stream(t, refcount, server_data, arena);
   return 0;  // return value is not important
 }
 
-static void close_stream_locked(inproc_stream* s) {
+void close_stream_locked(inproc_stream* s) {
   if (!s->closed) {
     // Release the metadata that we would have written out
     grpc_metadata_batch_destroy(&s->write_buffer_initial_md);
@@ -341,21 +344,21 @@ static void close_stream_locked(inproc_stream* s) {
         n->stream_list_prev = p;
       }
       s->listed = false;
-      unref_stream(s, "close_stream:list");
+      s->unref("close_stream:list");
     }
     s->closed = true;
-    unref_stream(s, "close_stream:closing");
+    s->unref("close_stream:closing");
   }
 }
 
 // This function means that we are done talking/listening to the other side
-static void close_other_side_locked(inproc_stream* s, const char* reason) {
+void close_other_side_locked(inproc_stream* s, const char* reason) {
   if (s->other_side != nullptr) {
     // First release the metadata that came from the other side's arena
     grpc_metadata_batch_destroy(&s->to_read_initial_md);
     grpc_metadata_batch_destroy(&s->to_read_trailing_md);
 
-    unref_stream(s->other_side, reason);
+    s->other_side->unref(reason);
     s->other_side_closed = true;
     s->other_side = nullptr;
   } else if (!s->other_side_closed) {
@@ -367,9 +370,9 @@ static void close_other_side_locked(inproc_stream* s, const char* reason) {
 // this stream_op_batch is only one of the pending operations for this
 // stream. This is called when one of the pending operations for the stream
 // is done and about to be NULLed out
-static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
-                                         grpc_transport_stream_op_batch* op,
-                                         const char* msg) {
+void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
+                                  grpc_transport_stream_op_batch* op,
+                                  const char* msg) {
   int is_sm = static_cast<int>(op == s->send_message_op);
   int is_stm = static_cast<int>(op == s->send_trailing_md_op);
   // TODO(vjpai): We should not consider the recv ops here, since they
@@ -386,8 +389,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
   }
 }
 
-static void maybe_schedule_op_closure_locked(inproc_stream* s,
-                                             grpc_error* error) {
+void maybe_schedule_op_closure_locked(inproc_stream* s, grpc_error* error) {
   if (s && s->ops_needed && !s->op_closure_scheduled) {
     GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_REF(error));
     s->op_closure_scheduled = true;
@@ -395,7 +397,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
   }
 }
 
-static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
+void fail_helper_locked(inproc_stream* s, grpc_error* error) {
   INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
   // If we're failing this side, we need to make sure that
   // we also send or have already sent trailing metadata
@@ -525,8 +527,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
 // that the incoming byte stream's next() call will always return
 // synchronously.  That assumption is true today but may not always be
 // true in the future.
-static void message_transfer_locked(inproc_stream* sender,
-                                    inproc_stream* receiver) {
+void message_transfer_locked(inproc_stream* sender, inproc_stream* receiver) {
   size_t remaining =
       sender->send_message_op->payload->send_message.send_message->length();
   if (receiver->recv_inited) {
@@ -572,7 +573,7 @@ static void message_transfer_locked(inproc_stream* sender,
   sender->send_message_op = nullptr;
 }
 
-static void op_state_machine(void* arg, grpc_error* error) {
+void op_state_machine(void* arg, grpc_error* error) {
   // This function gets called when we have contents in the unprocessed reads
   // Get what we want based on our ops wanted
   // Schedule our appropriate closures
@@ -847,7 +848,7 @@ done:
   GRPC_ERROR_UNREF(new_err);
 }
 
-static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
+bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
   bool ret = false;  // was the cancel accepted
   INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
   if (s->cancel_self_error == GRPC_ERROR_NONE) {
@@ -900,10 +901,10 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
   return ret;
 }
 
-static void do_nothing(void* arg, grpc_error* error) {}
+void do_nothing(void* arg, grpc_error* error) {}
 
-static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
-                              grpc_transport_stream_op_batch* op) {
+void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
+                       grpc_transport_stream_op_batch* op) {
   INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
   inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
   gpr_mu* mu = &s->t->mu->mu;  // save aside in case s gets closed
@@ -1083,7 +1084,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
   GRPC_ERROR_UNREF(error);
 }
 
-static void close_transport_locked(inproc_transport* t) {
+void close_transport_locked(inproc_transport* t) {
   INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
   grpc_connectivity_state_set(
       &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
@@ -1103,7 +1104,7 @@ static void close_transport_locked(inproc_transport* t) {
   }
 }
 
-static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
+void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
   INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
   gpr_mu_lock(&t->mu->mu);
@@ -1136,39 +1137,64 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   gpr_mu_unlock(&t->mu->mu);
 }
 
-static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
-                           grpc_closure* then_schedule_closure) {
+void destroy_stream(grpc_transport* gt, grpc_stream* gs,
+                    grpc_closure* then_schedule_closure) {
   INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
   inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
   s->closure_at_destroy = then_schedule_closure;
-  really_destroy_stream(s);
+  s->~inproc_stream();
 }
 
-static void destroy_transport(grpc_transport* gt) {
+void destroy_transport(grpc_transport* gt) {
   inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
   INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
   gpr_mu_lock(&t->mu->mu);
   close_transport_locked(t);
   gpr_mu_unlock(&t->mu->mu);
-  unref_transport(t->other_side);
-  unref_transport(t);
+  t->other_side->unref();
+  t->unref();
 }
 
 /*******************************************************************************
  * INTEGRATION GLUE
  */
 
-static void set_pollset(grpc_transport* gt, grpc_stream* gs,
-                        grpc_pollset* pollset) {
+void set_pollset(grpc_transport* gt, grpc_stream* gs, grpc_pollset* pollset) {
   // Nothing to do here
 }
 
-static void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
-                            grpc_pollset_set* pollset_set) {
+void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
+                     grpc_pollset_set* pollset_set) {
   // Nothing to do here
 }
 
-static grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
+grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
+
+const grpc_transport_vtable inproc_vtable = {
+    sizeof(inproc_stream), "inproc",        init_stream,
+    set_pollset,           set_pollset_set, perform_stream_op,
+    perform_transport_op,  destroy_stream,  destroy_transport,
+    get_endpoint};
+
+/*******************************************************************************
+ * Main inproc transport functions
+ */
+void inproc_transports_create(grpc_transport** server_transport,
+                              const grpc_channel_args* server_args,
+                              grpc_transport** client_transport,
+                              const grpc_channel_args* client_args) {
+  INPROC_LOG(GPR_INFO, "inproc_transports_create");
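+  // Share one lock between both sides since both sides get affected.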
+  shared_mu* mu = new (gpr_malloc(sizeof(*mu))) shared_mu();
+  inproc_transport* st = new (gpr_malloc(sizeof(*st)))
+      inproc_transport(&inproc_vtable, mu, /*is_client=*/false);
+  inproc_transport* ct = new (gpr_malloc(sizeof(*ct)))
+      inproc_transport(&inproc_vtable, mu, /*is_client=*/true);
+  st->other_side = ct;
+  ct->other_side = st;
+  *server_transport = reinterpret_cast<grpc_transport*>(st);
+  *client_transport = reinterpret_cast<grpc_transport*>(ct);
+}
+}  // namespace
 
 /*******************************************************************************
  * GLOBAL INIT AND DESTROY
@@ -1190,48 +1216,6 @@ void grpc_inproc_transport_init(void) {
   g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
 }
 
-static const grpc_transport_vtable inproc_vtable = {
-    sizeof(inproc_stream), "inproc",        init_stream,
-    set_pollset,           set_pollset_set, perform_stream_op,
-    perform_transport_op,  destroy_stream,  destroy_transport,
-    get_endpoint};
-
-/*******************************************************************************
- * Main inproc transport functions
- */
-static void inproc_transports_create(grpc_transport** server_transport,
-                                     const grpc_channel_args* server_args,
-                                     grpc_transport** client_transport,
-                                     const grpc_channel_args* client_args) {
-  INPROC_LOG(GPR_INFO, "inproc_transports_create");
-  inproc_transport* st =
-      static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
-  inproc_transport* ct =
-      static_cast<inproc_transport*>(gpr_zalloc(sizeof(*ct)));
-  // Share one lock between both sides since both sides get affected
-  st->mu = ct->mu = static_cast<shared_mu*>(gpr_malloc(sizeof(*st->mu)));
-  gpr_mu_init(&st->mu->mu);
-  gpr_ref_init(&st->mu->refs, 2);
-  st->base.vtable = &inproc_vtable;
-  ct->base.vtable = &inproc_vtable;
-  // Start each side of transport with 2 refs since they each have a ref
-  // to the other
-  gpr_ref_init(&st->refs, 2);
-  gpr_ref_init(&ct->refs, 2);
-  st->is_client = false;
-  ct->is_client = true;
-  grpc_connectivity_state_init(&st->connectivity, GRPC_CHANNEL_READY,
-                               "inproc_server");
-  grpc_connectivity_state_init(&ct->connectivity, GRPC_CHANNEL_READY,
-                               "inproc_client");
-  st->other_side = ct;
-  ct->other_side = st;
-  st->stream_list = nullptr;
-  ct->stream_list = nullptr;
-  *server_transport = reinterpret_cast<grpc_transport*>(st);
-  *client_transport = reinterpret_cast<grpc_transport*>(ct);
-}
-
 grpc_channel* grpc_inproc_channel_create(grpc_server* server,
                                          grpc_channel_args* args,
                                          void* reserved) {
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 35c3fb01eaa..0de8c670790 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -79,11 +79,11 @@ typedef struct {
 } grpc_call_stats;
 
 /** Information about the call upon completion. */
-typedef struct {
+struct grpc_call_final_info {
   grpc_call_stats stats;
-  grpc_status_code final_status;
-  const char* error_string;
-} grpc_call_final_info;
+  grpc_status_code final_status = GRPC_STATUS_OK;
+  const char* error_string = nullptr;
+};
 
 /* Channel filters specify:
    1. the amount of memory needed in the channel & call (via the sizeof_XXX
diff --git a/src/core/lib/channel/context.h b/src/core/lib/channel/context.h
index 5daf48a9a9e..763e4ffc9fe 100644
--- a/src/core/lib/channel/context.h
+++ b/src/core/lib/channel/context.h
@@ -41,9 +41,9 @@ typedef enum {
   GRPC_CONTEXT_COUNT
 } grpc_context_index;
 
-typedef struct {
-  void* value;
-  void (*destroy)(void*);
-} grpc_call_context_element;
+struct grpc_call_context_element {
+  void* value = nullptr;
+  void (*destroy)(void*) = nullptr;
+};
 
 #endif /* GRPC_CORE_LIB_CHANNEL_CONTEXT_H */
diff --git a/src/core/lib/gpr/arena.cc b/src/core/lib/gpr/arena.cc
index 77f9357146d..836a7ca793d 100644
--- a/src/core/lib/gpr/arena.cc
+++ b/src/core/lib/gpr/arena.cc
@@ -21,6 +21,7 @@
 #include "src/core/lib/gpr/arena.h"
 
 #include <string.h>
+#include <new>
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/atm.h>
@@ -28,34 +29,79 @@
 #include <grpc/support/sync.h>
 
 #include "src/core/lib/gpr/alloc.h"
+#include "src/core/lib/gpr/env.h"
+#include "src/core/lib/gprpp/memory.h"
+
+namespace {
+enum init_strategy {
+  NO_INIT,        // Do not initialize the arena blocks.
+  ZERO_INIT,      // Initialize arena blocks with 0.
+  NON_ZERO_INIT,  // Initialize arena blocks with a non-zero value.
+};
+
+gpr_once g_init_strategy_once = GPR_ONCE_INIT;
+init_strategy g_init_strategy = NO_INIT;
+}  // namespace
+
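+// Reads GRPC_ARENA_INIT_STRATEGY; unset or unrecognized values fall back to
+// NO_INIT. Invoked at most once via g_init_strategy_once.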
+static void set_strategy_from_env() {
+  char* str = gpr_getenv("GRPC_ARENA_INIT_STRATEGY");
+  if (str == nullptr) {
+    g_init_strategy = NO_INIT;
+  } else if (strcmp(str, "zero_init") == 0) {
+    g_init_strategy = ZERO_INIT;
+  } else if (strcmp(str, "non_zero_init") == 0) {
+    g_init_strategy = NON_ZERO_INIT;
+  } else {
+    g_init_strategy = NO_INIT;
+  }
+  gpr_free(str);
+}
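+// Allocates an aligned block and applies the configured init strategy:
+// leave it uninitialized, zero it, or fill it with 0xFE so that reads of
+// uninitialized arena memory are easier to spot.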
+
+static void* gpr_arena_alloc_maybe_init(size_t size) {
+  void* mem = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
+  gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+  if (GPR_UNLIKELY(g_init_strategy != NO_INIT)) {
+    if (g_init_strategy == ZERO_INIT) {
+      memset(mem, 0, size);
+    } else {  // NON_ZERO_INIT.
+      memset(mem, 0xFE, size);
+    }
+  }
+  return mem;
+}
+
+void gpr_arena_init() {
+  gpr_once_init(&g_init_strategy_once, set_strategy_from_env);
+}
 
 // Uncomment this to use a simple arena that simply allocates the
 // requested amount of memory for each call to gpr_arena_alloc().  This
 // effectively eliminates the efficiency gain of using an arena, but it
 // may be useful for debugging purposes.
 //#define SIMPLE_ARENA_FOR_DEBUGGING
-
 #ifdef SIMPLE_ARENA_FOR_DEBUGGING
 
 struct gpr_arena {
+  gpr_arena() { gpr_mu_init(&mu); }
+  ~gpr_arena() {
+    gpr_mu_destroy(&mu);
+    for (size_t i = 0; i < num_ptrs; ++i) {
+      gpr_free_aligned(ptrs[i]);
+    }
+    gpr_free(ptrs);
+  }
+
   gpr_mu mu;
-  void** ptrs;
-  size_t num_ptrs;
+  void** ptrs = nullptr;
+  size_t num_ptrs = 0;
 };
 
 gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
-  gpr_arena* arena = (gpr_arena*)gpr_zalloc(sizeof(*arena));
-  gpr_mu_init(&arena->mu);
-  return arena;
+  return grpc_core::New<gpr_arena>();
 }
 
 size_t gpr_arena_destroy(gpr_arena* arena) {
-  gpr_mu_destroy(&arena->mu);
-  for (size_t i = 0; i < arena->num_ptrs; ++i) {
-    gpr_free(arena->ptrs[i]);
-  }
-  gpr_free(arena->ptrs);
-  gpr_free(arena);
+  grpc_core::Delete(arena);
   return 1;  // Value doesn't matter, since it won't be used.
 }
 
@@ -63,7 +109,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
   gpr_mu_lock(&arena->mu);
   arena->ptrs =
       (void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
-  void* retval = arena->ptrs[arena->num_ptrs++] = gpr_zalloc(size);
+  void* retval = arena->ptrs[arena->num_ptrs++] =
+      gpr_arena_alloc_maybe_init(size);
   gpr_mu_unlock(&arena->mu);
   return retval;
 }
@@ -77,45 +124,45 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
 // would allow us to use the alignment actually needed by the caller.
 
 typedef struct zone {
-  zone* next;
+  zone* next = nullptr;
 } zone;
 
 struct gpr_arena {
+  explicit gpr_arena(size_t initial_size)
+      : initial_zone_size(initial_size), last_zone(&initial_zone) {
+    gpr_mu_init(&arena_growth_mutex);
+  }
+  ~gpr_arena() {
+    gpr_mu_destroy(&arena_growth_mutex);
+    zone* z = initial_zone.next;
+    while (z) {
+      zone* next_z = z->next;
+      z->~zone();
+      gpr_free_aligned(z);
+      z = next_z;
+    }
+  }
+
   // Keep track of the total used size. We use this in our call sizing
   // hysteresis.
-  gpr_atm total_used;
+  gpr_atm total_used = 0;
   size_t initial_zone_size;
   zone initial_zone;
   zone* last_zone;
   gpr_mu arena_growth_mutex;
 };
 
-static void* zalloc_aligned(size_t size) {
-  void* ptr = gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
-  memset(ptr, 0, size);
-  return ptr;
-}
-
 gpr_arena* gpr_arena_create(size_t initial_size) {
   initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
-  gpr_arena* a = static_cast<gpr_arena*>(zalloc_aligned(
-      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size));
-  a->initial_zone_size = initial_size;
-  a->last_zone = &a->initial_zone;
-  gpr_mu_init(&a->arena_growth_mutex);
-  return a;
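+  // The initial zone's storage is carved out of this same allocation,
+  // immediately after the (aligned) gpr_arena header.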
+  return new (gpr_arena_alloc_maybe_init(
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size))
+      gpr_arena(initial_size);
 }
 
 size_t gpr_arena_destroy(gpr_arena* arena) {
-  gpr_mu_destroy(&arena->arena_growth_mutex);
-  gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
-  zone* z = arena->initial_zone.next;
+  const gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
+  arena->~gpr_arena();
   gpr_free_aligned(arena);
-  while (z) {
-    zone* next_z = z->next;
-    gpr_free_aligned(z);
-    z = next_z;
-  }
   return static_cast<size_t>(size);
 }
 
@@ -132,8 +179,8 @@ void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
     // sizing hysteresis (that is, most calls should have a large enough initial
     // zone and will not need to grow the arena).
     gpr_mu_lock(&arena->arena_growth_mutex);
-    zone* z = static_cast<zone*>(
-        zalloc_aligned(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size));
+    zone* z = new (gpr_arena_alloc_maybe_init(
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
     arena->last_zone->next = z;
     arena->last_zone = z;
     gpr_mu_unlock(&arena->arena_growth_mutex);
diff --git a/src/core/lib/gpr/arena.h b/src/core/lib/gpr/arena.h
index 6d2a073dd58..069892b2282 100644
--- a/src/core/lib/gpr/arena.h
+++ b/src/core/lib/gpr/arena.h
@@ -37,5 +37,7 @@ gpr_arena* gpr_arena_create(size_t initial_size);
 void* gpr_arena_alloc(gpr_arena* arena, size_t size);
 // Destroy an arena, returning the total number of bytes allocated
 size_t gpr_arena_destroy(gpr_arena* arena);
+// Initializes the arena component; reads GRPC_ARENA_INIT_STRATEGY once.
+void gpr_arena_init();
 
 #endif /* GRPC_CORE_LIB_GPR_ARENA_H */
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index 00a839b64c6..90dda45ba37 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -40,6 +40,8 @@ static gpr_atm encode_cancel_state_error(grpc_error* error) {
 }
 
 void grpc_call_combiner_init(grpc_call_combiner* call_combiner) {
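+  // The combiner may live in arena memory that is no longer guaranteed to be
+  // zero-initialized, so store the zeros explicitly.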
+  gpr_atm_no_barrier_store(&call_combiner->cancel_state, 0);
+  gpr_atm_no_barrier_store(&call_combiner->size, 0);
   gpr_mpscq_init(&call_combiner->queue);
 }
 
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 6f7ddd40435..c943fb15570 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -41,12 +41,12 @@
 extern grpc_core::TraceFlag grpc_call_combiner_trace;
 
 typedef struct {
-  gpr_atm size;  // size_t, num closures in queue or currently executing
+  gpr_atm size = 0;  // size_t, num closures in queue or currently executing
   gpr_mpscq queue;
   // Either 0 (if not cancelled and no cancellation closure set),
   // a grpc_closure* (if the lowest bit is 0),
   // or a grpc_error* (if the lowest bit is 1).
-  gpr_atm cancel_state;
+  gpr_atm cancel_state = 0;
 } grpc_call_combiner;
 
 // Assumes memory was initialized to zero.
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index f14c723844e..bde3437c02e 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -114,6 +114,7 @@ inline grpc_closure* grpc_closure_init(grpc_closure* closure,
   closure->cb = cb;
   closure->cb_arg = cb_arg;
   closure->scheduler = scheduler;
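+  // Closures may be allocated in arena memory, which is no longer guaranteed
+  // to be zero-initialized, so reset the error explicitly.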
+  closure->error_data.error = GRPC_ERROR_NONE;
 #ifndef NDEBUG
   closure->scheduled = false;
   closure->file_initiated = nullptr;
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index a95e08524c5..6f4c5bdd665 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -34,13 +34,13 @@ typedef enum grpc_pollset_tag {
  * functions that accept a pollset XOR a pollset_set to do so through an
  * abstract interface. No ownership is taken. */
 
-typedef struct grpc_polling_entity {
+struct grpc_polling_entity {
   union {
-    grpc_pollset* pollset;
+    grpc_pollset* pollset = nullptr;
     grpc_pollset_set* pollset_set;
   } pollent;
-  grpc_pollset_tag tag;
-} grpc_polling_entity;
+  grpc_pollset_tag tag = GRPC_POLLS_NONE;
+};
 
 grpc_polling_entity grpc_polling_entity_create_from_pollset_set(
     grpc_pollset_set* pollset_set);
diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc
index 94c9c69fcdb..16f40b4f55d 100644
--- a/src/core/lib/security/context/security_context.cc
+++ b/src/core/lib/security/context/security_context.cc
@@ -81,38 +81,45 @@ void grpc_auth_context_release(grpc_auth_context* context) {
 }
 
 /* --- grpc_client_security_context --- */
+grpc_client_security_context::~grpc_client_security_context() {
+  grpc_call_credentials_unref(creds);
+  GRPC_AUTH_CONTEXT_UNREF(auth_context, "client_security_context");
+  if (extension.instance != nullptr && extension.destroy != nullptr) {
+    extension.destroy(extension.instance);
+  }
+}
 
 grpc_client_security_context* grpc_client_security_context_create(
     gpr_arena* arena) {
-  return static_cast<grpc_client_security_context*>(
-      gpr_arena_alloc(arena, sizeof(grpc_client_security_context)));
+  return new (gpr_arena_alloc(arena, sizeof(grpc_client_security_context)))
+      grpc_client_security_context();
 }
 
 void grpc_client_security_context_destroy(void* ctx) {
   grpc_core::ExecCtx exec_ctx;
   grpc_client_security_context* c =
       static_cast<grpc_client_security_context*>(ctx);
-  grpc_call_credentials_unref(c->creds);
-  GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
-  if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
-    c->extension.destroy(c->extension.instance);
-  }
+  c->~grpc_client_security_context();
 }
 
 /* --- grpc_server_security_context --- */
+grpc_server_security_context::~grpc_server_security_context() {
+  GRPC_AUTH_CONTEXT_UNREF(auth_context, "server_security_context");
+  if (extension.instance != nullptr && extension.destroy != nullptr) {
+    extension.destroy(extension.instance);
+  }
+}
+
 grpc_server_security_context* grpc_server_security_context_create(
     gpr_arena* arena) {
-  return static_cast<grpc_server_security_context*>(
-      gpr_arena_alloc(arena, sizeof(grpc_server_security_context)));
+  return new (gpr_arena_alloc(arena, sizeof(grpc_server_security_context)))
+      grpc_server_security_context();
 }
 
 void grpc_server_security_context_destroy(void* ctx) {
   grpc_server_security_context* c =
       static_cast<grpc_server_security_context*>(ctx);
-  GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
-  if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
-    c->extension.destroy(c->extension.instance);
-  }
+  c->~grpc_server_security_context();
 }
 
 /* --- grpc_auth_context --- */
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index a8e1c3fd647..e45415f63b8 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -34,18 +34,20 @@ struct gpr_arena;
 
 /* Property names are always NULL terminated. */
 
-typedef struct {
-  grpc_auth_property* array;
-  size_t count;
-  size_t capacity;
-} grpc_auth_property_array;
+struct grpc_auth_property_array {
+  grpc_auth_property* array = nullptr;
+  size_t count = 0;
+  size_t capacity = 0;
+};
 
 struct grpc_auth_context {
-  struct grpc_auth_context* chained;
+  grpc_auth_context() { gpr_ref_init(&refcount, 0); }
+
+  struct grpc_auth_context* chained = nullptr;
   grpc_auth_property_array properties;
   gpr_refcount refcount;
-  const char* peer_identity_property_name;
-  grpc_pollset* pollset;
+  const char* peer_identity_property_name = nullptr;
+  grpc_pollset* pollset = nullptr;
 };
 
 /* Creation. */
@@ -76,20 +78,23 @@ void grpc_auth_property_reset(grpc_auth_property* property);
    Extension to the security context that may be set in a filter and accessed
    later by a higher level method on a grpc_call object. */
 
-typedef struct {
-  void* instance;
-  void (*destroy)(void*);
-} grpc_security_context_extension;
+struct grpc_security_context_extension {
+  void* instance = nullptr;
+  void (*destroy)(void*) = nullptr;
+};
 
 /* --- grpc_client_security_context ---
 
    Internal client-side security context. */
 
-typedef struct {
-  grpc_call_credentials* creds;
-  grpc_auth_context* auth_context;
+struct grpc_client_security_context {
+  grpc_client_security_context() = default;
+  ~grpc_client_security_context();
+
+  grpc_call_credentials* creds = nullptr;
+  grpc_auth_context* auth_context = nullptr;
   grpc_security_context_extension extension;
-} grpc_client_security_context;
+};
 
 grpc_client_security_context* grpc_client_security_context_create(
     gpr_arena* arena);
@@ -99,10 +104,13 @@ void grpc_client_security_context_destroy(void* ctx);
 
    Internal server-side security context. */
 
-typedef struct {
-  grpc_auth_context* auth_context;
+struct grpc_server_security_context {
+  grpc_server_security_context() = default;
+  ~grpc_server_security_context();
+
+  grpc_auth_context* auth_context = nullptr;
   grpc_security_context_extension extension;
-} grpc_server_security_context;
+};
 
 grpc_server_security_context* grpc_server_security_context_create(
     gpr_arena* arena);
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index b486d25ab2e..3878958b38b 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -142,8 +142,8 @@ grpc_channel_credentials* grpc_channel_credentials_find_in_args(
 /* --- grpc_credentials_mdelem_array. --- */
 
 typedef struct {
-  grpc_mdelem* md;
-  size_t size;
+  grpc_mdelem* md = nullptr;
+  size_t size = 0;
 } grpc_credentials_mdelem_array;
 
 /// Takes a new ref to \a md.
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index e34eacc8d70..6955e8698e2 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -43,20 +43,39 @@
 namespace {
 /* We can have per-call credentials. */
 struct call_data {
+  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+      : arena(args.arena),
+        owning_call(args.call_stack),
+        call_combiner(args.call_combiner) {}
+
+  // This method is effectively the dtor of this class. However, since
+  // `get_request_metadata_cancel_closure` can run in parallel to
+  // `destroy_call_elem`, we cannot call the dtor from either of them;
+  // otherwise fields could be accessed after the dtor has run, and msan
+  // would correctly complain about accessing destroyed memory.
+  void destroy() {
+    grpc_credentials_mdelem_array_destroy(&md_array);
+    grpc_call_credentials_unref(creds);
+    grpc_slice_unref_internal(host);
+    grpc_slice_unref_internal(method);
+    grpc_auth_metadata_context_reset(&auth_md_context);
+  }
+
   gpr_arena* arena;
   grpc_call_stack* owning_call;
   grpc_call_combiner* call_combiner;
-  grpc_call_credentials* creds;
-  grpc_slice host;
-  grpc_slice method;
+  grpc_call_credentials* creds = nullptr;
+  grpc_slice host = grpc_empty_slice();
+  grpc_slice method = grpc_empty_slice();
   /* pollset{_set} bound to this call; if we need to make external
      network requests, they should be done under a pollset added to this
      pollset_set so that work can progress when this call wants work to progress
   */
-  grpc_polling_entity* pollent;
+  grpc_polling_entity* pollent = nullptr;
   grpc_credentials_mdelem_array md_array;
-  grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT];
-  grpc_auth_metadata_context auth_md_context;
+  grpc_linked_mdelem md_links[MAX_CREDENTIALS_METADATA_COUNT] = {};
+  grpc_auth_metadata_context auth_md_context =
+      grpc_auth_metadata_context();  // Zero-initialize the C struct.
   grpc_closure async_result_closure;
   grpc_closure check_call_host_cancel_closure;
   grpc_closure get_request_metadata_cancel_closure;
@@ -334,12 +353,7 @@ static void auth_start_transport_stream_op_batch(
 /* Constructor for call_data */
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  calld->arena = args->arena;
-  calld->owning_call = args->call_stack;
-  calld->call_combiner = args->call_combiner;
-  calld->host = grpc_empty_slice();
-  calld->method = grpc_empty_slice();
+  new (elem->call_data) call_data(elem, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -354,11 +368,7 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  grpc_credentials_mdelem_array_destroy(&calld->md_array);
-  grpc_call_credentials_unref(calld->creds);
-  grpc_slice_unref_internal(calld->host);
-  grpc_slice_unref_internal(calld->method);
-  grpc_auth_metadata_context_reset(&calld->auth_md_context);
+  calld->destroy();
 }
 
 /* Constructor for channel_data */
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index f40f969bb7f..34d8435907f 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -22,6 +22,8 @@
    headers. Therefore, sockaddr.h must always be included first */
 #include <grpc/support/port_platform.h>
 
+#include <new>
+
 #include "src/core/lib/iomgr/sockaddr.h"
 
 #include <grpc/slice.h>
@@ -31,6 +33,7 @@
 #include <grpc/support/sync.h>
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/gpr/string.h"
+#include "src/core/lib/gprpp/memory.h"
 #include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/security/transport/secure_endpoint.h"
 #include "src/core/lib/security/transport/tsi_error.h"
@@ -40,44 +43,68 @@
 
 #define STAGING_BUFFER_SIZE 8192
 
-typedef struct {
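+// Forward declaration so the secure_endpoint ctor below can wire its on_read
+// closure to this callback.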
+static void on_read(void* user_data, grpc_error* error);
+
+namespace {
+struct secure_endpoint {
+  secure_endpoint(const grpc_endpoint_vtable* vtable,
+                  tsi_frame_protector* protector,
+                  tsi_zero_copy_grpc_protector* zero_copy_protector,
+                  grpc_endpoint* transport, grpc_slice* leftover_slices,
+                  size_t leftover_nslices)
+      : wrapped_ep(transport),
+        protector(protector),
+        zero_copy_protector(zero_copy_protector) {
+    base.vtable = vtable;
+    gpr_mu_init(&protector_mu);
+    GRPC_CLOSURE_INIT(&on_read, ::on_read, this, grpc_schedule_on_exec_ctx);
+    grpc_slice_buffer_init(&source_buffer);
+    grpc_slice_buffer_init(&leftover_bytes);
+    for (size_t i = 0; i < leftover_nslices; i++) {
+      grpc_slice_buffer_add(&leftover_bytes,
+                            grpc_slice_ref_internal(leftover_slices[i]));
+    }
+    grpc_slice_buffer_init(&output_buffer);
+    gpr_ref_init(&ref, 1);
+  }
+
+  ~secure_endpoint() {
+    grpc_endpoint_destroy(wrapped_ep);
+    tsi_frame_protector_destroy(protector);
+    tsi_zero_copy_grpc_protector_destroy(zero_copy_protector);
+    grpc_slice_buffer_destroy_internal(&source_buffer);
+    grpc_slice_buffer_destroy_internal(&leftover_bytes);
+    grpc_slice_unref_internal(read_staging_buffer);
+    grpc_slice_unref_internal(write_staging_buffer);
+    grpc_slice_buffer_destroy_internal(&output_buffer);
+    gpr_mu_destroy(&protector_mu);
+  }
+
   grpc_endpoint base;
   grpc_endpoint* wrapped_ep;
   struct tsi_frame_protector* protector;
   struct tsi_zero_copy_grpc_protector* zero_copy_protector;
   gpr_mu protector_mu;
   /* saved upper level callbacks and user_data. */
-  grpc_closure* read_cb;
-  grpc_closure* write_cb;
+  grpc_closure* read_cb = nullptr;
+  grpc_closure* write_cb = nullptr;
   grpc_closure on_read;
-  grpc_slice_buffer* read_buffer;
+  grpc_slice_buffer* read_buffer = nullptr;
   grpc_slice_buffer source_buffer;
   /* saved handshaker leftover data to unprotect. */
   grpc_slice_buffer leftover_bytes;
   /* buffers for read and write */
-  grpc_slice read_staging_buffer;
-
-  grpc_slice write_staging_buffer;
+  grpc_slice read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
+  grpc_slice write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
   grpc_slice_buffer output_buffer;
 
   gpr_refcount ref;
-} secure_endpoint;
+};
+}  // namespace
 
 grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint");
 
-static void destroy(secure_endpoint* secure_ep) {
-  secure_endpoint* ep = secure_ep;
-  grpc_endpoint_destroy(ep->wrapped_ep);
-  tsi_frame_protector_destroy(ep->protector);
-  tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector);
-  grpc_slice_buffer_destroy_internal(&ep->leftover_bytes);
-  grpc_slice_unref_internal(ep->read_staging_buffer);
-  grpc_slice_unref_internal(ep->write_staging_buffer);
-  grpc_slice_buffer_destroy_internal(&ep->output_buffer);
-  grpc_slice_buffer_destroy_internal(&ep->source_buffer);
-  gpr_mu_destroy(&ep->protector_mu);
-  gpr_free(ep);
-}
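+// grpc_core::Delete runs ~secure_endpoint, which tears down the wrapped
+// endpoint and the buffers above, then frees the memory.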
+static void destroy(secure_endpoint* ep) { grpc_core::Delete(ep); }
 
 #ifndef NDEBUG
 #define SECURE_ENDPOINT_UNREF(ep, reason) \
@@ -405,25 +432,8 @@ grpc_endpoint* grpc_secure_endpoint_create(
     struct tsi_zero_copy_grpc_protector* zero_copy_protector,
     grpc_endpoint* transport, grpc_slice* leftover_slices,
     size_t leftover_nslices) {
-  size_t i;
-  secure_endpoint* ep =
-      static_cast<secure_endpoint*>(gpr_malloc(sizeof(secure_endpoint)));
-  ep->base.vtable = &vtable;
-  ep->wrapped_ep = transport;
-  ep->protector = protector;
-  ep->zero_copy_protector = zero_copy_protector;
-  grpc_slice_buffer_init(&ep->leftover_bytes);
-  for (i = 0; i < leftover_nslices; i++) {
-    grpc_slice_buffer_add(&ep->leftover_bytes,
-                          grpc_slice_ref_internal(leftover_slices[i]));
-  }
-  ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
-  ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
-  grpc_slice_buffer_init(&ep->output_buffer);
-  grpc_slice_buffer_init(&ep->source_buffer);
-  ep->read_buffer = nullptr;
-  GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
-  gpr_mu_init(&ep->protector_mu);
-  gpr_ref_init(&ep->ref, 1);
+  secure_endpoint* ep = grpc_core::New<secure_endpoint>(
+      &vtable, protector, zero_copy_protector, transport, leftover_slices,
+      leftover_nslices);
   return &ep->base;
 }
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index b99fc5e1783..362f49a5843 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -28,6 +28,9 @@
 #include "src/core/lib/security/transport/auth_filters.h"
 #include "src/core/lib/slice/slice_internal.h"
 
+static void recv_initial_metadata_ready(void* arg, grpc_error* error);
+static void recv_trailing_metadata_ready(void* user_data, grpc_error* error);
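+// (Defined later in this file; declared up front so the call_data ctor can
+// install them as closure callbacks.)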
+
 namespace {
 enum async_state {
   STATE_INIT = 0,
@@ -35,28 +38,55 @@ enum async_state {
   STATE_CANCELLED,
 };
 
+struct channel_data {
+  grpc_auth_context* auth_context;
+  grpc_server_credentials* creds;
+};
+
 struct call_data {
+  call_data(grpc_call_element* elem, const grpc_call_element_args& args)
+      : call_combiner(args.call_combiner), owning_call(args.call_stack) {
+    GRPC_CLOSURE_INIT(&recv_initial_metadata_ready,
+                      ::recv_initial_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready,
+                      ::recv_trailing_metadata_ready, elem,
+                      grpc_schedule_on_exec_ctx);
+    // Create server security context.  Set its auth context from channel
+    // data and save it in the call context.
+    grpc_server_security_context* server_ctx =
+        grpc_server_security_context_create(args.arena);
+    channel_data* chand = static_cast<channel_data*>(elem->channel_data);
+    server_ctx->auth_context =
+        GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
+    if (args.context[GRPC_CONTEXT_SECURITY].value != nullptr) {
+      args.context[GRPC_CONTEXT_SECURITY].destroy(
+          args.context[GRPC_CONTEXT_SECURITY].value);
+    }
+    args.context[GRPC_CONTEXT_SECURITY].value = server_ctx;
+    args.context[GRPC_CONTEXT_SECURITY].destroy =
+        grpc_server_security_context_destroy;
+  }
+
+  ~call_data() { GRPC_ERROR_UNREF(recv_initial_metadata_error); }
+
   grpc_call_combiner* call_combiner;
   grpc_call_stack* owning_call;
   grpc_transport_stream_op_batch* recv_initial_metadata_batch;
   grpc_closure* original_recv_initial_metadata_ready;
   grpc_closure recv_initial_metadata_ready;
-  grpc_error* recv_initial_metadata_error;
+  grpc_error* recv_initial_metadata_error = GRPC_ERROR_NONE;
   grpc_closure recv_trailing_metadata_ready;
   grpc_closure* original_recv_trailing_metadata_ready;
   grpc_error* recv_trailing_metadata_error;
-  bool seen_recv_trailing_metadata_ready;
+  bool seen_recv_trailing_metadata_ready = false;
   grpc_metadata_array md;
   const grpc_metadata* consumed_md;
   size_t num_consumed_md;
   grpc_closure cancel_closure;
-  gpr_atm state;  // async_state
+  gpr_atm state = STATE_INIT;  // async_state
 };
 
-struct channel_data {
-  grpc_auth_context* auth_context;
-  grpc_server_credentials* creds;
-};
 }  // namespace
 
 static grpc_metadata_array metadata_batch_to_md_array(
@@ -244,29 +274,7 @@ static void auth_start_transport_stream_op_batch(
 /* Constructor for call_data */
 static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  call_data* calld = static_cast<call_data*>(elem->call_data);
-  channel_data* chand = static_cast<channel_data*>(elem->channel_data);
-  calld->call_combiner = args->call_combiner;
-  calld->owning_call = args->call_stack;
-  GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
-                    recv_initial_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_INIT(&calld->recv_trailing_metadata_ready,
-                    recv_trailing_metadata_ready, elem,
-                    grpc_schedule_on_exec_ctx);
-  // Create server security context.  Set its auth context from channel
-  // data and save it in the call context.
-  grpc_server_security_context* server_ctx =
-      grpc_server_security_context_create(args->arena);
-  server_ctx->auth_context =
-      GRPC_AUTH_CONTEXT_REF(chand->auth_context, "server_auth_filter");
-  if (args->context[GRPC_CONTEXT_SECURITY].value != nullptr) {
-    args->context[GRPC_CONTEXT_SECURITY].destroy(
-        args->context[GRPC_CONTEXT_SECURITY].value);
-  }
-  args->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
-  args->context[GRPC_CONTEXT_SECURITY].destroy =
-      grpc_server_security_context_destroy;
+  new (elem->call_data) call_data(elem, *args);
   return GRPC_ERROR_NONE;
 }
 
@@ -275,7 +283,7 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
-  GRPC_ERROR_UNREF(calld->recv_initial_metadata_error);
+  calld->~call_data();
 }
 
 /* Constructor for channel_data */
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a9349afa68a..735a78ad086 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -72,8 +72,11 @@
 // Used to create arena for the first call.
 #define ESTIMATED_MDELEM_COUNT 16
 
-typedef struct batch_control {
-  grpc_call* call;
+struct batch_control {
+  batch_control() { gpr_ref_init(&steps_to_complete, 0); }
+
+  grpc_call* call = nullptr;
+  grpc_transport_stream_op_batch op;
   /* Share memory for cq_completion and notify_tag as they are never needed
      simultaneously. Each byte used in this data structure counts as six bytes
      per call, so any savings we can make are worthwhile,
@@ -96,84 +99,110 @@ typedef struct batch_control {
   grpc_closure start_batch;
   grpc_closure finish_batch;
   gpr_refcount steps_to_complete;
-  gpr_atm batch_error;
-  grpc_transport_stream_op_batch op;
-} batch_control;
+  gpr_atm batch_error = reinterpret_cast<gpr_atm>(GRPC_ERROR_NONE);
+};
+
+struct parent_call {
+  parent_call() { gpr_mu_init(&child_list_mu); }
+  ~parent_call() { gpr_mu_destroy(&child_list_mu); }
 
-typedef struct {
   gpr_mu child_list_mu;
-  grpc_call* first_child;
-} parent_call;
+  grpc_call* first_child = nullptr;
+};
 
-typedef struct {
+struct child_call {
+  explicit child_call(grpc_call* parent) : parent(parent) {}
   grpc_call* parent;
   /** siblings: children of the same parent form a list, and this list is
      protected under
       parent->mu */
-  grpc_call* sibling_next;
-  grpc_call* sibling_prev;
-} child_call;
+  grpc_call* sibling_next = nullptr;
+  grpc_call* sibling_prev = nullptr;
+};
 
 #define RECV_NONE ((gpr_atm)0)
 #define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
 
 struct grpc_call {
+  grpc_call(gpr_arena* arena, const grpc_call_create_args& args)
+      : arena(arena),
+        cq(args.cq),
+        channel(args.channel),
+        is_client(args.server_transport_data == nullptr),
+        stream_op_payload(context) {
+    gpr_ref_init(&ext_ref, 1);
+    grpc_call_combiner_init(&call_combiner);
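+    // All four metadata batches (send/recv x initial/trailing) start with an
+    // unset (infinite) deadline.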
+    for (int i = 0; i < 2; i++) {
+      for (int j = 0; j < 2; j++) {
+        metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
+      }
+    }
+  }
+
+  ~grpc_call() {
+    gpr_free(static_cast<void*>(const_cast<char*>(final_info.error_string)));
+    grpc_call_combiner_destroy(&call_combiner);
+  }
+
   gpr_refcount ext_ref;
   gpr_arena* arena;
   grpc_call_combiner call_combiner;
   grpc_completion_queue* cq;
   grpc_polling_entity pollent;
   grpc_channel* channel;
-  gpr_timespec start_time;
-  /* parent_call* */ gpr_atm parent_call_atm;
-  child_call* child;
+  gpr_timespec start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+  /* parent_call* */ gpr_atm parent_call_atm = 0;
+  child_call* child = nullptr;
 
   /* client or server call */
   bool is_client;
   /** has grpc_call_unref been called */
-  bool destroy_called;
+  bool destroy_called = false;
   /** flag indicating that cancellation is inherited */
-  bool cancellation_is_inherited;
+  bool cancellation_is_inherited = false;
   /** which ops are in-flight */
-  bool sent_initial_metadata;
-  bool sending_message;
-  bool sent_final_op;
-  bool received_initial_metadata;
-  bool receiving_message;
-  bool requested_final_op;
-  gpr_atm any_ops_sent_atm;
-  gpr_atm received_final_op_atm;
-
-  batch_control* active_batches[MAX_CONCURRENT_BATCHES];
+  bool sent_initial_metadata = false;
+  bool sending_message = false;
+  bool sent_final_op = false;
+  bool received_initial_metadata = false;
+  bool receiving_message = false;
+  bool requested_final_op = false;
+  gpr_atm any_ops_sent_atm = 0;
+  gpr_atm received_final_op_atm = 0;
+
+  batch_control* active_batches[MAX_CONCURRENT_BATCHES] = {};
   grpc_transport_stream_op_batch_payload stream_op_payload;
 
   /* first idx: is_receiving, second idx: is_trailing */
-  grpc_metadata_batch metadata_batch[2][2];
+  grpc_metadata_batch metadata_batch[2][2] = {};
 
   /* Buffered read metadata waiting to be returned to the application.
      Element 0 is initial metadata, element 1 is trailing metadata. */
-  grpc_metadata_array* buffered_metadata[2];
+  grpc_metadata_array* buffered_metadata[2] = {};
 
   grpc_metadata compression_md;
 
   // A char* indicating the peer name.
-  gpr_atm peer_string;
+  gpr_atm peer_string = 0;
 
   /* Call data useful for reporting. Only valid after the call has
    * completed */
   grpc_call_final_info final_info;
 
   /* Compression algorithm for *incoming* data */
-  grpc_message_compression_algorithm incoming_message_compression_algorithm;
+  grpc_message_compression_algorithm incoming_message_compression_algorithm =
+      GRPC_MESSAGE_COMPRESS_NONE;
   /* Stream compression algorithm for *incoming* data */
-  grpc_stream_compression_algorithm incoming_stream_compression_algorithm;
-  /* Supported encodings (compression algorithms), a bitset */
-  uint32_t encodings_accepted_by_peer;
+  grpc_stream_compression_algorithm incoming_stream_compression_algorithm =
+      GRPC_STREAM_COMPRESS_NONE;
+  /* Supported encodings (compression algorithms), a bitset.
+   * Always support no compression. */
+  uint32_t encodings_accepted_by_peer = 1 << GRPC_MESSAGE_COMPRESS_NONE;
   /* Supported stream encodings (stream compression algorithms), a bitset */
-  uint32_t stream_encodings_accepted_by_peer;
+  uint32_t stream_encodings_accepted_by_peer = 0;
 
   /* Contexts for various subsystems (security, tracing, ...). */
-  grpc_call_context_element context[GRPC_CONTEXT_COUNT];
+  grpc_call_context_element context[GRPC_CONTEXT_COUNT] = {};
 
   /* for the client, extra metadata is initial metadata; for the
      server, it's trailing metadata */
@@ -184,14 +213,14 @@ struct grpc_call {
   grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> sending_stream;
 
   grpc_core::OrphanablePtr<grpc_core::ByteStream> receiving_stream;
-  grpc_byte_buffer** receiving_buffer;
-  grpc_slice receiving_slice;
+  grpc_byte_buffer** receiving_buffer = nullptr;
+  grpc_slice receiving_slice = grpc_empty_slice();
   grpc_closure receiving_slice_ready;
   grpc_closure receiving_stream_ready;
   grpc_closure receiving_initial_metadata_ready;
   grpc_closure receiving_trailing_metadata_ready;
-  uint32_t test_only_last_message_flags;
-  gpr_atm cancelled;
+  uint32_t test_only_last_message_flags = 0;
+  gpr_atm cancelled = 0;
 
   grpc_closure release_call;
 
@@ -207,7 +236,7 @@ struct grpc_call {
       grpc_server* server;
     } server;
   } final_op;
-  gpr_atm status_error;
+  gpr_atm status_error = 0;
 
   /* recv_state can contain one of the following values:
      RECV_NONE :                 :  no initial metadata and messages received
@@ -225,7 +254,7 @@ struct grpc_call {
 
     For 1, 4: See receiving_initial_metadata_ready() function
     For 2, 3: See receiving_stream_ready() function */
-  gpr_atm recv_state;
+  gpr_atm recv_state = 0;
 };
 
 grpc_core::TraceFlag grpc_call_error_trace(false, "call_error");
@@ -269,11 +298,10 @@ void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
 static parent_call* get_or_create_parent_call(grpc_call* call) {
   parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
   if (p == nullptr) {
-    p = static_cast<parent_call*>(gpr_arena_alloc(call->arena, sizeof(*p)));
-    gpr_mu_init(&p->child_list_mu);
+    p = new (gpr_arena_alloc(call->arena, sizeof(*p))) parent_call();
     if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
                          (gpr_atm)p)) {
-      gpr_mu_destroy(&p->child_list_mu);
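+      // Lost the CAS race: another thread installed its parent_call first;
+      // destroy ours and reload the winner below.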
+      p->~parent_call();
       p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
     }
   }
@@ -292,7 +320,9 @@ size_t grpc_call_get_initial_size_estimate() {
 grpc_error* grpc_call_create(const grpc_call_create_args* args,
                              grpc_call** out_call) {
   GPR_TIMER_SCOPE("grpc_call_create", 0);
-  size_t i, j;
+
+  GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
+
   grpc_error* error = GRPC_ERROR_NONE;
   grpc_channel_stack* channel_stack =
       grpc_channel_get_channel_stack(args->channel);
@@ -300,27 +330,19 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
   size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
   GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
   gpr_arena* arena = gpr_arena_create(initial_size);
-  call = static_cast<grpc_call*>(
-      gpr_arena_alloc(arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
-                                 channel_stack->call_stack_size));
-  gpr_ref_init(&call->ext_ref, 1);
-  gpr_atm_no_barrier_store(&call->cancelled, 0);
-  call->arena = arena;
-  grpc_call_combiner_init(&call->call_combiner);
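+  // A single arena block holds the grpc_call object followed by the channel
+  // stack's per-call data; construct the call object in place.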
+  call = new (gpr_arena_alloc(
+      arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
+                 channel_stack->call_stack_size)) grpc_call(arena, *args);
   *out_call = call;
-  call->channel = args->channel;
-  call->cq = args->cq;
-  call->start_time = gpr_now(GPR_CLOCK_MONOTONIC);
-  /* Always support no compression */
-  GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_MESSAGE_COMPRESS_NONE);
-  call->is_client = args->server_transport_data == nullptr;
-  call->stream_op_payload.context = call->context;
   grpc_slice path = grpc_empty_slice();
   if (call->is_client) {
+    call->final_op.client.status_details = nullptr;
+    call->final_op.client.status = nullptr;
+    call->final_op.client.error_string = nullptr;
     GRPC_STATS_INC_CLIENT_CALLS_CREATED();
     GPR_ASSERT(args->add_initial_metadata_count <
                MAX_SEND_EXTRA_METADATA_COUNT);
-    for (i = 0; i < args->add_initial_metadata_count; i++) {
+    for (size_t i = 0; i < args->add_initial_metadata_count; i++) {
       call->send_extra_metadata[i].md = args->add_initial_metadata[i];
       if (grpc_slice_eq(GRPC_MDKEY(args->add_initial_metadata[i]),
                         GRPC_MDSTR_PATH)) {
@@ -332,23 +354,18 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
         static_cast<int>(args->add_initial_metadata_count);
   } else {
     GRPC_STATS_INC_SERVER_CALLS_CREATED();
+    call->final_op.server.cancelled = nullptr;
     call->final_op.server.server = args->server;
     GPR_ASSERT(args->add_initial_metadata_count == 0);
     call->send_extra_metadata_count = 0;
   }
-  for (i = 0; i < 2; i++) {
-    for (j = 0; j < 2; j++) {
-      call->metadata_batch[i][j].deadline = GRPC_MILLIS_INF_FUTURE;
-    }
-  }
-  grpc_millis send_deadline = args->send_deadline;
 
+  grpc_millis send_deadline = args->send_deadline;
   bool immediately_cancel = false;
 
   if (args->parent != nullptr) {
-    call->child =
-        static_cast<child_call*>(gpr_arena_alloc(arena, sizeof(child_call)));
-    call->child->parent = args->parent;
+    call->child = new (gpr_arena_alloc(arena, sizeof(child_call)))
+        child_call(args->parent);
 
     GRPC_CALL_INTERNAL_REF(args->parent, "child");
     GPR_ASSERT(call->is_client);
@@ -382,10 +399,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
       }
     }
   }
-
   call->send_deadline = send_deadline;
-
-  GRPC_CHANNEL_INTERNAL_REF(args->channel, "call");
   /* initial refcount dropped by grpc_call_unref */
   grpc_call_element_args call_args = {CALL_STACK_FROM_CALL(call),
                                       args->server_transport_data,
@@ -413,6 +427,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
     }
     gpr_mu_unlock(&pc->child_list_mu);
   }
+
   if (error != GRPC_ERROR_NONE) {
     cancel_with_error(call, GRPC_ERROR_REF(error));
   }
@@ -487,9 +502,9 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
 static void release_call(void* call, grpc_error* error) {
   grpc_call* c = static_cast<grpc_call*>(call);
   grpc_channel* channel = c->channel;
-  gpr_free(static_cast<void*>(const_cast<char*>(c->final_info.error_string)));
-  grpc_call_combiner_destroy(&c->call_combiner);
-  grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
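+  // Save the arena pointer first: the call object lives inside the arena,
+  // so `c` is unusable once its dtor has run.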
+  gpr_arena* arena = c->arena;
+  c->~grpc_call();
+  grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(arena));
   GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
 }
 
@@ -505,7 +520,7 @@ static void destroy_call(void* call, grpc_error* error) {
   c->receiving_stream.reset();
   parent_call* pc = get_parent_call(c);
   if (pc != nullptr) {
-    gpr_mu_destroy(&pc->child_list_mu);
+    pc->~parent_call();
   }
   for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
     GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
@@ -1100,10 +1115,11 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
     if (bctl->call != nullptr) {
       return nullptr;
     }
-    memset(bctl, 0, sizeof(*bctl));
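+    // memset would stomp on default-initialized C++ members now that
+    // batch_control is a real class; run the dtor and reset explicitly.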
+    bctl->~batch_control();
+    bctl->op = {};
   } else {
-    bctl = static_cast<batch_control*>(
-        gpr_arena_alloc(call->arena, sizeof(batch_control)));
+    bctl = new (gpr_arena_alloc(call->arena, sizeof(batch_control)))
+        batch_control();
     *pslot = bctl;
   }
   bctl->call = call;
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index 230b89b3fc7..e47cb4360ea 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -322,8 +322,8 @@ static grpc_call* grpc_channel_create_call_internal(
   }
 
   grpc_call_create_args args;
-  memset(&args, 0, sizeof(args));
   args.channel = channel;
+  args.server = nullptr;
   args.parent = parent_call;
   args.propagation_mask = propagation_mask;
   args.cq = cq;
diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc
index 0ad82fed99e..c6198b8ae76 100644
--- a/src/core/lib/surface/init.cc
+++ b/src/core/lib/surface/init.cc
@@ -123,6 +123,7 @@ void grpc_init(void) {
     grpc_core::Fork::GlobalInit();
     grpc_fork_handlers_auto_register();
     gpr_time_init();
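+    // Reads GRPC_ARENA_INIT_STRATEGY once at startup to pick how arena
+    // blocks are initialized (see doc/environment_variables.md).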
+    gpr_arena_init();
     grpc_stats_init();
     grpc_slice_intern_init();
     grpc_mdctx_global_init();
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 72391ca697b..93b19338098 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -824,11 +824,16 @@ static void accept_stream(void* cd, grpc_transport* transport,
   channel_data* chand = static_cast<channel_data*>(cd);
   /* create a call */
   grpc_call_create_args args;
-  memset(&args, 0, sizeof(args));
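+  // Initialize every field explicitly; memset is no longer safe in case
+  // grpc_call_create_args grows non-trivial C++ members.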
   args.channel = chand->channel;
+  args.server = chand->server;
+  args.parent = nullptr;
+  args.propagation_mask = 0;
+  args.cq = nullptr;
+  args.pollset_set_alternative = nullptr;
   args.server_transport_data = transport_server_data;
+  args.add_initial_metadata = nullptr;
+  args.add_initial_metadata_count = 0;
   args.send_deadline = GRPC_MILLIS_INF_FUTURE;
-  args.server = chand->server;
   grpc_call* call;
   grpc_error* error = grpc_call_create(&args, &call);
   grpc_call_element* elem =
@@ -840,8 +845,9 @@ static void accept_stream(void* cd, grpc_transport* transport,
   }
   call_data* calld = static_cast<call_data*>(elem->call_data);
   grpc_op op;
-  memset(&op, 0, sizeof(op));
   op.op = GRPC_OP_RECV_INITIAL_METADATA;
+  op.flags = 0;
+  op.reserved = nullptr;
   op.data.recv_initial_metadata.recv_initial_metadata =
       &calld->initial_metadata;
   GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 0bcbb32d1fe..f6e8bbf2052 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -31,9 +31,11 @@
 #include "src/core/lib/transport/static_metadata.h"
 
 typedef struct grpc_linked_mdelem {
+  grpc_linked_mdelem() {}
+
   grpc_mdelem md;
-  struct grpc_linked_mdelem* next;
-  struct grpc_linked_mdelem* prev;
+  struct grpc_linked_mdelem* next = nullptr;
+  struct grpc_linked_mdelem* prev = nullptr;
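+  // md and reserved are left uninitialized, presumably to avoid paying for
+  // zeroing on hot metadata paths; only the links need defaults.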
   void* reserved;
 } grpc_linked_mdelem;
 
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 9e784635c69..edfa7030d13 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -81,16 +81,16 @@ void grpc_stream_unref(grpc_stream_refcount* refcount);
 grpc_slice grpc_slice_from_stream_owned_buffer(grpc_stream_refcount* refcount,
                                                void* buffer, size_t length);
 
-typedef struct {
-  uint64_t framing_bytes;
-  uint64_t data_bytes;
-  uint64_t header_bytes;
-} grpc_transport_one_way_stats;
+struct grpc_transport_one_way_stats {
+  uint64_t framing_bytes = 0;
+  uint64_t data_bytes = 0;
+  uint64_t header_bytes = 0;
+};
 
-typedef struct grpc_transport_stream_stats {
+struct grpc_transport_stream_stats {
   grpc_transport_one_way_stats incoming;
   grpc_transport_one_way_stats outgoing;
-} grpc_transport_stream_stats;
+};
 
 void grpc_transport_move_one_way_stats(grpc_transport_one_way_stats* from,
                                        grpc_transport_one_way_stats* to);
@@ -121,7 +121,16 @@ typedef struct grpc_transport_stream_op_batch_payload
 
 /* Transport stream op: a set of operations to perform on a transport
    against a single stream */
-typedef struct grpc_transport_stream_op_batch {
+struct grpc_transport_stream_op_batch {
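+  // Bit-field members cannot have default member initializers (pre-C++20),
+  // so they are zeroed in the constructor's init list instead.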
+  grpc_transport_stream_op_batch()
+      : send_initial_metadata(false),
+        send_trailing_metadata(false),
+        send_message(false),
+        recv_initial_metadata(false),
+        recv_message(false),
+        recv_trailing_metadata(false),
+        cancel_stream(false) {}
+
   /** Should be scheduled when all of the non-recv operations in the batch
       are complete.
 
@@ -131,10 +140,10 @@ typedef struct grpc_transport_stream_op_batch {
       scheduled as soon as the non-recv ops are complete, regardless of
       whether or not the recv ops are complete.  If a batch contains
       only recv ops, on_complete can be null. */
-  grpc_closure* on_complete;
+  grpc_closure* on_complete = nullptr;
 
   /** Values for the stream op (fields set are determined by flags above) */
-  grpc_transport_stream_op_batch_payload* payload;
+  grpc_transport_stream_op_batch_payload* payload = nullptr;
 
   /** Send initial metadata to the peer, from the provided metadata batch. */
   bool send_initial_metadata : 1;
@@ -163,24 +172,33 @@ typedef struct grpc_transport_stream_op_batch {
    * current handler of the op */
 
   grpc_handler_private_op_data handler_private;
-} grpc_transport_stream_op_batch;
+};
 
 struct grpc_transport_stream_op_batch_payload {
+  explicit grpc_transport_stream_op_batch_payload(
+      grpc_call_context_element* context)
+      : context(context) {}
+  ~grpc_transport_stream_op_batch_payload() {
+    // We don't really own `send_message`, so release ownership and let the
+    // owner clean up the data.
+    send_message.send_message.release();
+  }
+
   struct {
-    grpc_metadata_batch* send_initial_metadata;
+    grpc_metadata_batch* send_initial_metadata = nullptr;
     /** Iff send_initial_metadata != NULL, flags associated with
         send_initial_metadata: a bitfield of GRPC_INITIAL_METADATA_xxx */
-    uint32_t send_initial_metadata_flags;
+    uint32_t send_initial_metadata_flags = 0;
     // If non-NULL, will be set by the transport to the peer string (a char*).
     // The transport retains ownership of the string.
     // Note: This pointer may be used by the transport after the
     // send_initial_metadata op is completed.  It must remain valid
     // until the call is destroyed.
-    gpr_atm* peer_string;
+    gpr_atm* peer_string = nullptr;
   } send_initial_metadata;
 
   struct {
-    grpc_metadata_batch* send_trailing_metadata;
+    grpc_metadata_batch* send_trailing_metadata = nullptr;
   } send_trailing_metadata;
 
   struct {
@@ -192,39 +210,39 @@ struct grpc_transport_stream_op_batch_payload {
   } send_message;
 
   struct {
-    grpc_metadata_batch* recv_initial_metadata;
+    grpc_metadata_batch* recv_initial_metadata = nullptr;
     // Flags are used only on the server side.  If non-null, will be set to
     // a bitfield of the GRPC_INITIAL_METADATA_xxx macros (e.g., to
     // indicate if the call is idempotent).
-    uint32_t* recv_flags;
+    uint32_t* recv_flags = nullptr;
     /** Should be enqueued when initial metadata is ready to be processed. */
-    grpc_closure* recv_initial_metadata_ready;
+    grpc_closure* recv_initial_metadata_ready = nullptr;
     // If not NULL, will be set to true if trailing metadata is
     // immediately available.  This may be a signal that we received a
     // Trailers-Only response.
-    bool* trailing_metadata_available;
+    bool* trailing_metadata_available = nullptr;
     // If non-NULL, will be set by the transport to the peer string (a char*).
     // The transport retains ownership of the string.
     // Note: This pointer may be used by the transport after the
     // recv_initial_metadata op is completed.  It must remain valid
     // until the call is destroyed.
-    gpr_atm* peer_string;
+    gpr_atm* peer_string = nullptr;
   } recv_initial_metadata;
 
   struct {
     // Will be set by the transport to point to the byte stream
     // containing a received message.
     // Will be NULL if trailing metadata is received instead of a message.
-    grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message;
+    grpc_core::OrphanablePtr<grpc_core::ByteStream>* recv_message = nullptr;
     /** Should be enqueued when one message is ready to be processed. */
-    grpc_closure* recv_message_ready;
+    grpc_closure* recv_message_ready = nullptr;
   } recv_message;
 
   struct {
-    grpc_metadata_batch* recv_trailing_metadata;
-    grpc_transport_stream_stats* collect_stats;
+    grpc_metadata_batch* recv_trailing_metadata = nullptr;
+    grpc_transport_stream_stats* collect_stats = nullptr;
     /** Should be enqueued when trailing metadata is ready to be processed. */
-    grpc_closure* recv_trailing_metadata_ready;
+    grpc_closure* recv_trailing_metadata_ready = nullptr;
   } recv_trailing_metadata;
 
   /** Forcefully close this stream.
@@ -240,7 +258,7 @@ struct grpc_transport_stream_op_batch_payload {
   struct {
     // Error contract: the transport that gets this op must cause cancel_error
     //                 to be unref'ed after processing it
-    grpc_error* cancel_error;
+    grpc_error* cancel_error = GRPC_ERROR_NONE;
   } cancel_stream;
 
   /* Indexes correspond to grpc_context_index enum values */
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index 1f7831096c1..446dc93edb9 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -468,7 +468,7 @@ class NoOp {
 
 class SendEmptyMetadata {
  public:
-  SendEmptyMetadata() {
+  SendEmptyMetadata() : op_payload_(nullptr) {
     memset(&op_, 0, sizeof(op_));
     op_.on_complete = GRPC_CLOSURE_INIT(&closure_, DoNothing, nullptr,
                                         grpc_schedule_on_exec_ctx);
diff --git a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
index ba4b57ad228..85d233dd264 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
@@ -778,8 +778,7 @@ static void free_timeout(void* p) { gpr_free(p); }
 // Benchmark the current on_initial_header implementation
 static void OnInitialHeader(void* user_data, grpc_mdelem md) {
   // Setup for benchmark. This will bloat the absolute values of this benchmark
-  grpc_chttp2_incoming_metadata_buffer buffer;
-  grpc_chttp2_incoming_metadata_buffer_init(&buffer, (gpr_arena*)user_data);
+  grpc_chttp2_incoming_metadata_buffer buffer((gpr_arena*)user_data);
   bool seen_error = false;
 
   // Below here is the code we actually care about benchmarking
@@ -822,7 +821,6 @@ static void OnInitialHeader(void* user_data, grpc_mdelem md) {
       GPR_ASSERT(0);
     }
   }
-  grpc_chttp2_incoming_metadata_buffer_destroy(&buffer);
 }
 
 // Benchmark timeout handling
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index 189923a841e..f7ae16e61d4 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -262,7 +262,7 @@ static void BM_StreamCreateDestroy(benchmark::State& state) {
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   grpc_transport_stream_op_batch op;
-  grpc_transport_stream_op_batch_payload op_payload;
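+  // The payload constructor now takes the call's grpc_call_context_element*;
+  // the benchmark has no call context, so pass nullptr.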
+  grpc_transport_stream_op_batch_payload op_payload(nullptr);
   memset(&op, 0, sizeof(op));
   op.cancel_stream = true;
   op.payload = &op_payload;
@@ -308,8 +308,7 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   grpc_transport_stream_op_batch op;
-  grpc_transport_stream_op_batch_payload op_payload;
-  memset(&op_payload, 0, sizeof(op_payload));
+  grpc_transport_stream_op_batch_payload op_payload(nullptr);
   std::unique_ptr<Closure> start;
   std::unique_ptr<Closure> done;
 
@@ -360,8 +359,7 @@ static void BM_TransportEmptyOp(benchmark::State& state) {
   Stream s(&f);
   s.Init(state);
   grpc_transport_stream_op_batch op;
-  grpc_transport_stream_op_batch_payload op_payload;
-  memset(&op_payload, 0, sizeof(op_payload));
+  grpc_transport_stream_op_batch_payload op_payload(nullptr);
   auto reset_op = [&]() {
     memset(&op, 0, sizeof(op));
     op.payload = &op_payload;
@@ -393,8 +391,7 @@ static void BM_TransportStreamSend(benchmark::State& state) {
   auto s = std::unique_ptr<Stream>(new Stream(&f));
   s->Init(state);
   grpc_transport_stream_op_batch op;
-  grpc_transport_stream_op_batch_payload op_payload;
-  memset(&op_payload, 0, sizeof(op_payload));
+  grpc_transport_stream_op_batch_payload op_payload(nullptr);
   auto reset_op = [&]() {
     memset(&op, 0, sizeof(op));
     op.payload = &op_payload;
@@ -526,8 +523,7 @@ static void BM_TransportStreamRecv(benchmark::State& state) {
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   s.Init(state);
-  grpc_transport_stream_op_batch_payload op_payload;
-  memset(&op_payload, 0, sizeof(op_payload));
+  grpc_transport_stream_op_batch_payload op_payload(nullptr);
   grpc_transport_stream_op_batch op;
   grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_stream;
   grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384);