[transport] Move transport interface to C++ (#34618)

Co-authored-by: ctiller <ctiller@users.noreply.github.com>
Craig Tiller (1 year ago), committed by GitHub
parent 6d741ca724
commit a0c1027bb3
Files changed (62), with per-file change counts:

  1. BUILD (3)
  2. Package.swift (1)
  3. build_autogenerated.yaml (4)
  4. gRPC-C++.podspec (2)
  5. gRPC-Core.podspec (2)
  6. grpc.gemspec (1)
  7. package.xml (1)
  8. src/core/BUILD (4)
  9. src/core/ext/filters/client_channel/connector.h (5)
  10. src/core/ext/filters/client_channel/subchannel.cc (2)
  11. src/core/ext/filters/http/client/http_client_filter.cc (11)
  12. src/core/ext/filters/http/http_filters_plugin.cc (9)
  13. src/core/ext/filters/rbac/rbac_filter.cc (15)
  14. src/core/ext/transport/binder/client/binder_connector.cc (2)
  15. src/core/ext/transport/binder/client/channel_create_impl.cc (2)
  16. src/core/ext/transport/binder/server/binder_server.cc (2)
  17. src/core/ext/transport/binder/transport/binder_transport.cc (98)
  18. src/core/ext/transport/binder/transport/binder_transport.h (37)
  19. src/core/ext/transport/chttp2/client/chttp2_connector.cc (5)
  20. src/core/ext/transport/chttp2/server/chttp2_server.cc (17)
  21. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (112)
  22. src/core/ext/transport/chttp2/transport/chttp2_transport.h (6)
  23. src/core/ext/transport/chttp2/transport/internal.h (51)
  24. src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc (14)
  25. src/core/ext/transport/cronet/transport/cronet_transport.cc (96)
  26. src/core/ext/transport/cronet/transport/cronet_transport.h (6)
  27. src/core/ext/transport/inproc/legacy_inproc_transport.cc (171)
  28. src/core/lib/channel/connected_channel.cc (119)
  29. src/core/lib/surface/channel.cc (4)
  30. src/core/lib/surface/channel.h (4)
  31. src/core/lib/surface/server.cc (9)
  32. src/core/lib/surface/server.h (8)
  33. src/core/lib/transport/batch_builder.cc (8)
  34. src/core/lib/transport/batch_builder.h (3)
  35. src/core/lib/transport/transport.cc (57)
  36. src/core/lib/transport/transport.h (151)
  37. src/core/lib/transport/transport_fwd.h (4)
  38. src/core/lib/transport/transport_impl.h (102)
  39. test/core/bad_client/bad_client.cc (6)
  40. test/core/bad_connection/close_fd_test.cc (10)
  41. test/core/channel/minimal_stack_is_minimal_test.cc (45)
  42. test/core/end2end/BUILD (1)
  43. test/core/end2end/fixtures/sockpair_fixture.h (9)
  44. test/core/end2end/fuzzers/client_fuzzer.cc (4)
  45. test/core/end2end/fuzzers/server_fuzzer.cc (4)
  46. test/core/transport/binder/binder_transport_test.cc (14)
  47. test/core/transport/binder/end2end/end2end_binder_transport_test.cc (6)
  48. test/core/transport/binder/end2end/fuzzers/client_fuzzer.cc (11)
  49. test/core/transport/binder/end2end/fuzzers/server_fuzzer.cc (11)
  50. test/core/transport/binder/end2end/testing_channel_create.cc (10)
  51. test/core/transport/binder/end2end/testing_channel_create.h (2)
  52. test/core/transport/chttp2/ping_configuration_test.cc (13)
  53. test/core/xds/xds_channel_stack_modifier_test.cc (31)
  54. test/cpp/microbenchmarks/BUILD (16)
  55. test/cpp/microbenchmarks/bm_call_create.cc (834)
  56. test/cpp/microbenchmarks/bm_chttp2_transport.cc (22)
  57. test/cpp/microbenchmarks/fullstack_fixtures.h (4)
  58. test/cpp/performance/writes_per_rpc_test.cc (4)
  59. tools/doxygen/Doxyfile.c++.internal (1)
  60. tools/doxygen/Doxyfile.core.internal (1)
  61. tools/internal_ci/linux/grpc_microbenchmark_diff_in_docker.sh (2)
  62. tools/profiling/microbenchmarks/bm_diff/bm_constants.py (1)

@ -1466,7 +1466,6 @@ grpc_cc_library(
"//src/core:lib/transport/status_conversion.h",
"//src/core:lib/transport/timeout_encoding.h",
"//src/core:lib/transport/transport.h",
"//src/core:lib/transport/transport_impl.h",
] +
# TODO(vigneshbabu): remove these
# These headers used to be vended by this target, but they have to be
@ -3139,7 +3138,6 @@ grpc_cc_library(
"//src/core:status_helper",
"//src/core:subchannel_interface",
"//src/core:time",
"//src/core:transport_fwd",
"//src/core:try_seq",
"//src/core:unique_type_name",
"//src/core:useful",
@ -3616,7 +3614,6 @@ grpc_cc_library(
"//src/core:race",
"//src/core:slice",
"//src/core:slice_buffer",
"//src/core:transport_fwd",
],
)

Package.swift (generated, 1 change)

@ -1722,7 +1722,6 @@ let package = Package(
"src/core/lib/transport/transport.cc",
"src/core/lib/transport/transport.h",
"src/core/lib/transport/transport_fwd.h",
"src/core/lib/transport/transport_impl.h",
"src/core/lib/transport/transport_op_string.cc",
"src/core/lib/uri/uri_parser.cc",
"src/core/lib/uri/uri_parser.h",

@ -1001,7 +1001,6 @@ libs:
- src/core/lib/transport/timeout_encoding.h
- src/core/lib/transport/transport.h
- src/core/lib/transport/transport_fwd.h
- src/core/lib/transport/transport_impl.h
- src/core/lib/uri/uri_parser.h
- src/core/tsi/alts/crypt/gsec.h
- src/core/tsi/alts/frame_protector/alts_counter.h
@ -2393,7 +2392,6 @@ libs:
- src/core/lib/transport/timeout_encoding.h
- src/core/lib/transport/transport.h
- src/core/lib/transport/transport_fwd.h
- src/core/lib/transport/transport_impl.h
- src/core/lib/uri/uri_parser.h
- src/core/tsi/alts/handshaker/transport_security_common_api.h
- src/core/tsi/fake_transport_security.h
@ -4408,7 +4406,6 @@ libs:
- src/core/lib/transport/timeout_encoding.h
- src/core/lib/transport/transport.h
- src/core/lib/transport/transport_fwd.h
- src/core/lib/transport/transport_impl.h
- src/core/lib/uri/uri_parser.h
- src/core/tsi/alts/handshaker/transport_security_common_api.h
- src/core/tsi/transport_security.h
@ -15978,7 +15975,6 @@ targets:
- src/core/lib/transport/timeout_encoding.h
- src/core/lib/transport/transport.h
- src/core/lib/transport/transport_fwd.h
- src/core/lib/transport/transport_impl.h
- src/core/lib/uri/uri_parser.h
- src/core/tsi/alts/handshaker/transport_security_common_api.h
- third_party/upb/upb/generated_code_support.h

gRPC-C++.podspec (generated, 2 changes)

@ -1098,7 +1098,6 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_fwd.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/uri/uri_parser.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',
@ -2172,7 +2171,6 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_fwd.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/uri/uri_parser.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',

gRPC-Core.podspec (generated, 2 changes)

@ -1821,7 +1821,6 @@ Pod::Spec.new do |s|
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_fwd.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/lib/uri/uri_parser.h',
@ -2930,7 +2929,6 @@ Pod::Spec.new do |s|
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
'src/core/lib/transport/transport_fwd.h',
'src/core/lib/transport/transport_impl.h',
'src/core/lib/uri/uri_parser.h',
'src/core/tsi/alts/crypt/gsec.h',
'src/core/tsi/alts/frame_protector/alts_counter.h',

grpc.gemspec (generated, 1 change)

@ -1724,7 +1724,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/transport.cc )
s.files += %w( src/core/lib/transport/transport.h )
s.files += %w( src/core/lib/transport/transport_fwd.h )
s.files += %w( src/core/lib/transport/transport_impl.h )
s.files += %w( src/core/lib/transport/transport_op_string.cc )
s.files += %w( src/core/lib/uri/uri_parser.cc )
s.files += %w( src/core/lib/uri/uri_parser.h )

package.xml (generated, 1 change)

@ -1706,7 +1706,6 @@
<file baseinstalldir="/" name="src/core/lib/transport/transport.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport_fwd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport_impl.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport_op_string.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/uri/uri_parser.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/uri/uri_parser.h" role="src" />

@ -3953,7 +3953,6 @@ grpc_cc_library(
"json_args",
"json_object_loader",
"service_config_parser",
"transport_fwd",
"validation_errors",
"//:config",
"//:gpr",
@ -5867,7 +5866,6 @@ grpc_cc_library(
"status_helper",
"tcp_connect_handshaker",
"time",
"transport_fwd",
"unique_type_name",
"//:channel_arg_names",
"//:config",
@ -5919,7 +5917,6 @@ grpc_cc_library(
"resource_quota",
"status_helper",
"time",
"transport_fwd",
"unique_type_name",
"//:channel_arg_names",
"//:chttp2_legacy_frame",
@ -5970,7 +5967,6 @@ grpc_cc_library(
"slice_buffer",
"status_helper",
"time",
"transport_fwd",
"//:channel_arg_names",
"//:config",
"//:debug_location",

@ -29,7 +29,6 @@
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/iomgr/resolved_address.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
namespace grpc_core {
@ -51,7 +50,7 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
struct Result {
// The connected transport.
grpc_transport* transport = nullptr;
Transport* transport = nullptr;
// Channel args to be passed to filters.
ChannelArgs channel_args;
// Channelz socket node of the connected transport, if any.
@ -59,7 +58,7 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
void Reset() {
if (transport != nullptr) {
grpc_transport_destroy(transport);
transport->Orphan();
transport = nullptr;
}
channel_args = ChannelArgs();
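The Result::Reset() change above captures the core ownership shift for connector code: a connected transport is now released through the C++ interface rather than a free function. A minimal sketch of the new cleanup convention (the ReleaseTransport helper below is hypothetical, not part of this patch):

#include "src/core/lib/transport/transport.h"

// Hypothetical helper: code that previously called grpc_transport_destroy()
// now drops its ownership by calling Orphan() on the interface.
void ReleaseTransport(grpc_core::Transport* transport) {
  if (transport != nullptr) {
    transport->Orphan();  // was: grpc_transport_destroy(transport)
  }
}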

@ -66,7 +66,7 @@
#include "src/core/lib/surface/init_internally.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
// Backoff parameters.
#define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1

@ -47,8 +47,7 @@
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/slice/percent_encoding.h"
#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
@ -90,7 +89,8 @@ HttpSchemeMetadata::ValueType SchemeFromArgs(const ChannelArgs& args) {
return scheme;
}
Slice UserAgentFromArgs(const ChannelArgs& args, const char* transport_name) {
Slice UserAgentFromArgs(const ChannelArgs& args,
absl::string_view transport_name) {
std::vector<std::string> fields;
auto add = [&fields](absl::string_view x) {
if (!x.empty()) fields.push_back(std::string(x));
@ -150,12 +150,13 @@ HttpClientFilter::HttpClientFilter(HttpSchemeMetadata::ValueType scheme,
absl::StatusOr<HttpClientFilter> HttpClientFilter::Create(
const ChannelArgs& args, ChannelFilter::Args) {
auto* transport = args.GetObject<grpc_transport>();
auto* transport = args.GetObject<Transport>();
if (transport == nullptr) {
return absl::InvalidArgumentError("HttpClientFilter needs a transport");
}
return HttpClientFilter(
SchemeFromArgs(args), UserAgentFromArgs(args, transport->vtable->name),
SchemeFromArgs(args),
UserAgentFromArgs(args, transport->GetTransportName()),
args.GetInt(GRPC_ARG_TEST_ONLY_USE_PUT_REQUESTS).value_or(false));
}
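Filters follow the same pattern as the connector: the transport is fetched from the channel args as a grpc_core::Transport and queried through virtual accessors instead of reading vtable fields. A sketch of that lookup, assuming UserAgentFromArgs from the hunk above is visible; the wrapper function itself is illustrative only:

// Illustrative wrapper: GetObject<Transport>() replaces
// GetObject<grpc_transport>(), and GetTransportName() replaces
// transport->vtable->name.
absl::StatusOr<grpc_core::Slice> UserAgentForChannel(
    const grpc_core::ChannelArgs& args) {
  auto* transport = args.GetObject<grpc_core::Transport>();
  if (transport == nullptr) {
    return absl::InvalidArgumentError("no transport in channel args");
  }
  return UserAgentFromArgs(args, transport->GetTransportName());
}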

@ -18,7 +18,7 @@
#include <grpc/support/port_platform.h>
#include <string.h>
#include "absl/strings/match.h"
#include "src/core/ext/filters/http/client/http_client_filter.h"
#include "src/core/ext/filters/http/message_compress/compression_filter.h"
@ -27,14 +27,13 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
namespace {
bool IsBuildingHttpLikeTransport(const ChannelArgs& args) {
grpc_transport* t = args.GetObject<grpc_transport>();
return t != nullptr && strstr(t->vtable->name, "http");
auto* t = args.GetObject<Transport>();
return t != nullptr && absl::StrContains(t->GetTransportName(), "http");
}
} // namespace

@ -39,8 +39,7 @@
#include "src/core/lib/security/context/security_context.h"
#include "src/core/lib/service_config/service_config_call_data.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
namespace grpc_core {
@ -85,17 +84,17 @@ absl::StatusOr<RbacFilter> RbacFilter::Create(const ChannelArgs& args,
if (auth_context == nullptr) {
return GRPC_ERROR_CREATE("No auth context found");
}
auto* transport = args.GetObject<grpc_transport>();
auto* transport = args.GetObject<Transport>();
if (transport == nullptr) {
// This should never happen since the transport is always set on the server
// side.
return GRPC_ERROR_CREATE("No transport configured");
}
return RbacFilter(grpc_channel_stack_filter_instance_number(
filter_args.channel_stack(),
filter_args.uninitialized_channel_element()),
EvaluateArgs::PerChannelArgs(
auth_context, grpc_transport_get_endpoint(transport)));
return RbacFilter(
grpc_channel_stack_filter_instance_number(
filter_args.channel_stack(),
filter_args.uninitialized_channel_element()),
EvaluateArgs::PerChannelArgs(auth_context, transport->GetEndpoint()));
}
void RbacFilterRegister(CoreConfiguration::Builder* builder) {

@ -80,7 +80,7 @@ class BinderConnector : public grpc_core::SubchannelConnector {
void OnConnected(std::unique_ptr<grpc_binder::Binder> endpoint_binder) {
GPR_ASSERT(endpoint_binder != nullptr);
grpc_transport* transport = grpc_create_binder_transport_client(
grpc_core::Transport* transport = grpc_create_binder_transport_client(
std::move(endpoint_binder),
grpc_binder::GetSecurityPolicySetting()->Get(conn_id_));
GPR_ASSERT(transport != nullptr);

@ -47,7 +47,7 @@ grpc_channel* CreateDirectBinderChannelImplForTesting(
security_policy) {
grpc_core::ExecCtx exec_ctx;
grpc_transport* transport = grpc_create_binder_transport_client(
grpc_core::Transport* transport = grpc_create_binder_transport_client(
std::move(endpoint_binder), security_policy);
GPR_ASSERT(transport != nullptr);

@ -210,7 +210,7 @@ class BinderServerListener : public Server::ListenerInterface {
client_binder->Initialize();
// Finish the second half of SETUP_TRANSPORT in
// grpc_create_binder_transport_server().
grpc_transport* server_transport = grpc_create_binder_transport_server(
Transport* server_transport = grpc_create_binder_transport_server(
std::move(client_binder), security_policy_);
GPR_ASSERT(server_transport);
grpc_error_handle error = server_->SetupTransport(

@ -102,36 +102,26 @@ static void register_stream_locked(void* arg, grpc_error_handle /*error*/) {
args->gbt->registered_stream[args->gbs->GetTxCode()] = args->gbs;
}
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena) {
gpr_log(GPR_INFO, "%s = %p %p %p %p %p", __func__, gt, gs, refcount,
void grpc_binder_transport::InitStream(grpc_stream* gs,
grpc_stream_refcount* refcount,
const void* server_data,
grpc_core::Arena* arena) {
gpr_log(GPR_INFO, "%s = %p %p %p %p %p", __func__, this, gs, refcount,
server_data, arena);
// Note that this function is not locked and may be invoked concurrently
grpc_binder_transport* t = reinterpret_cast<grpc_binder_transport*>(gt);
new (gs) grpc_binder_stream(t, refcount, server_data, arena,
t->NewStreamTxCode(), t->is_client);
new (gs) grpc_binder_stream(this, refcount, server_data, arena,
NewStreamTxCode(), is_client);
// `grpc_binder_transport::registered_stream` should only be updated in
// combiner
grpc_binder_stream* gbs = reinterpret_cast<grpc_binder_stream*>(gs);
gbs->register_stream_args.gbs = gbs;
gbs->register_stream_args.gbt = t;
gbs->register_stream_args.gbt = this;
grpc_core::ExecCtx exec_ctx;
t->combiner->Run(
combiner->Run(
GRPC_CLOSURE_INIT(&gbs->register_stream_closure, register_stream_locked,
&gbs->register_stream_args, nullptr),
absl::OkStatus());
return 0;
}
static void set_pollset(grpc_transport* gt, grpc_stream* gs, grpc_pollset* gp) {
gpr_log(GPR_INFO, "%s = %p %p %p", __func__, gt, gs, gp);
}
static void set_pollset_set(grpc_transport*, grpc_stream*, grpc_pollset_set*) {
gpr_log(GPR_INFO, __func__);
}
static void AssignMetadata(grpc_metadata_batch* mb,
@ -380,7 +370,7 @@ static void accept_stream_locked(void* gt, grpc_error_handle /*error*/) {
if (gbt->accept_stream_fn) {
gpr_log(GPR_INFO, "Accepting a stream");
// must pass in a non-null value.
(*gbt->accept_stream_fn)(gbt->accept_stream_user_data, &gbt->base, gbt);
(*gbt->accept_stream_fn)(gbt->accept_stream_user_data, gbt, gbt);
} else {
++gbt->accept_stream_fn_called_count_;
gpr_log(GPR_INFO, "accept_stream_fn not set, current count = %d",
@ -577,17 +567,16 @@ static void perform_stream_op_locked(void* stream_op,
GRPC_BINDER_STREAM_UNREF(gbs, "perform_stream_op");
}
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
grpc_binder_transport* gbt = reinterpret_cast<grpc_binder_transport*>(gt);
void grpc_binder_transport::PerformStreamOp(
grpc_stream* gs, grpc_transport_stream_op_batch* op) {
grpc_binder_stream* gbs = reinterpret_cast<grpc_binder_stream*>(gs);
gpr_log(GPR_INFO, "%s = %p %p %p is_client = %d", __func__, gt, gs, op,
gpr_log(GPR_INFO, "%s = %p %p %p is_client = %d", __func__, this, gs, op,
gbs->is_client);
GRPC_BINDER_STREAM_REF(gbs, "perform_stream_op");
op->handler_private.extra_arg = gbs;
gbt->combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_stream_op_locked, op, nullptr),
absl::OkStatus());
combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_stream_op_locked, op, nullptr),
absl::OkStatus());
}
static void close_transport_locked(grpc_binder_transport* gbt) {
@ -644,15 +633,13 @@ static void perform_transport_op_locked(void* transport_op,
GRPC_BINDER_UNREF_TRANSPORT(gbt, "perform_transport_op");
}
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
void grpc_binder_transport::PerformOp(grpc_transport_op* op) {
gpr_log(GPR_INFO, __func__);
grpc_binder_transport* gbt = reinterpret_cast<grpc_binder_transport*>(gt);
op->handler_private.extra_arg = gbt;
GRPC_BINDER_REF_TRANSPORT(gbt, "perform_transport_op");
gbt->combiner->Run(
GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_transport_op_locked, op, nullptr),
absl::OkStatus());
op->handler_private.extra_arg = this;
GRPC_BINDER_REF_TRANSPORT(this, "perform_transport_op");
combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_transport_op_locked, op, nullptr),
absl::OkStatus());
}
static void destroy_stream_locked(void* sp, grpc_error_handle /*error*/) {
@ -666,8 +653,8 @@ static void destroy_stream_locked(void* sp, grpc_error_handle /*error*/) {
gbs->~grpc_binder_stream();
}
static void destroy_stream(grpc_transport* /*gt*/, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
void grpc_binder_transport::DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) {
gpr_log(GPR_INFO, __func__);
grpc_binder_stream* gbs = reinterpret_cast<grpc_binder_stream*>(gs);
gbs->destroy_stream_then_closure = then_schedule_closure;
@ -686,34 +673,20 @@ static void destroy_transport_locked(void* gt, grpc_error_handle /*error*/) {
GRPC_BINDER_UNREF_TRANSPORT(gbt, "transport destroyed");
}
static void destroy_transport(grpc_transport* gt) {
void grpc_binder_transport::Orphan() {
gpr_log(GPR_INFO, __func__);
grpc_binder_transport* gbt = reinterpret_cast<grpc_binder_transport*>(gt);
gbt->combiner->Run(
GRPC_CLOSURE_CREATE(destroy_transport_locked, gbt, nullptr),
absl::OkStatus());
combiner->Run(GRPC_CLOSURE_CREATE(destroy_transport_locked, this, nullptr),
absl::OkStatus());
}
static grpc_endpoint* get_endpoint(grpc_transport*) {
grpc_endpoint* grpc_binder_transport::GetEndpoint() {
gpr_log(GPR_INFO, __func__);
return nullptr;
}
// See grpc_transport_vtable declaration for meaning of each field
static const grpc_transport_vtable vtable = {sizeof(grpc_binder_stream),
false,
"binder",
init_stream,
nullptr,
set_pollset,
set_pollset_set,
perform_stream_op,
perform_transport_op,
destroy_stream,
destroy_transport,
get_endpoint};
static const grpc_transport_vtable* get_vtable() { return &vtable; }
size_t grpc_binder_transport::SizeOfStream() const {
return sizeof(grpc_binder_stream);
}
grpc_binder_transport::grpc_binder_transport(
std::unique_ptr<grpc_binder::Binder> binder, bool is_client,
@ -726,7 +699,6 @@ grpc_binder_transport::grpc_binder_transport(
GRPC_CHANNEL_READY),
refs(1, nullptr) {
gpr_log(GPR_INFO, __func__);
base.vtable = get_vtable();
transport_stream_receiver =
std::make_shared<grpc_binder::TransportStreamReceiverImpl>(
is_client, /*accept_stream_callback=*/[this] {
@ -751,7 +723,7 @@ grpc_binder_transport::~grpc_binder_transport() {
GRPC_COMBINER_UNREF(combiner, "binder_transport");
}
grpc_transport* grpc_create_binder_transport_client(
grpc_core::Transport* grpc_create_binder_transport_client(
std::unique_ptr<grpc_binder::Binder> endpoint_binder,
std::shared_ptr<grpc::experimental::binder::SecurityPolicy>
security_policy) {
@ -763,10 +735,10 @@ grpc_transport* grpc_create_binder_transport_client(
grpc_binder_transport* t = new grpc_binder_transport(
std::move(endpoint_binder), /*is_client=*/true, security_policy);
return &t->base;
return t;
}
grpc_transport* grpc_create_binder_transport_server(
grpc_core::Transport* grpc_create_binder_transport_server(
std::unique_ptr<grpc_binder::Binder> client_binder,
std::shared_ptr<grpc::experimental::binder::SecurityPolicy>
security_policy) {
@ -778,6 +750,6 @@ grpc_transport* grpc_create_binder_transport_server(
grpc_binder_transport* t = new grpc_binder_transport(
std::move(client_binder), /*is_client=*/false, security_policy);
return &t->base;
return t;
}
#endif
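A knock-on effect visible in accept_stream_locked() above: accept-stream callbacks now receive the grpc_core::Transport* itself rather than a pointer to an embedded grpc_transport base member. A callback matching the new signature would look like this (the handler body is a placeholder):

// Signature mirrors the accept_stream_fn member type declared in
// binder_transport.h (next file); the body is illustrative only.
void OnNewBinderStream(void* user_data, grpc_core::Transport* transport,
                       const void* server_data) {
  // A server would set up its stream state for `transport` here.
  (void)user_data;
  (void)transport;
  (void)server_data;
}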

@ -35,7 +35,6 @@
#include "src/core/lib/gprpp/crash.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
struct grpc_binder_stream;
@ -43,12 +42,36 @@ struct grpc_binder_stream;
// depends on what style we want to follow)
// TODO(mingcl): Decide casing for this class name. Should we use C-style class
// name here or just go with C++ style?
struct grpc_binder_transport {
struct grpc_binder_transport final : public grpc_core::Transport,
public grpc_core::FilterStackTransport {
explicit grpc_binder_transport(
std::unique_ptr<grpc_binder::Binder> binder, bool is_client,
std::shared_ptr<grpc::experimental::binder::SecurityPolicy>
security_policy);
~grpc_binder_transport();
~grpc_binder_transport() override;
grpc_core::FilterStackTransport* filter_stack_transport() override {
return this;
}
grpc_core::ClientTransport* client_transport() override { return nullptr; }
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override { return "binder"; }
void InitStream(grpc_stream* gs, grpc_stream_refcount* refcount,
const void* server_data, grpc_core::Arena* arena) override;
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override {}
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override {}
void PerformOp(grpc_transport_op* op) override;
grpc_endpoint* GetEndpoint() override;
size_t SizeOfStream() const override;
bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override {
return false;
}
void PerformStreamOp(grpc_stream* gs,
grpc_transport_stream_op_batch* op) override;
void DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) override;
void Orphan() override;
int NewStreamTxCode() {
// TODO(mingcl): Wrap around when all tx codes are used. "If we do detect a
@ -58,8 +81,6 @@ struct grpc_binder_transport {
return next_free_tx_code++;
}
grpc_transport base; // must be first
std::shared_ptr<grpc_binder::TransportStreamReceiver>
transport_stream_receiver;
grpc_core::OrphanablePtr<grpc_binder::WireReader> wire_reader;
@ -73,7 +94,7 @@ struct grpc_binder_transport {
// The callback and the data for the callback when the stream is connected
// between client and server. registered_method_matcher_cb is called before
// invoking the recv initial metadata callback.
void (*accept_stream_fn)(void* user_data, grpc_transport* transport,
void (*accept_stream_fn)(void* user_data, grpc_core::Transport* transport,
const void* server_data) = nullptr;
void (*registered_method_matcher_cb)(
void* user_data, grpc_core::ServerMetadata* metadata) = nullptr;
@ -90,11 +111,11 @@ struct grpc_binder_transport {
std::atomic<int> next_free_tx_code{grpc_binder::kFirstCallId};
};
grpc_transport* grpc_create_binder_transport_client(
grpc_core::Transport* grpc_create_binder_transport_client(
std::unique_ptr<grpc_binder::Binder> endpoint_binder,
std::shared_ptr<grpc::experimental::binder::SecurityPolicy>
security_policy);
grpc_transport* grpc_create_binder_transport_server(
grpc_core::Transport* grpc_create_binder_transport_server(
std::unique_ptr<grpc_binder::Binder> client_binder,
std::shared_ptr<grpc::experimental::binder::SecurityPolicy>
security_policy);
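Taken together, this header shows the shape every legacy (filter-stack) transport now takes: derive from grpc_core::Transport and grpc_core::FilterStackTransport, and express the former vtable entries as virtual overrides. A trimmed skeleton of that pattern, with placeholder bodies rather than binder behavior:

// Skeleton only; the override set mirrors the declarations above, and the
// bodies are placeholders.
struct demo_transport final : public grpc_core::Transport,
                              public grpc_core::FilterStackTransport {
  grpc_core::FilterStackTransport* filter_stack_transport() override {
    return this;
  }
  grpc_core::ClientTransport* client_transport() override { return nullptr; }
  grpc_core::ServerTransport* server_transport() override { return nullptr; }
  absl::string_view GetTransportName() const override { return "demo"; }
  size_t SizeOfStream() const override { return 0; }
  bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override {
    return false;
  }
  void InitStream(grpc_stream*, grpc_stream_refcount*, const void*,
                  grpc_core::Arena*) override {}
  void SetPollset(grpc_stream*, grpc_pollset*) override {}
  void SetPollsetSet(grpc_stream*, grpc_pollset_set*) override {}
  void PerformStreamOp(grpc_stream*,
                       grpc_transport_stream_op_batch*) override {}
  void PerformOp(grpc_transport_op*) override {}
  void DestroyStream(grpc_stream*, grpc_closure*) override {}
  grpc_endpoint* GetEndpoint() override { return nullptr; }
  void Orphan() override { delete this; }
};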

@ -71,7 +71,6 @@
#include "src/core/lib/transport/handshaker_registry.h"
#include "src/core/lib/transport/tcp_connect_handshaker.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
@ -408,7 +407,7 @@ grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
grpc_fd_create(fd, "client", true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(final_args),
"fd-client");
grpc_transport* transport =
grpc_core::Transport* transport =
grpc_create_chttp2_transport(final_args, client, true);
GPR_ASSERT(transport);
auto channel = grpc_core::Channel::Create(
@ -418,7 +417,7 @@ grpc_channel* grpc_channel_create_from_fd(const char* target, int fd,
grpc_core::ExecCtx::Get()->Flush();
return channel->release()->c_ptr();
} else {
grpc_transport_destroy(transport);
transport->Orphan();
return grpc_lame_client_channel_create(
target, static_cast<grpc_status_code>(channel.status().code()),
"Failed to create client channel");

@ -83,7 +83,6 @@
#include "src/core/lib/transport/handshaker.h"
#include "src/core/lib/transport/handshaker_registry.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/uri/uri_parser.h"
#ifdef GPR_SUPPORT_CHANNELS_FROM_FD
@ -427,7 +426,7 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnTimeout() {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->disconnect_with_error = GRPC_ERROR_CREATE(
"Did not receive HTTP/2 settings before handshake timeout");
grpc_transport_perform_op(&transport->base, op);
transport->PerformOp(op);
}
}
@ -473,7 +472,7 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// handshaker may have handed off the connection to some external
// code, so we can just clean up here without creating a transport.
if (args->endpoint != nullptr) {
grpc_transport* transport =
Transport* transport =
grpc_create_chttp2_transport(args->args, args->endpoint, false);
grpc_error_handle channel_init_err =
self->connection_->listener_->server_->SetupTransport(
@ -484,7 +483,7 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// handshake deadline.
// Note: The reinterpret_cast<>s here are safe, because
// grpc_chttp2_transport is a C-style extension of
// grpc_transport, so this is morally equivalent of a
// Transport, so this is morally equivalent of a
// static_cast<> to a derived class.
// TODO(roth): Change to static_cast<> when we C++-ify the
// transport API.
@ -523,7 +522,7 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// Failed to create channel from transport. Clean up.
gpr_log(GPR_ERROR, "Failed to create channel: %s",
StatusToString(channel_init_err).c_str());
grpc_transport_destroy(transport);
transport->Orphan();
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
cleanup_connection = true;
@ -608,7 +607,7 @@ void Chttp2ServerListener::ActiveConnection::SendGoAway() {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->goaway_error =
GRPC_ERROR_CREATE("Server is stopping to serve requests.");
grpc_transport_perform_op(&transport->base, op);
transport->PerformOp(op);
}
}
@ -668,7 +667,7 @@ void Chttp2ServerListener::ActiveConnection::OnDrainGraceTimeExpiry() {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
op->disconnect_with_error = GRPC_ERROR_CREATE(
"Drain grace time expired. Closing connection immediately.");
grpc_transport_perform_op(&transport->base, op);
transport->PerformOp(op);
}
}
@ -1074,7 +1073,7 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
grpc_fd_create(fd, name.c_str(), true),
grpc_event_engine::experimental::ChannelArgsEndpointConfig(server_args),
name);
grpc_transport* transport = grpc_create_chttp2_transport(
grpc_core::Transport* transport = grpc_create_chttp2_transport(
server_args, server_endpoint, false // is_client
);
grpc_error_handle error =
@ -1087,7 +1086,7 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
} else {
gpr_log(GPR_ERROR, "Failed to create channel: %s",
grpc_core::StatusToString(error).c_str());
grpc_transport_destroy(transport);
transport->Orphan();
}
}
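Transport-level ops move to the interface as well: grpc_transport_perform_op(&transport->base, op) becomes transport->PerformOp(op). A sketch of the goaway path shown above, pulled out into a standalone helper for clarity (the helper name is illustrative):

// Illustrative helper wrapping the pattern used by Chttp2ServerListener above.
void SendServerGoaway(grpc_core::Transport* transport) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  op->goaway_error =
      GRPC_ERROR_CREATE("Server is stopping to serve requests.");
  transport->PerformOp(op);  // was: grpc_transport_perform_op(&t->base, op)
}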

@ -105,7 +105,6 @@
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/status_conversion.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
#ifdef GRPC_POSIX_SOCKET_TCP
#include "src/core/lib/iomgr/ev_posix.h"
@ -370,8 +369,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
}
}
static const grpc_transport_vtable* get_vtable(void);
static void read_channel_args(grpc_chttp2_transport* t,
const grpc_core::ChannelArgs& channel_args,
bool is_client) {
@ -458,7 +455,7 @@ static void read_channel_args(grpc_chttp2_transport* t,
grpc_core::MakeRefCounted<grpc_core::channelz::SocketNode>(
std::string(grpc_endpoint_get_local_address(t->ep)),
std::string(t->peer_string.as_string_view()),
absl::StrCat(get_vtable()->name, " ",
absl::StrCat(t->GetTransportName(), " ",
t->peer_string.as_string_view()),
channel_args
.GetObjectRef<grpc_core::channelz::SocketNode::Security>());
@ -611,9 +608,11 @@ static void init_keepalive_pings_if_enabled_locked(
grpc_chttp2_transport::grpc_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client)
: refs(1, GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_refcount)
? "chttp2_refcount"
: nullptr),
: grpc_core::RefCounted<grpc_chttp2_transport,
grpc_core::NonPolymorphicRefCount>(
GRPC_TRACE_FLAG_ENABLED(grpc_trace_chttp2_refcount)
? "chttp2_refcount"
: nullptr),
ep(ep),
peer_string(
grpc_core::Slice::FromCopiedString(grpc_endpoint_get_peer(ep))),
@ -640,7 +639,6 @@ grpc_chttp2_transport::grpc_chttp2_transport(
cl = new grpc_core::ContextList();
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
base.vtable = get_vtable();
grpc_slice_buffer_init(&read_buffer);
if (is_client) {
@ -716,10 +714,9 @@ static void destroy_transport_locked(void* tp, grpc_error_handle /*error*/) {
t->memory_owner.Reset();
}
static void destroy_transport(grpc_transport* gt) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
t->combiner->Run(GRPC_CLOSURE_CREATE(destroy_transport_locked, t, nullptr),
absl::OkStatus());
void grpc_chttp2_transport::Orphan() {
combiner->Run(GRPC_CLOSURE_CREATE(destroy_transport_locked, this, nullptr),
absl::OkStatus());
}
static void close_transport_locked(grpc_chttp2_transport* t,
@ -894,12 +891,11 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, destroy_stream_arg, absl::OkStatus());
}
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);
return 0;
void grpc_chttp2_transport::InitStream(grpc_stream* gs,
grpc_stream_refcount* refcount,
const void* server_data,
grpc_core::Arena* arena) {
new (gs) grpc_chttp2_stream(this, refcount, server_data, arena);
}
static void destroy_stream_locked(void* sp, grpc_error_handle /*error*/) {
@ -907,13 +903,12 @@ static void destroy_stream_locked(void* sp, grpc_error_handle /*error*/) {
s->~grpc_chttp2_stream();
}
static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
void grpc_chttp2_transport::DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) {
grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
s->destroy_stream_arg = then_schedule_closure;
t->combiner->Run(
combiner->Run(
GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s, nullptr),
absl::OkStatus());
}
@ -926,7 +921,7 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
grpc_chttp2_stream* accepting = nullptr;
GPR_ASSERT(t->accepting_stream == nullptr);
t->accepting_stream = &accepting;
t->accept_stream_cb(t->accept_stream_cb_user_data, &t->base,
t->accept_stream_cb(t->accept_stream_cb_user_data, t,
reinterpret_cast<void*>(id));
t->accepting_stream = nullptr;
return accepting;
@ -1663,12 +1658,11 @@ static void perform_stream_op_locked(void* stream_op,
GRPC_CHTTP2_STREAM_UNREF(s, "perform_stream_op");
}
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
void grpc_chttp2_transport::PerformStreamOp(
grpc_stream* gs, grpc_transport_stream_op_batch* op) {
grpc_chttp2_stream* s = reinterpret_cast<grpc_chttp2_stream*>(gs);
if (!t->is_client) {
if (!is_client) {
if (op->send_initial_metadata) {
GPR_ASSERT(!op->payload->send_initial_metadata.send_initial_metadata
->get(grpc_core::GrpcTimeoutMetadata())
@ -1688,9 +1682,9 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
op->handler_private.extra_arg = gs;
t->combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_stream_op_locked, op, nullptr),
absl::OkStatus());
combiner->Run(GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_stream_op_locked, op, nullptr),
absl::OkStatus());
}
static void cancel_pings(grpc_chttp2_transport* t, grpc_error_handle error) {
@ -2024,14 +2018,13 @@ static void perform_transport_op_locked(void* stream_op,
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, absl::OkStatus());
}
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
void grpc_chttp2_transport::PerformOp(grpc_transport_op* op) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", t,
gpr_log(GPR_INFO, "perform_transport_op[t=%p]: %s", this,
grpc_transport_op_string(op).c_str());
}
op->handler_private.extra_arg = gt;
t->Ref().release()->combiner->Run(
op->handler_private.extra_arg = this;
Ref().release()->combiner->Run(
GRPC_CLOSURE_INIT(&op->handler_private.closure,
perform_transport_op_locked, op, nullptr),
absl::OkStatus());
@ -3078,16 +3071,14 @@ static void connectivity_state_set(grpc_chttp2_transport* t,
// POLLSET STUFF
//
static void set_pollset(grpc_transport* gt, grpc_stream* /*gs*/,
grpc_pollset* pollset) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
grpc_endpoint_add_to_pollset(t->ep, pollset);
void grpc_chttp2_transport::SetPollset(grpc_stream* /*gs*/,
grpc_pollset* pollset) {
grpc_endpoint_add_to_pollset(ep, pollset);
}
static void set_pollset_set(grpc_transport* gt, grpc_stream* /*gs*/,
grpc_pollset_set* pollset_set) {
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
grpc_endpoint_add_to_pollset_set(t->ep, pollset_set);
void grpc_chttp2_transport::SetPollsetSet(grpc_stream* /*gs*/,
grpc_pollset_set* pollset_set) {
grpc_endpoint_add_to_pollset_set(ep, pollset_set);
}
//
@ -3245,41 +3236,36 @@ const char* grpc_chttp2_initiate_write_reason_string(
GPR_UNREACHABLE_CODE(return "unknown");
}
static grpc_endpoint* chttp2_get_endpoint(grpc_transport* t) {
return (reinterpret_cast<grpc_chttp2_transport*>(t))->ep;
grpc_endpoint* grpc_chttp2_transport::GetEndpoint() { return ep; }
size_t grpc_chttp2_transport::SizeOfStream() const {
return sizeof(grpc_chttp2_stream);
}
static const grpc_transport_vtable vtable = {sizeof(grpc_chttp2_stream),
false,
"chttp2",
init_stream,
nullptr,
set_pollset,
set_pollset_set,
perform_stream_op,
perform_transport_op,
destroy_stream,
destroy_transport,
chttp2_get_endpoint};
bool grpc_chttp2_transport::
HackyDisableStreamOpBatchCoalescingInConnectedChannel() const {
return false;
}
static const grpc_transport_vtable* get_vtable(void) { return &vtable; }
absl::string_view grpc_chttp2_transport::GetTransportName() const {
return "chttp2";
}
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_transport* transport) {
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport) {
grpc_chttp2_transport* t =
reinterpret_cast<grpc_chttp2_transport*>(transport);
return t->channelz_socket;
}
grpc_transport* grpc_create_chttp2_transport(
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client) {
auto t = new grpc_chttp2_transport(channel_args, ep, is_client);
return &t->base;
return new grpc_chttp2_transport(channel_args, ep, is_client);
}
void grpc_chttp2_transport_start_reading(
grpc_transport* transport, grpc_slice_buffer* read_buffer,
grpc_core::Transport* transport, grpc_slice_buffer* read_buffer,
grpc_closure* notify_on_receive_settings, grpc_closure* notify_on_close) {
auto t = reinterpret_cast<grpc_chttp2_transport*>(transport)->Ref();
if (read_buffer != nullptr) {

@ -41,19 +41,19 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_hpack_parser;
/// Creates a CHTTP2 Transport. This takes ownership of a \a resource_user ref
/// from the caller; if the caller still needs the resource_user after creating
/// a transport, the caller must take another ref.
grpc_transport* grpc_create_chttp2_transport(
grpc_core::Transport* grpc_create_chttp2_transport(
const grpc_core::ChannelArgs& channel_args, grpc_endpoint* ep,
bool is_client);
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_transport* transport);
grpc_chttp2_transport_get_socket_node(grpc_core::Transport* transport);
/// Takes ownership of \a read_buffer, which (if non-NULL) contains
/// leftover bytes previously read from the endpoint (e.g., by handshakers).
/// If non-null, \a notify_on_receive_settings will be scheduled when
/// HTTP/2 settings are received from the peer.
void grpc_chttp2_transport_start_reading(
grpc_transport* transport, grpc_slice_buffer* read_buffer,
grpc_core::Transport* transport, grpc_slice_buffer* read_buffer,
grpc_closure* notify_on_receive_settings, grpc_closure* notify_on_close);
namespace grpc_core {

@ -68,6 +68,7 @@
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/slice/slice.h"
@ -76,8 +77,6 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
// Flag that this closure barrier may be covering a write in a pollset, and so
// we should not complete this closure until we can prove that the write got
@ -234,26 +233,40 @@ typedef enum {
GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED,
} grpc_chttp2_keepalive_state;
struct grpc_chttp2_transport : public grpc_core::KeepsGrpcInitialized {
struct grpc_chttp2_transport final
: public grpc_core::Transport,
public grpc_core::FilterStackTransport,
public grpc_core::RefCounted<grpc_chttp2_transport,
grpc_core::NonPolymorphicRefCount>,
public grpc_core::KeepsGrpcInitialized {
grpc_chttp2_transport(const grpc_core::ChannelArgs& channel_args,
grpc_endpoint* ep, bool is_client);
~grpc_chttp2_transport();
// Make this be able to be contained in RefCountedPtr<>
// Can't yet make this derive from RefCounted because we need to keep
// `grpc_transport base` first.
// TODO(ctiller): Make a transport interface.
void IncrementRefCount() { refs.Ref(); }
void Unref() {
if (refs.Unref()) delete this;
}
grpc_core::RefCountedPtr<grpc_chttp2_transport> Ref() {
IncrementRefCount();
return grpc_core::RefCountedPtr<grpc_chttp2_transport>(this);
~grpc_chttp2_transport() override;
void Orphan() override;
size_t SizeOfStream() const override;
bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override;
void PerformStreamOp(grpc_stream* gs,
grpc_transport_stream_op_batch* op) override;
void DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) override;
grpc_core::FilterStackTransport* filter_stack_transport() override {
return this;
}
grpc_core::ClientTransport* client_transport() override { return nullptr; }
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override;
void InitStream(grpc_stream* gs, grpc_stream_refcount* refcount,
const void* server_data, grpc_core::Arena* arena) override;
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override;
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override;
void PerformOp(grpc_transport_op* op) override;
grpc_endpoint* GetEndpoint() override;
grpc_transport base; // must be first
grpc_core::RefCount refs;
grpc_endpoint* ep;
grpc_core::Slice peer_string;
@ -317,7 +330,7 @@ struct grpc_chttp2_transport : public grpc_core::KeepsGrpcInitialized {
grpc_chttp2_stream** accepting_stream = nullptr;
// accept stream callback
void (*accept_stream_cb)(void* user_data, grpc_transport* transport,
void (*accept_stream_cb)(void* user_data, grpc_core::Transport* transport,
const void* server_data);
// registered_method_matcher_cb is called before invoking the recv initial
// metadata callback.
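Because grpc_chttp2_transport now derives from RefCounted<grpc_chttp2_transport, NonPolymorphicRefCount>, the hand-rolled IncrementRefCount()/Unref() pair shown removed above is replaced by the standard Ref()/RefCountedPtr machinery. A sketch of what that buys call sites (the function itself is illustrative):

// Illustrative: Ref() is now provided by the RefCounted<> base and returns a
// RefCountedPtr that releases the ref when it goes out of scope.
void UseTransportBriefly(grpc_chttp2_transport* t) {
  grpc_core::RefCountedPtr<grpc_chttp2_transport> ref = t->Ref();
  // ... operate on the transport while holding the ref ...
}  // ref dropped here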

@ -33,17 +33,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
// Cronet transport object
typedef struct cronet_transport {
grpc_transport base; // must be first element in this structure
void* engine;
char* host;
} cronet_transport;
extern grpc_transport_vtable grpc_cronet_vtable;
#include "src/core/lib/transport/transport.h"
GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
void* engine, const char* target, const grpc_channel_args* args,
@ -58,7 +48,7 @@ GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
.PreconditionChannelArgs(args)
.Set(GRPC_ARG_DISABLE_CLIENT_AUTHORITY_FILTER, 1);
grpc_transport* ct = grpc_create_cronet_transport(
grpc_core::Transport* ct = grpc_create_cronet_transport(
engine, target, channel_args.ToC().get(), reserved);
grpc_core::ExecCtx exec_ctx;

@ -61,7 +61,6 @@
#include "src/core/lib/surface/validate_metadata.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
// IWYU pragma: no_include <type_traits>
@ -121,8 +120,30 @@ static bidirectional_stream_callback cronet_callbacks = {
on_canceled};
// Cronet transport object
struct grpc_cronet_transport {
grpc_transport base; // must be first element in this structure
struct grpc_cronet_transport final : public grpc_core::Transport,
public grpc_core::FilterStackTransport {
FilterStackTransport* filter_stack_transport() override { return this; }
grpc_core::ClientTransport* client_transport() override { return nullptr; }
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override { return "cronet_http"; }
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override {}
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override {}
void PerformOp(grpc_transport_op* op) override;
grpc_endpoint* GetEndpoint() override { return nullptr; }
size_t SizeOfStream() const override;
void InitStream(grpc_stream* gs, grpc_stream_refcount* refcount,
const void* server_data, grpc_core::Arena* arena) override;
bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override {
return true;
}
void PerformStreamOp(grpc_stream* gs,
grpc_transport_stream_op_batch* op) override;
void DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) override;
void Orphan() override {}
stream_engine* engine;
char* host;
bool use_packet_coalescing;
@ -203,7 +224,7 @@ struct op_storage {
};
struct stream_obj {
stream_obj(grpc_transport* gt, grpc_stream* gs,
stream_obj(grpc_core::Transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, grpc_core::Arena* arena);
~stream_obj();
@ -1384,7 +1405,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
// Functions used by upper layers to access transport functionality.
//
inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
inline stream_obj::stream_obj(grpc_core::Transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount,
grpc_core::Arena* arena)
: arena(arena),
@ -1398,22 +1419,15 @@ inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
inline stream_obj::~stream_obj() { null_and_maybe_free_read_buffer(this); }
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount,
const void* /*server_data*/, grpc_core::Arena* arena) {
new (gs) stream_obj(gt, gs, refcount, arena);
return 0;
void grpc_cronet_transport::InitStream(grpc_stream* gs,
grpc_stream_refcount* refcount,
const void* /*server_data*/,
grpc_core::Arena* arena) {
new (gs) stream_obj(this, gs, refcount, arena);
}
static void set_pollset_do_nothing(grpc_transport* /*gt*/, grpc_stream* /*gs*/,
grpc_pollset* /*pollset*/) {}
static void set_pollset_set_do_nothing(grpc_transport* /*gt*/,
grpc_stream* /*gs*/,
grpc_pollset_set* /*pollset_set*/) {}
static void perform_stream_op(grpc_transport* /*gt*/, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
void grpc_cronet_transport::PerformStreamOp(
grpc_stream* gs, grpc_transport_stream_op_batch* op) {
CRONET_LOG(GPR_DEBUG, "perform_stream_op");
if (op->send_initial_metadata &&
header_has_authority(
@ -1446,43 +1460,27 @@ static void perform_stream_op(grpc_transport* /*gt*/, grpc_stream* gs,
execute_from_storage(s);
}
static void destroy_stream(grpc_transport* /*gt*/, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
void grpc_cronet_transport::DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) {
stream_obj* s = reinterpret_cast<stream_obj*>(gs);
s->~stream_obj();
grpc_core::ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure,
absl::OkStatus());
}
static void destroy_transport(grpc_transport* /*gt*/) {}
static grpc_endpoint* get_endpoint(grpc_transport* /*gt*/) { return nullptr; }
static void perform_op(grpc_transport* /*gt*/, grpc_transport_op* /*op*/) {}
static const grpc_transport_vtable grpc_cronet_vtable = {
sizeof(stream_obj),
false,
"cronet_http",
init_stream,
nullptr,
set_pollset_do_nothing,
set_pollset_set_do_nothing,
perform_stream_op,
perform_op,
destroy_stream,
destroy_transport,
get_endpoint};
grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
const grpc_channel_args* args,
void* /*reserved*/) {
grpc_cronet_transport* ct = static_cast<grpc_cronet_transport*>(
gpr_malloc(sizeof(grpc_cronet_transport)));
void grpc_cronet_transport::PerformOp(grpc_transport_op* /*op*/) {}
size_t grpc_cronet_transport::SizeOfStream() const {
return sizeof(stream_obj);
}
grpc_core::Transport* grpc_create_cronet_transport(
void* engine, const char* target, const grpc_channel_args* args,
void* /*reserved*/) {
grpc_cronet_transport* ct = new grpc_cronet_transport();
if (!ct) {
goto error;
}
ct->base.vtable = &grpc_cronet_vtable;
ct->engine = static_cast<stream_engine*>(engine);
ct->host = static_cast<char*>(gpr_malloc(strlen(target) + 1));
if (!ct->host) {
@ -1505,14 +1503,14 @@ grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
}
}
return &ct->base;
return ct;
error:
if (ct) {
if (ct->host) {
gpr_free(ct->host);
}
gpr_free(ct);
delete ct;
}
return nullptr;
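A smaller but telling detail above: grpc_cronet_transport is now a C++ class with virtual methods, so the factory builds it with new and the error path tears it down with delete instead of gpr_malloc/gpr_free. A condensed, illustrative version of that flow (the function name and the simplified host handling are not from the patch):

// Condensed sketch: gpr_malloc would skip the constructor and vtable setup,
// so the object is now new-ed, and the failure path delete-s it.
grpc_core::Transport* MakeCronetTransportForDemo(void* engine,
                                                 const char* target) {
  auto* ct = new grpc_cronet_transport();  // was: gpr_malloc(sizeof(*ct))
  ct->engine = static_cast<stream_engine*>(engine);
  ct->host = static_cast<char*>(gpr_malloc(strlen(target) + 1));
  if (ct->host == nullptr) {
    delete ct;                             // was: gpr_free(ct)
    return nullptr;
  }
  memcpy(ct->host, target, strlen(target) + 1);
  return ct;
}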

@ -25,8 +25,8 @@
#include "src/core/lib/transport/transport_fwd.h"
grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
const grpc_channel_args* args,
void* reserved);
grpc_core::Transport* grpc_create_cronet_transport(
void* engine, const char* target, const grpc_channel_args* args,
void* reserved);
#endif // GRPC_SRC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H

@ -18,6 +18,9 @@
#include <grpc/support/port_platform.h>
#include "src/core/ext/transport/inproc/legacy_inproc_transport.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
@ -40,12 +43,10 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/ext/transport/inproc/inproc_transport.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_args_preconditioning.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/status_helper.h"
@ -65,8 +66,6 @@
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#define INPROC_LOG(...) \
do { \
@ -102,26 +101,53 @@ struct shared_mu {
gpr_refcount refs;
};
struct inproc_transport {
inproc_transport(const grpc_transport_vtable* vtable, shared_mu* mu,
bool is_client)
struct inproc_transport final : public grpc_core::Transport,
public grpc_core::FilterStackTransport {
inproc_transport(shared_mu* mu, bool is_client)
: mu(mu),
is_client(is_client),
state_tracker(is_client ? "inproc_client" : "inproc_server",
GRPC_CHANNEL_READY) {
base.vtable = vtable;
// Start each side of transport with 2 refs since they each have a ref
// to the other
gpr_ref_init(&refs, 2);
}
~inproc_transport() {
~inproc_transport() override {
if (gpr_unref(&mu->refs)) {
mu->~shared_mu();
gpr_free(mu);
}
}
grpc_core::FilterStackTransport* filter_stack_transport() override {
return this;
}
grpc_core::ClientTransport* client_transport() override { return nullptr; }
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override;
void InitStream(grpc_stream* gs, grpc_stream_refcount* refcount,
const void* server_data, grpc_core::Arena* arena) override;
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override;
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override;
void PerformOp(grpc_transport_op* op) override;
grpc_endpoint* GetEndpoint() override;
size_t SizeOfStream() const override;
bool HackyDisableStreamOpBatchCoalescingInConnectedChannel() const override {
return true;
}
void PerformStreamOp(grpc_stream* gs,
grpc_transport_stream_op_batch* op) override;
void DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) override;
void Orphan() override;
void ref() {
INPROC_LOG(GPR_INFO, "ref_transport %p", this);
gpr_ref(&refs);
@ -137,12 +163,11 @@ struct inproc_transport {
gpr_free(this);
}
grpc_transport base;
shared_mu* mu;
gpr_refcount refs;
bool is_client;
grpc_core::ConnectivityStateTracker state_tracker;
void (*accept_stream_cb)(void* user_data, grpc_transport* transport,
void (*accept_stream_cb)(void* user_data, grpc_core::Transport* transport,
const void* server_data);
void (*registered_method_matcher_cb)(
void* user_data, grpc_core::ServerMetadata* metadata) = nullptr;
@ -179,7 +204,7 @@ struct inproc_stream {
// side to avoid destruction
INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p",
st->accept_stream_cb, st->accept_stream_data);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, this);
(*st->accept_stream_cb)(st->accept_stream_data, t, this);
} else {
// This is the server-side and is being called through accept_stream_cb
inproc_stream* cs = const_cast<inproc_stream*>(
@ -344,13 +369,12 @@ void fill_in_metadata(inproc_stream* s, const grpc_metadata_batch* metadata,
metadata->Encode(&sink);
}
int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena) {
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
new (gs) inproc_stream(t, refcount, server_data, arena);
return 0; // return value is not important
void inproc_transport::InitStream(grpc_stream* gs,
grpc_stream_refcount* refcount,
const void* server_data,
grpc_core::Arena* arena) {
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", this, gs, server_data);
new (gs) inproc_stream(this, refcount, server_data, arena);
}
void close_stream_locked(inproc_stream* s) {
@ -918,9 +942,9 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
return ret;
}
void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
void inproc_transport::PerformStreamOp(grpc_stream* gs,
grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", this, gs, op);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu);
@ -1115,21 +1139,20 @@ void close_transport_locked(inproc_transport* t) {
}
}
void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu);
void inproc_transport::PerformOp(grpc_transport_op* op) {
INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", this, op);
gpr_mu_lock(&mu->mu);
if (op->start_connectivity_watch != nullptr) {
t->state_tracker.AddWatcher(op->start_connectivity_watch_state,
std::move(op->start_connectivity_watch));
state_tracker.AddWatcher(op->start_connectivity_watch_state,
std::move(op->start_connectivity_watch));
}
if (op->stop_connectivity_watch != nullptr) {
t->state_tracker.RemoveWatcher(op->stop_connectivity_watch);
state_tracker.RemoveWatcher(op->stop_connectivity_watch);
}
if (op->set_accept_stream) {
t->accept_stream_cb = op->set_accept_stream_fn;
t->registered_method_matcher_cb = op->set_registered_method_matcher_fn;
t->accept_stream_data = op->set_accept_stream_user_data;
accept_stream_cb = op->set_accept_stream_fn;
registered_method_matcher_cb = op->set_registered_method_matcher_fn;
accept_stream_data = op->set_accept_stream_user_data;
}
if (op->on_consumed) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, absl::OkStatus());
@ -1144,78 +1167,69 @@ void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
}
if (do_close) {
close_transport_locked(t);
close_transport_locked(this);
}
gpr_mu_unlock(&t->mu->mu);
gpr_mu_unlock(&mu->mu);
}
void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) {
void inproc_transport::DestroyStream(grpc_stream* gs,
grpc_closure* then_schedule_closure) {
INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu_lock(&t->mu->mu);
gpr_mu_lock(&mu->mu);
close_stream_locked(s);
gpr_mu_unlock(&t->mu->mu);
gpr_mu_unlock(&mu->mu);
s->~inproc_stream();
grpc_core::ExecCtx::Run(DEBUG_LOCATION, then_schedule_closure,
absl::OkStatus());
}
void destroy_transport(grpc_transport* gt) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu);
close_transport_locked(t);
gpr_mu_unlock(&t->mu->mu);
t->other_side->unref();
t->unref();
void inproc_transport::Orphan() {
INPROC_LOG(GPR_INFO, "destroy_transport %p", this);
gpr_mu_lock(&mu->mu);
close_transport_locked(this);
gpr_mu_unlock(&mu->mu);
other_side->unref();
unref();
}
//******************************************************************************
// INTEGRATION GLUE
//
void set_pollset(grpc_transport* /*gt*/, grpc_stream* /*gs*/,
grpc_pollset* /*pollset*/) {
size_t inproc_transport::SizeOfStream() const { return sizeof(inproc_stream); }
absl::string_view inproc_transport::GetTransportName() const {
return "inproc";
}
void inproc_transport::SetPollset(grpc_stream* /*gs*/,
grpc_pollset* /*pollset*/) {
// Nothing to do here
}
void set_pollset_set(grpc_transport* /*gt*/, grpc_stream* /*gs*/,
grpc_pollset_set* /*pollset_set*/) {
void inproc_transport::SetPollsetSet(grpc_stream* /*gs*/,
grpc_pollset_set* /*pollset_set*/) {
// Nothing to do here
}
grpc_endpoint* get_endpoint(grpc_transport* /*t*/) { return nullptr; }
const grpc_transport_vtable inproc_vtable = {sizeof(inproc_stream),
true,
"inproc",
init_stream,
nullptr,
set_pollset,
set_pollset_set,
perform_stream_op,
perform_transport_op,
destroy_stream,
destroy_transport,
get_endpoint};
grpc_endpoint* inproc_transport::GetEndpoint() { return nullptr; }
//******************************************************************************
// Main inproc transport functions
//
void inproc_transports_create(grpc_transport** server_transport,
grpc_transport** client_transport) {
void inproc_transports_create(grpc_core::Transport** server_transport,
grpc_core::Transport** client_transport) {
INPROC_LOG(GPR_INFO, "inproc_transports_create");
shared_mu* mu = new (gpr_malloc(sizeof(*mu))) shared_mu();
inproc_transport* st = new (gpr_malloc(sizeof(*st)))
inproc_transport(&inproc_vtable, mu, /*is_client=*/false);
inproc_transport* ct = new (gpr_malloc(sizeof(*ct)))
inproc_transport(&inproc_vtable, mu, /*is_client=*/true);
inproc_transport* st =
new (gpr_malloc(sizeof(*st))) inproc_transport(mu, /*is_client=*/false);
inproc_transport* ct =
new (gpr_malloc(sizeof(*ct))) inproc_transport(mu, /*is_client=*/true);
st->other_side = ct;
ct->other_side = st;
*server_transport = reinterpret_cast<grpc_transport*>(st);
*client_transport = reinterpret_cast<grpc_transport*>(ct);
*server_transport = reinterpret_cast<grpc_core::Transport*>(st);
*client_transport = reinterpret_cast<grpc_core::Transport*>(ct);
}
} // namespace
@ -1241,8 +1255,8 @@ grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
.channel_args_preconditioning()
.PreconditionChannelArgs(args)
.Set(GRPC_ARG_DEFAULT_AUTHORITY, "inproc.authority");
grpc_transport* server_transport;
grpc_transport* client_transport;
grpc_core::Transport* server_transport;
grpc_core::Transport* client_transport;
inproc_transports_create(&server_transport, &client_transport);
// TODO(ncteisen): design and support channelz GetSocket for inproc.
@ -1264,7 +1278,7 @@ grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
}
// client_transport was destroyed when grpc_channel_create_internal saw an
// error.
grpc_transport_destroy(server_transport);
server_transport->Orphan();
channel = grpc_lame_client_channel_create(
nullptr, status, "Failed to create client channel");
} else {
@ -1280,11 +1294,10 @@ grpc_channel* grpc_legacy_inproc_channel_create(grpc_server* server,
&integer)) {
status = static_cast<grpc_status_code>(integer);
}
grpc_transport_destroy(client_transport);
grpc_transport_destroy(server_transport);
client_transport->Orphan();
server_transport->Orphan();
channel = grpc_lame_client_channel_create(
nullptr, status, "Failed to create server channel");
}
return channel;
}
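// Editorial sketch (not part of the patch): the ownership contract the error
// paths above rely on. Orphan() replaces grpc_transport_destroy(); each side's
// Orphan() drops both its own ref and the ref it holds on its peer (see
// inproc_transport::Orphan above), so an unused pair is discarded by
// Orphan()ing both sides. The helper name below is illustrative.
void CreateAndDiscardInprocPair() {
  grpc_core::ExecCtx exec_ctx;  // closures scheduled during teardown need one
  grpc_core::Transport* server_transport = nullptr;
  grpc_core::Transport* client_transport = nullptr;
  inproc_transports_create(&server_transport, &client_transport);
  client_transport->Orphan();
  server_transport->Orphan();
}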

@ -79,11 +79,9 @@
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
typedef struct connected_channel_channel_data {
grpc_transport* transport;
grpc_core::Transport* transport;
} channel_data;
struct callback_state {
@ -182,15 +180,15 @@ static void connected_channel_start_transport_stream_op_batch(
callback_state* state = get_state_for_batch(calld, batch);
intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
}
grpc_transport_perform_stream_op(
chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), batch);
chand->transport->filter_stack_transport()->PerformStreamOp(
TRANSPORT_STREAM_FROM_CALL_DATA(calld), batch);
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "passed batch to transport");
}
static void connected_channel_start_transport_op(grpc_channel_element* elem,
grpc_transport_op* op) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_transport_perform_op(chand->transport, op);
chand->transport->PerformOp(op);
}
// Constructor for call_data
@ -199,19 +197,18 @@ static grpc_error_handle connected_channel_init_call_elem(
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
calld->call_combiner = args->call_combiner;
int r = grpc_transport_init_stream(
chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data, args->arena);
return r == 0 ? absl::OkStatus()
: GRPC_ERROR_CREATE("transport stream initialization failed");
chand->transport->filter_stack_transport()->InitStream(
TRANSPORT_STREAM_FROM_CALL_DATA(calld), &args->call_stack->refcount,
args->server_transport_data, args->arena);
return absl::OkStatus();
}
static void set_pollset_or_pollset_set(grpc_call_element* elem,
grpc_polling_entity* pollent) {
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_transport_set_pops(chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
chand->transport->SetPollingEntity(TRANSPORT_STREAM_FROM_CALL_DATA(calld),
pollent);
}
// Destructor for call_data
@ -220,9 +217,8 @@ static void connected_channel_destroy_call_elem(
grpc_closure* then_schedule_closure) {
call_data* calld = static_cast<call_data*>(elem->call_data);
channel_data* chand = static_cast<channel_data*>(elem->channel_data);
grpc_transport_destroy_stream(chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
then_schedule_closure);
chand->transport->filter_stack_transport()->DestroyStream(
TRANSPORT_STREAM_FROM_CALL_DATA(calld), then_schedule_closure);
}
// Constructor for channel_data
@ -230,7 +226,7 @@ static grpc_error_handle connected_channel_init_channel_elem(
grpc_channel_element* elem, grpc_channel_element_args* args) {
channel_data* cd = static_cast<channel_data*>(elem->channel_data);
GPR_ASSERT(args->is_last);
cd->transport = args->channel_args.GetObject<grpc_transport>();
cd->transport = args->channel_args.GetObject<grpc_core::Transport>();
return absl::OkStatus();
}
@ -238,7 +234,7 @@ static grpc_error_handle connected_channel_init_channel_elem(
static void connected_channel_destroy_channel_elem(grpc_channel_element* elem) {
channel_data* cd = static_cast<channel_data*>(elem->channel_data);
if (cd->transport) {
grpc_transport_destroy(cd->transport);
cd->transport->Orphan();
}
}
@ -254,7 +250,7 @@ namespace {
defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
class ConnectedChannelStream : public Orphanable {
public:
explicit ConnectedChannelStream(grpc_transport* transport)
explicit ConnectedChannelStream(Transport* transport)
: transport_(transport), stream_(nullptr, StreamDeleter(this)) {
GRPC_STREAM_REF_INIT(
&stream_refcount_, 1,
@ -264,7 +260,7 @@ class ConnectedChannelStream : public Orphanable {
this, "ConnectedChannelStream");
}
grpc_transport* transport() { return transport_; }
Transport* transport() { return transport_; }
grpc_closure* stream_destroyed_closure() { return &stream_destroyed_; }
BatchBuilder::Target batch_target() {
@ -337,8 +333,8 @@ class ConnectedChannelStream : public Orphanable {
explicit StreamDeleter(ConnectedChannelStream* impl) : impl_(impl) {}
void operator()(grpc_stream* stream) const {
if (stream == nullptr) return;
grpc_transport_destroy_stream(impl_->transport(), stream,
impl_->stream_destroyed_closure());
impl_->transport()->filter_stack_transport()->DestroyStream(
stream, impl_->stream_destroyed_closure());
}
private:
@ -358,7 +354,7 @@ class ConnectedChannelStream : public Orphanable {
}
}
grpc_transport* const transport_;
Transport* const transport_;
RefCountedPtr<CallContext> const call_context_{
GetContext<CallContext>()->Ref()};
grpc_closure stream_destroyed_ =
@ -436,22 +432,22 @@ auto ConnectedChannelStream::SendMessages(
// defined(GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL)
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_CLIENT_CALL
ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(
grpc_transport* transport, CallArgs call_args, NextPromiseFactory) {
ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(Transport* transport,
CallArgs call_args,
NextPromiseFactory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(
GetContext<Arena>()->Alloc(transport->vtable->sizeof_stream)));
grpc_transport_init_stream(transport, stream->stream(),
stream->stream_refcount(), nullptr,
GetContext<Arena>());
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(stream->stream(),
stream->stream_refcount(),
nullptr, GetContext<Arena>());
auto* party = static_cast<Party*>(Activity::current());
party->Spawn(
"set_polling_entity", call_args.polling_entity->Wait(),
[transport,
stream = stream->InternalRef()](grpc_polling_entity polling_entity) {
grpc_transport_set_pops(transport, stream->stream(), &polling_entity);
});
party->Spawn("set_polling_entity", call_args.polling_entity->Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
// Start a loop to send messages from client_to_server_messages to the
// transport. When the pipe closes and the loop completes, send a trailing
// metadata batch to close the stream.
@ -573,15 +569,14 @@ ArenaPromise<ServerMetadataHandle> MakeClientCallPromise(
#ifdef GRPC_EXPERIMENT_IS_INCLUDED_PROMISE_BASED_SERVER_CALL
ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
grpc_transport* transport, CallArgs,
NextPromiseFactory next_promise_factory) {
Transport* transport, CallArgs, NextPromiseFactory next_promise_factory) {
OrphanablePtr<ConnectedChannelStream> stream(
GetContext<Arena>()->New<ConnectedChannelStream>(transport));
stream->SetStream(static_cast<grpc_stream*>(
GetContext<Arena>()->Alloc(transport->vtable->sizeof_stream)));
grpc_transport_init_stream(
transport, stream->stream(), stream->stream_refcount(),
stream->SetStream(static_cast<grpc_stream*>(GetContext<Arena>()->Alloc(
transport->filter_stack_transport()->SizeOfStream())));
transport->filter_stack_transport()->InitStream(
stream->stream(), stream->stream_refcount(),
GetContext<CallContext>()->server_call_context()->server_stream_data(),
GetContext<Arena>());
auto* party = static_cast<Party*>(Activity::current());
@ -600,12 +595,11 @@ ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
GetContext<CallFinalization>()->Add(
[call_data](const grpc_call_final_info*) { call_data->~CallData(); });
party->Spawn(
"set_polling_entity", call_data->polling_entity_latch.Wait(),
[transport,
stream = stream->InternalRef()](grpc_polling_entity polling_entity) {
grpc_transport_set_pops(transport, stream->stream(), &polling_entity);
});
party->Spawn("set_polling_entity", call_data->polling_entity_latch.Wait(),
[transport, stream = stream->InternalRef()](
grpc_polling_entity polling_entity) {
transport->SetPollingEntity(stream->stream(), &polling_entity);
});
auto server_to_client_empty =
call_data->server_to_client.receiver.AwaitEmpty();
@ -846,7 +840,7 @@ ArenaPromise<ServerMetadataHandle> MakeServerCallPromise(
#endif
template <ArenaPromise<ServerMetadataHandle> (*make_call_promise)(
grpc_transport*, CallArgs, NextPromiseFactory)>
Transport*, CallArgs, NextPromiseFactory)>
grpc_channel_filter MakeConnectedFilter() {
// Create a vtable that contains both the legacy call methods (for filter
// stack based calls) and the new promise based method for creating
@ -856,7 +850,7 @@ grpc_channel_filter MakeConnectedFilter() {
// call be promise based.
auto make_call_wrapper = +[](grpc_channel_element* elem, CallArgs call_args,
NextPromiseFactory next) {
grpc_transport* transport =
Transport* transport =
static_cast<channel_data*>(elem->channel_data)->transport;
return make_call_promise(transport, std::move(call_args), std::move(next));
};
@ -876,8 +870,10 @@ grpc_channel_filter MakeConnectedFilter() {
// do this, and I'm not sure what that is yet. This is only "safe"
// because call stacks place no additional data after the last call
// element, and the last call element MUST be the connected channel.
channel_stack->call_stack_size += grpc_transport_stream_size(
static_cast<channel_data*>(elem->channel_data)->transport);
channel_stack->call_stack_size +=
static_cast<channel_data*>(elem->channel_data)
->transport->filter_stack_transport()
->SizeOfStream();
},
connected_channel_destroy_channel_elem,
connected_channel_get_channel_info,
@ -886,8 +882,8 @@ grpc_channel_filter MakeConnectedFilter() {
}
ArenaPromise<ServerMetadataHandle> MakeTransportCallPromise(
grpc_transport* transport, CallArgs call_args, NextPromiseFactory) {
return transport->vtable->make_call_promise(transport, std::move(call_args));
Transport* transport, CallArgs call_args, NextPromiseFactory) {
return transport->client_transport()->MakeCallPromise(std::move(call_args));
}
const grpc_channel_filter kPromiseBasedTransportFilter =
@ -910,13 +906,8 @@ const grpc_channel_filter kServerEmulatedFilter =
#endif
bool TransportSupportsPromiseBasedCalls(const ChannelArgs& args) {
grpc_transport* transport =
args.GetPointer<grpc_transport>(GRPC_ARG_TRANSPORT);
return transport->vtable->make_call_promise != nullptr;
}
bool TransportDoesNotSupportPromiseBasedCalls(const ChannelArgs& args) {
return !TransportSupportsPromiseBasedCalls(args);
auto* transport = args.GetObject<Transport>();
return transport->client_transport() != nullptr;
}
} // namespace
@ -948,15 +939,15 @@ void RegisterConnectedChannel(CoreConfiguration::Builder* builder) {
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_SUBCHANNEL, &kClientEmulatedFilter)
.Terminal()
.If(TransportDoesNotSupportPromiseBasedCalls);
.IfNot(TransportSupportsPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_CLIENT_DIRECT_CHANNEL, &kClientEmulatedFilter)
.Terminal()
.If(TransportDoesNotSupportPromiseBasedCalls);
.IfNot(TransportSupportsPromiseBasedCalls);
builder->channel_init()
->RegisterFilter(GRPC_SERVER_CHANNEL, &kServerEmulatedFilter)
.Terminal()
.If(TransportDoesNotSupportPromiseBasedCalls);
.IfNot(TransportSupportsPromiseBasedCalls);
}
} // namespace grpc_core

@ -59,7 +59,6 @@
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/surface/init_internally.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
// IWYU pragma: no_include <type_traits>
@ -176,8 +175,7 @@ const grpc_arg_pointer_vtable channelz_node_arg_vtable = {
absl::StatusOr<RefCountedPtr<Channel>> Channel::Create(
const char* target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type,
grpc_transport* optional_transport) {
grpc_channel_stack_type channel_stack_type, Transport* optional_transport) {
if (!args.GetString(GRPC_ARG_DEFAULT_AUTHORITY).has_value()) {
auto ssl_override = args.GetString(GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
if (ssl_override.has_value()) {

@ -55,7 +55,7 @@
#include "src/core/lib/resource_quota/memory_quota.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport.h"
/// The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
/// is safe to use from within core.
@ -112,7 +112,7 @@ class Channel : public RefCounted<Channel>,
static absl::StatusOr<RefCountedPtr<Channel>> Create(
const char* target, ChannelArgs args,
grpc_channel_stack_type channel_stack_type,
grpc_transport* optional_transport);
Transport* optional_transport);
static absl::StatusOr<RefCountedPtr<Channel>> CreateWithBuilder(
ChannelStackBuilder* builder);

@ -753,7 +753,7 @@ void Server::Start() {
}
grpc_error_handle Server::SetupTransport(
grpc_transport* transport, grpc_pollset* accepting_pollset,
Transport* transport, grpc_pollset* accepting_pollset,
const ChannelArgs& args,
const RefCountedPtr<channelz::SocketNode>& socket_node) {
// Create channel.
@ -1156,8 +1156,7 @@ Server::ChannelData::~ChannelData() {
void Server::ChannelData::InitTransport(RefCountedPtr<Server> server,
RefCountedPtr<Channel> channel,
size_t cq_idx,
grpc_transport* transport,
size_t cq_idx, Transport* transport,
intptr_t channelz_socket_uuid) {
server_ = std::move(server);
channel_ = channel;
@ -1226,7 +1225,7 @@ void Server::ChannelData::InitTransport(RefCountedPtr<Server> server,
if (server_->ShutdownCalled()) {
op->disconnect_with_error = GRPC_ERROR_CREATE("Server shutdown");
}
grpc_transport_perform_op(transport, op);
transport->PerformOp(op);
}
Server::ChannelRegisteredMethod* Server::ChannelData::GetRegisteredMethod(
@ -1300,7 +1299,7 @@ void Server::ChannelData::SetRegisteredMethodOnMetadata(
metadata->Set(GrpcRegisteredMethod(), method);
}
void Server::ChannelData::AcceptStream(void* arg, grpc_transport* /*transport*/,
void Server::ChannelData::AcceptStream(void* arg, Transport* /*transport*/,
const void* transport_server_data) {
auto* chand = static_cast<Server::ChannelData*>(arg);
// create a call

@ -65,7 +65,6 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
namespace grpc_core {
@ -161,7 +160,7 @@ class Server : public InternallyRefCounted<Server>,
// the server. Called from the listener when a new connection is accepted.
// Takes ownership of a ref on resource_user from the caller.
grpc_error_handle SetupTransport(
grpc_transport* transport, grpc_pollset* accepting_pollset,
Transport* transport, grpc_pollset* accepting_pollset,
const ChannelArgs& args,
const RefCountedPtr<channelz::SocketNode>& socket_node);
@ -238,8 +237,7 @@ class Server : public InternallyRefCounted<Server>,
void InitTransport(RefCountedPtr<Server> server,
RefCountedPtr<Channel> channel, size_t cq_idx,
grpc_transport* transport,
intptr_t channelz_socket_uuid);
Transport* transport, intptr_t channelz_socket_uuid);
RefCountedPtr<Server> server() const { return server_; }
Channel* channel() const { return channel_.get(); }
@ -260,7 +258,7 @@ class Server : public InternallyRefCounted<Server>,
private:
class ConnectivityWatcher;
static void AcceptStream(void* arg, grpc_transport* /*transport*/,
static void AcceptStream(void* arg, Transport* /*transport*/,
const void* transport_server_data);
static void SetRegisteredMethodOnMetadata(void* arg,
ServerMetadata* metadata);

@ -23,7 +23,6 @@
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_impl.h"
namespace grpc_core {
@ -98,8 +97,8 @@ BatchBuilder::Batch::~Batch() {
BatchBuilder::Batch* BatchBuilder::GetBatch(Target target) {
if (target_.has_value() &&
(target_->stream != target.stream ||
target.transport->vtable
->hacky_disable_stream_op_batch_coalescing_in_connected_channel)) {
target.transport->filter_stack_transport()
->HackyDisableStreamOpBatchCoalescingInConnectedChannel())) {
FlushBatch();
}
if (!target_.has_value()) {
@ -125,7 +124,8 @@ void BatchBuilder::FlushBatch() {
}
void BatchBuilder::Batch::PerformWith(Target target) {
grpc_transport_perform_stream_op(target.transport, target.stream, &batch);
target.transport->filter_stack_transport()->PerformStreamOp(target.stream,
&batch);
}
ServerMetadataHandle BatchBuilder::CompleteSendServerTrailingMetadata(

@ -49,7 +49,6 @@
#include "src/core/lib/surface/call_trace.h"
#include "src/core/lib/transport/metadata_batch.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
namespace grpc_core {
@ -66,7 +65,7 @@ class BatchBuilder {
}
struct Target {
grpc_transport* transport;
Transport* transport;
grpc_stream* stream;
grpc_stream_refcount* stream_refcount;
};

@ -33,12 +33,10 @@
#include <grpc/grpc.h>
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/time.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/transport/transport_impl.h"
grpc_core::DebugOnlyTraceFlag grpc_trace_stream_refcount(false,
"stream_refcount");
@ -104,57 +102,20 @@ void grpc_transport_move_stats(grpc_transport_stream_stats* from,
to->latency = std::exchange(from->latency, gpr_inf_future(GPR_TIMESPAN));
}
size_t grpc_transport_stream_size(grpc_transport* transport) {
return GPR_ROUND_UP_TO_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
}
void grpc_transport_destroy(grpc_transport* transport) {
transport->vtable->destroy(transport);
}
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data,
grpc_core::Arena* arena) {
return transport->vtable->init_stream(transport, stream, refcount,
server_data, arena);
}
void grpc_transport_perform_stream_op(grpc_transport* transport,
grpc_stream* stream,
grpc_transport_stream_op_batch* op) {
transport->vtable->perform_stream_op(transport, stream, op);
}
void grpc_transport_perform_op(grpc_transport* transport,
grpc_transport_op* op) {
transport->vtable->perform_op(transport, op);
}
void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
grpc_polling_entity* pollent) {
grpc_pollset* pollset;
grpc_pollset_set* pollset_set;
if ((pollset = grpc_polling_entity_pollset(pollent)) != nullptr) {
transport->vtable->set_pollset(transport, stream, pollset);
} else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) !=
nullptr) {
transport->vtable->set_pollset_set(transport, stream, pollset_set);
namespace grpc_core {
void Transport::SetPollingEntity(grpc_stream* stream,
grpc_polling_entity* pollset_or_pollset_set) {
if (auto* pollset = grpc_polling_entity_pollset(pollset_or_pollset_set)) {
SetPollset(stream, pollset);
} else if (auto* pollset_set =
grpc_polling_entity_pollset_set(pollset_or_pollset_set)) {
SetPollsetSet(stream, pollset_set);
} else {
// No-op for empty pollset. Empty pollset is possible when using
// non-fd-based event engines such as CFStream.
}
}
void grpc_transport_destroy_stream(grpc_transport* transport,
grpc_stream* stream,
grpc_closure* then_schedule_closure) {
transport->vtable->destroy_stream(transport, stream, then_schedule_closure);
}
grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport) {
return transport->vtable->get_endpoint(transport);
}
} // namespace grpc_core
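// Editorial sketch (not part of the patch): SetPollingEntity is the
// replacement for grpc_transport_set_pops. A caller wraps whichever polling
// primitive it has in a grpc_polling_entity and lets the transport dispatch
// to SetPollset/SetPollsetSet (or neither, for non-fd event engines). The
// helper name is illustrative.
void AttachPollset(grpc_core::Transport* transport, grpc_stream* stream,
                   grpc_pollset* pollset) {
  grpc_polling_entity entity =
      grpc_polling_entity_create_from_pollset(pollset);
  transport->SetPollingEntity(stream, &entity);
}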
// This comment should be sung to the tune of
// "Supercalifragilisticexpialidocious":

@ -29,7 +29,9 @@
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include <grpc/impl/connectivity_state.h>
@ -522,12 +524,12 @@ typedef struct grpc_transport_op {
/// Error contract: the transport that gets this op must cause
/// goaway_error to be unref'ed after processing it
grpc_error_handle goaway_error;
void (*set_accept_stream_fn)(void* user_data, grpc_transport* transport,
void (*set_accept_stream_fn)(void* user_data, grpc_core::Transport* transport,
const void* server_data) = nullptr;
void (*set_registered_method_matcher_fn)(
void* user_data, grpc_core::ServerMetadata* metadata) = nullptr;
void* set_accept_stream_user_data = nullptr;
void (*set_make_promise_fn)(void* user_data, grpc_transport* transport,
void (*set_make_promise_fn)(void* user_data, grpc_core::Transport* transport,
const void* server_data) = nullptr;
void* set_make_promise_user_data = nullptr;
/// add this transport to a pollset
@ -571,42 +573,6 @@ typedef struct grpc_transport_op {
grpc_handler_private_op_data handler_private;
} grpc_transport_op;
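// Editorial sketch (not part of the patch): grpc_transport_op is unchanged
// except that its callbacks now take grpc_core::Transport*. Registering a
// server-side accept-stream hook and delivering the op through the new
// PerformOp entry point looks roughly like this; grpc_make_transport_op() is
// the allocation helper documented at the end of this header, and the handler
// body is hypothetical (compare Server::ChannelData::InitTransport above).
void RegisterAcceptStream(grpc_core::Transport* transport, void* user_data) {
  grpc_transport_op* op = grpc_make_transport_op(/*on_complete=*/nullptr);
  op->set_accept_stream = true;
  op->set_accept_stream_fn = [](void* ud, grpc_core::Transport* /*transport*/,
                                const void* /*transport_server_data*/) {
    // A real server creates a call here, as Server::ChannelData::AcceptStream
    // does above; this sketch does nothing with `ud`.
    (void)ud;
  };
  op->set_accept_stream_user_data = user_data;
  transport->PerformOp(op);  // the transport consumes and acks the op
}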
// Returns the amount of memory required to store a grpc_stream for this
// transport
size_t grpc_transport_stream_size(grpc_transport* transport);
// Initialize transport data for a stream.
// Returns 0 on success, any other (transport-defined) value for failure.
// May assume that stream contains all-zeros.
// Arguments:
// transport - the transport on which to create this stream
// stream - a pointer to uninitialized memory to initialize
// server_data - either NULL for a client initiated stream, or a pointer
// supplied from the accept_stream callback function
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data,
grpc_core::Arena* arena);
void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
grpc_polling_entity* pollent);
// Destroy transport data for a stream.
// Requires: a recv_batch with final_state == GRPC_STREAM_CLOSED has been
// received by the up-layer. Must not be called in the same call stack as
// recv_frame.
// Arguments:
// transport - the transport on which to create this stream
// stream - the grpc_stream to destroy (memory is still owned by the
// caller, but any child memory must be cleaned up)
void grpc_transport_destroy_stream(grpc_transport* transport,
grpc_stream* stream,
grpc_closure* then_schedule_closure);
void grpc_transport_stream_op_batch_finish_with_failure(
grpc_transport_stream_op_batch* batch, grpc_error_handle error,
grpc_core::CallCombiner* call_combiner);
@ -622,37 +588,100 @@ std::string grpc_transport_stream_op_batch_string(
grpc_transport_stream_op_batch* op, bool truncate);
std::string grpc_transport_op_string(grpc_transport_op* op);
// Send a batch of operations on a transport
namespace grpc_core {
// Takes ownership of any objects contained in ops.
class FilterStackTransport {
public:
// Memory required for a single stream element - this is allocated by upper
// layers and initialized by the transport
virtual size_t SizeOfStream() const = 0;
// Initialize transport data for a stream.
// Unlike the free-function API it replaces, this cannot signal failure.
// May assume that stream contains all-zeros.
// Arguments:
// stream - a pointer to uninitialized memory to initialize
// server_data - either NULL for a client initiated stream, or a pointer
// supplied from the accept_stream callback function
virtual void InitStream(grpc_stream* stream, grpc_stream_refcount* refcount,
const void* server_data, Arena* arena) = 0;
// HACK: inproc does not handle stream op batch callbacks correctly (receive
// ops are required to complete prior to on_complete triggering).
// This predicate is used to disable coalescing of batches in connected_channel
// for that specific transport.
// TODO(ctiller): This ought not be necessary once we have promises complete.
virtual bool HackyDisableStreamOpBatchCoalescingInConnectedChannel()
const = 0;
virtual void PerformStreamOp(grpc_stream* stream,
grpc_transport_stream_op_batch* op) = 0;
// Destroy transport data for a stream.
// Requires: a recv_batch with final_state == GRPC_STREAM_CLOSED has been
// received by the up-layer. Must not be called in the same call stack as
// recv_frame.
// Arguments:
// stream - the grpc_stream to destroy (memory is still owned by the
// caller, but any child memory must be cleaned up)
virtual void DestroyStream(grpc_stream* stream,
grpc_closure* then_schedule_closure) = 0;
protected:
~FilterStackTransport() = default;
};
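// Editorial sketch (not part of the patch): the stream lifecycle implied by
// this interface, as exercised by connected_channel earlier in the patch.
// The function and parameter names are for exposition only.
void DriveOneStream(grpc_core::Transport* transport, grpc_core::Arena* arena,
                    grpc_stream_refcount* refcount,
                    grpc_transport_stream_op_batch* batch,
                    grpc_closure* on_destroyed) {
  grpc_core::FilterStackTransport* fst = transport->filter_stack_transport();
  // Upper layers allocate the stream storage; the transport initializes it.
  auto* stream = static_cast<grpc_stream*>(arena->Alloc(fst->SizeOfStream()));
  fst->InitStream(stream, refcount, /*server_data=*/nullptr, arena);
  // Batches are then pushed at the initialized stream...
  fst->PerformStreamOp(stream, batch);
  // ...and once the layer above has seen GRPC_STREAM_CLOSED it destroys the
  // stream, with `on_destroyed` scheduled when the transport is done with it.
  fst->DestroyStream(stream, on_destroyed);
}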
// Arguments:
// transport - the transport on which to initiate the stream
// stream - the stream on which to send the operations. This must be
// non-NULL and previously initialized by the same transport.
// op - a grpc_transport_stream_op_batch specifying the op to perform
//
void grpc_transport_perform_stream_op(grpc_transport* transport,
grpc_stream* stream,
grpc_transport_stream_op_batch* op);
class ClientTransport {
public:
// Create a promise to execute one client call.
virtual ArenaPromise<ServerMetadataHandle> MakeCallPromise(
CallArgs call_args) = 0;
void grpc_transport_perform_op(grpc_transport* transport,
grpc_transport_op* op);
protected:
~ClientTransport() = default;
};
class ServerTransport {
public:
// Register the factory function for the filter stack part of a call
// promise.
void SetCallPromiseFactory(
absl::AnyInvocable<ArenaPromise<ServerMetadataHandle>(CallArgs) const>);
protected:
~ServerTransport() = default;
};
class Transport : public Orphanable {
public:
struct RawPointerChannelArgTag {};
static absl::string_view ChannelArgName() { return GRPC_ARG_TRANSPORT; }
virtual FilterStackTransport* filter_stack_transport() = 0;
virtual ClientTransport* client_transport() = 0;
virtual ServerTransport* server_transport() = 0;
// name of this transport implementation
virtual absl::string_view GetTransportName() const = 0;
// Send a ping on a transport
// implementation of grpc_transport_set_pollset
virtual void SetPollset(grpc_stream* stream, grpc_pollset* pollset) = 0;
// Calls cb with user data when a response is received.
void grpc_transport_ping(grpc_transport* transport, grpc_closure* cb);
// implementation of grpc_transport_set_pollset_set
virtual void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) = 0;
// Advise peer of pending connection termination.
void grpc_transport_goaway(grpc_transport* transport, grpc_status_code status,
grpc_slice debug_data);
void SetPollingEntity(grpc_stream* stream,
grpc_polling_entity* pollset_or_pollset_set);
// Destroy the transport
void grpc_transport_destroy(grpc_transport* transport);
// implementation of grpc_transport_perform_op
virtual void PerformOp(grpc_transport_op* op) = 0;
// Get the endpoint used by \a transport
grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport);
// implementation of grpc_transport_get_endpoint
virtual grpc_endpoint* GetEndpoint() = 0;
};
} // namespace grpc_core
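// Editorial sketch (not part of the patch): because Transport declares
// RawPointerChannelArgTag and reuses GRPC_ARG_TRANSPORT as its channel-arg
// name, it flows through ChannelArgs by raw pointer just as grpc_transport
// did. The store/fetch pattern used by connected_channel above looks roughly
// like this; the function names are illustrative.
grpc_core::ChannelArgs WithTransport(grpc_core::ChannelArgs args,
                                     grpc_core::Transport* transport) {
  return args.SetObject(transport);  // keyed by Transport::ChannelArgName()
}
bool SupportsPromiseBasedCalls(const grpc_core::ChannelArgs& args) {
  auto* transport = args.GetObject<grpc_core::Transport>();
  // A transport opts into promise-based calls by exposing a ClientTransport.
  return transport != nullptr && transport->client_transport() != nullptr;
}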
// Allocate a grpc_transport_op, and preconfigure the on_complete closure to
// \a on_complete and then delete the returned transport op

@ -15,6 +15,8 @@
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_FWD_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_FWD_H
typedef struct grpc_transport grpc_transport;
namespace grpc_core {
class Transport;
}
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_FWD_H

@ -1,102 +0,0 @@
//
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
#ifndef GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H
#define GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include "absl/strings/string_view.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/promise/arena_promise.h"
#include "src/core/lib/resource_quota/arena.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
typedef struct grpc_transport_vtable {
// Memory required for a single stream element - this is allocated by upper
// layers and initialized by the transport
size_t sizeof_stream; // = sizeof(transport stream)
// HACK: inproc does not handle stream op batch callbacks correctly (receive
// ops are required to complete prior to on_complete triggering).
// This flag is used to disable coalescing of batches in connected_channel for
// that specific transport.
// TODO(ctiller): This ought not be necessary once we have promises complete.
bool hacky_disable_stream_op_batch_coalescing_in_connected_channel;
// name of this transport implementation
const char* name;
// implementation of grpc_transport_init_stream
int (*init_stream)(grpc_transport* self, grpc_stream* stream,
grpc_stream_refcount* refcount, const void* server_data,
grpc_core::Arena* arena);
// Create a promise to execute one client call.
// If this is non-null, it may be used in preference to
// perform_stream_op.
// If this is used in preference to perform_stream_op, the
// following can be omitted also:
// - calling init_stream, destroy_stream, set_pollset, set_pollset_set
// - allocation of memory for call data (sizeof_stream may be ignored)
// There is an on-going migration to move all filters to providing this, and
// then to drop perform_stream_op.
grpc_core::ArenaPromise<grpc_core::ServerMetadataHandle> (*make_call_promise)(
grpc_transport* self, grpc_core::CallArgs call_args);
// implementation of grpc_transport_set_pollset
void (*set_pollset)(grpc_transport* self, grpc_stream* stream,
grpc_pollset* pollset);
// implementation of grpc_transport_set_pollset
void (*set_pollset_set)(grpc_transport* self, grpc_stream* stream,
grpc_pollset_set* pollset_set);
// implementation of grpc_transport_perform_stream_op
void (*perform_stream_op)(grpc_transport* self, grpc_stream* stream,
grpc_transport_stream_op_batch* op);
// implementation of grpc_transport_perform_op
void (*perform_op)(grpc_transport* self, grpc_transport_op* op);
// implementation of grpc_transport_destroy_stream
void (*destroy_stream)(grpc_transport* self, grpc_stream* stream,
grpc_closure* then_schedule_closure);
// implementation of grpc_transport_destroy
void (*destroy)(grpc_transport* self);
// implementation of grpc_transport_get_endpoint
grpc_endpoint* (*get_endpoint)(grpc_transport* self);
} grpc_transport_vtable;
// an instance of a grpc transport
struct grpc_transport {
struct RawPointerChannelArgTag {};
static absl::string_view ChannelArgName() { return GRPC_ARG_TRANSPORT; }
// pointer to a vtable defining operations on this transport
const grpc_transport_vtable* vtable;
};
#endif // GRPC_SRC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H

@ -42,7 +42,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/util/test_config.h"
@ -72,7 +72,7 @@ static void set_done_write(void* arg, grpc_error_handle /*error*/) {
gpr_event_set(done_write, reinterpret_cast<void*>(1));
}
static void server_setup_transport(void* ts, grpc_transport* transport) {
static void server_setup_transport(void* ts, grpc_core::Transport* transport) {
thd_args* a = static_cast<thd_args*>(ts);
grpc_core::ExecCtx exec_ctx;
grpc_core::Server* core_server = grpc_core::Server::FromC(a->server);
@ -202,7 +202,7 @@ void grpc_run_bad_client_test(
grpc_bad_client_arg args[], int num_args, uint32_t flags) {
grpc_endpoint_pair sfd;
thd_args a;
grpc_transport* transport;
grpc_core::Transport* transport;
grpc_core::ExecCtx exec_ctx;
grpc_completion_queue* shutdown_cq;
grpc_completion_queue* client_cq;

@ -40,7 +40,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/port.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport.h"
// This test won't work except with posix sockets enabled
#ifdef GRPC_POSIX_SOCKET_TCP
@ -87,7 +87,7 @@ static test_ctx g_ctx;
// chttp2 transport that is immediately available (used for testing
// connected_channel without a client_channel
static void server_setup_transport(grpc_transport* transport) {
static void server_setup_transport(grpc_core::Transport* transport) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_add_to_pollset(g_ctx.ep->server, grpc_cq_pollset(g_ctx.cq));
grpc_core::Server* core_server = grpc_core::Server::FromC(g_ctx.server);
@ -97,7 +97,7 @@ static void server_setup_transport(grpc_transport* transport) {
core_server->channel_args(), nullptr)));
}
static void client_setup_transport(grpc_transport* transport) {
static void client_setup_transport(grpc_core::Transport* transport) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_add_to_pollset(g_ctx.ep->client,
grpc_cq_pollset(g_ctx.client_cq));
@ -117,7 +117,7 @@ static void client_setup_transport(grpc_transport* transport) {
static void init_client() {
grpc_core::ExecCtx exec_ctx;
grpc_transport* transport;
grpc_core::Transport* transport;
transport = grpc_create_chttp2_transport(grpc_core::ChannelArgs(),
g_ctx.ep->client, true);
client_setup_transport(transport);
@ -127,7 +127,7 @@ static void init_client() {
static void init_server() {
grpc_core::ExecCtx exec_ctx;
grpc_transport* transport;
grpc_core::Transport* transport;
GPR_ASSERT(!g_ctx.server);
g_ctx.server = grpc_server_create(nullptr, nullptr);
grpc_server_register_completion_queue(g_ctx.server, g_ctx.cq, nullptr);

@ -29,12 +29,13 @@
// configurations and assess whether such a change is correct and desirable.
//
#include <string.h>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "gtest/gtest.h"
#include <grpc/grpc.h>
@ -46,23 +47,49 @@
#include "src/core/lib/channel/channel_stack_builder_impl.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
#include "test/core/util/test_config.h"
namespace {
class FakeTransport final : public grpc_core::Transport {
public:
explicit FakeTransport(absl::string_view transport_name)
: transport_name_(transport_name) {}
grpc_core::FilterStackTransport* filter_stack_transport() override {
return nullptr;
}
grpc_core::ClientTransport* client_transport() override { return nullptr; }
grpc_core::ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override {
return transport_name_;
}
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override {}
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override {}
void PerformOp(grpc_transport_op* op) override {}
grpc_endpoint* GetEndpoint() override { return nullptr; }
void Orphan() override {}
private:
absl::string_view transport_name_;
};
} // namespace
std::vector<std::string> MakeStack(const char* transport_name,
grpc_core::ChannelArgs channel_args,
grpc_channel_stack_type channel_stack_type) {
// create phony channel stack
grpc_transport_vtable fake_transport_vtable;
memset(&fake_transport_vtable, 0, sizeof(grpc_transport_vtable));
fake_transport_vtable.name = transport_name;
grpc_transport fake_transport = {&fake_transport_vtable};
std::unique_ptr<FakeTransport> fake_transport;
if (transport_name != nullptr) {
channel_args = channel_args.SetObject(&fake_transport);
fake_transport = absl::make_unique<FakeTransport>(transport_name);
channel_args = channel_args.SetObject(fake_transport.get());
}
grpc_core::ChannelStackBuilderImpl builder("test", channel_stack_type,
channel_args);

@ -184,7 +184,6 @@ grpc_cc_library(
"//src/core:grpc_tls_credentials",
"//src/core:grpc_transport_inproc",
"//src/core:slice",
"//src/core:transport_fwd",
"//test/core/util:grpc_test_util",
],
)

@ -42,7 +42,6 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/transport.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "test/core/end2end/end2end_tests.h"
namespace grpc_core {
@ -72,7 +71,7 @@ class SockpairFixture : public CoreTestFixture {
absl::AnyInvocable<void(grpc_server*)>& pre_server_start) override {
auto args = MutateServerArgs(in_args);
ExecCtx exec_ctx;
grpc_transport* transport;
Transport* transport;
auto* server = grpc_server_create(args.ToC().get(), nullptr);
grpc_server_register_completion_queue(server, cq, nullptr);
pre_server_start(server);
@ -91,7 +90,7 @@ class SockpairFixture : public CoreTestFixture {
if (error.ok()) {
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
} else {
grpc_transport_destroy(transport);
transport->Orphan();
}
return server;
}
@ -105,7 +104,7 @@ class SockpairFixture : public CoreTestFixture {
.Set(GRPC_ARG_DEFAULT_AUTHORITY, "test-authority")
.ToC()
.get());
grpc_transport* transport;
Transport* transport;
auto* client_endpoint = std::exchange(ep_.client, nullptr);
EXPECT_NE(client_endpoint, nullptr);
transport = grpc_create_chttp2_transport(args, client_endpoint, true);
@ -119,7 +118,7 @@ class SockpairFixture : public CoreTestFixture {
client = grpc_lame_client_channel_create(
nullptr, static_cast<grpc_status_code>(channel.status().code()),
"lame channel");
grpc_transport_destroy(transport);
transport->Orphan();
}
GPR_ASSERT(client);
return client;

@ -33,7 +33,7 @@
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/api_fuzzer.pb.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
@ -64,7 +64,7 @@ class ClientFuzzer final : public BasicFuzzer {
.channel_args_preconditioning()
.PreconditionChannelArgs(nullptr)
.SetIfUnset(GRPC_ARG_DEFAULT_AUTHORITY, "test-authority");
grpc_transport* transport =
Transport* transport =
grpc_create_chttp2_transport(args, mock_endpoint_, true);
channel_ = Channel::Create("test-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport)

@ -31,7 +31,7 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport.h"
#include "src/libfuzzer/libfuzzer_macro.h"
#include "test/core/end2end/fuzzers/api_fuzzer.pb.h"
#include "test/core/end2end/fuzzers/fuzzer_input.pb.h"
@ -70,7 +70,7 @@ class ServerFuzzer final : public BasicFuzzer {
msg.channel_args(), FuzzingEnvironment{resource_quota()})
.ToC()
.get());
grpc_transport* transport =
Transport* transport =
grpc_create_chttp2_transport(channel_args, mock_endpoint_, false);
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"SetupTransport", Server::FromC(server_)->SetupTransport(

@ -58,7 +58,7 @@ class BinderTransportTest : public ::testing::Test {
~BinderTransportTest() override {
grpc_core::ExecCtx exec_ctx;
grpc_transport_destroy(transport_);
transport_->Orphan();
grpc_core::ExecCtx::Get()->Flush();
for (grpc_binder_stream* gbs : stream_buffer_) {
gbs->~grpc_binder_stream();
@ -69,8 +69,8 @@ class BinderTransportTest : public ::testing::Test {
void PerformStreamOp(grpc_binder_stream* gbs,
grpc_transport_stream_op_batch* op) {
grpc_transport_perform_stream_op(transport_,
reinterpret_cast<grpc_stream*>(gbs), op);
transport_->filter_stack_transport()->PerformStreamOp(
reinterpret_cast<grpc_stream*>(gbs), op);
}
grpc_binder_transport* GetBinderTransport() {
@ -79,9 +79,9 @@ class BinderTransportTest : public ::testing::Test {
grpc_binder_stream* InitNewBinderStream() {
grpc_binder_stream* gbs = static_cast<grpc_binder_stream*>(
gpr_malloc(grpc_transport_stream_size(transport_)));
grpc_transport_init_stream(transport_, reinterpret_cast<grpc_stream*>(gbs),
&ref_, nullptr, arena_);
gpr_malloc(transport_->filter_stack_transport()->SizeOfStream()));
transport_->filter_stack_transport()->InitStream(
reinterpret_cast<grpc_stream*>(gbs), &ref_, nullptr, arena_);
stream_buffer_.push_back(gbs);
return gbs;
}
@ -101,7 +101,7 @@ class BinderTransportTest : public ::testing::Test {
->CreateMemoryAllocator("test"));
grpc_core::Arena* arena_ =
grpc_core::Arena::Create(/* initial_size = */ 1, &memory_allocator_);
grpc_transport* transport_;
grpc_core::Transport* transport_;
grpc_stream_refcount ref_;
std::vector<grpc_binder_stream*> stream_buffer_;
};

@ -79,14 +79,14 @@ class End2EndBinderTransportTest
} // namespace
TEST_P(End2EndBinderTransportTest, SetupTransport) {
grpc_transport *client_transport, *server_transport;
grpc_core::Transport *client_transport, *server_transport;
std::tie(client_transport, server_transport) =
end2end_testing::CreateClientServerBindersPairForTesting();
EXPECT_NE(client_transport, nullptr);
EXPECT_NE(server_transport, nullptr);
grpc_transport_destroy(client_transport);
grpc_transport_destroy(server_transport);
client_transport->Orphan();
server_transport->Orphan();
}
TEST_P(End2EndBinderTransportTest, UnaryCall) {

@ -44,11 +44,12 @@ DEFINE_PROTO_FUZZER(const binder_transport_fuzzer::Input& input) {
grpc_core::Executor::SetThreadingAll(false);
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
grpc_transport* client_transport = grpc_create_binder_transport_client(
std::make_unique<grpc_binder::fuzzing::BinderForFuzzing>(
input.incoming_parcels()),
std::make_shared<
grpc::experimental::binder::UntrustedSecurityPolicy>());
grpc_core::Transport* client_transport =
grpc_create_binder_transport_client(
std::make_unique<grpc_binder::fuzzing::BinderForFuzzing>(
input.incoming_parcels()),
std::make_shared<
grpc::experimental::binder::UntrustedSecurityPolicy>());
grpc_arg authority_arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY),
const_cast<char*>("test-authority"));

@ -44,11 +44,12 @@ DEFINE_PROTO_FUZZER(const binder_transport_fuzzer::Input& input) {
// TODO(ctiller): add more registered methods (one for POST, one for PUT)
grpc_server_register_method(server, "/reg", nullptr, {}, 0);
grpc_server_start(server);
grpc_transport* server_transport = grpc_create_binder_transport_server(
std::make_unique<grpc_binder::fuzzing::BinderForFuzzing>(
input.incoming_parcels()),
std::make_shared<
grpc::experimental::binder::UntrustedSecurityPolicy>());
grpc_core::Transport* server_transport =
grpc_create_binder_transport_server(
std::make_unique<grpc_binder::fuzzing::BinderForFuzzing>(
input.incoming_parcels()),
std::make_shared<
grpc::experimental::binder::UntrustedSecurityPolicy>());
grpc_core::ChannelArgs channel_args = grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(nullptr);

@ -60,15 +60,15 @@ class ServerSetupTransportHelper {
};
} // namespace
std::pair<grpc_transport*, grpc_transport*>
std::pair<grpc_core::Transport*, grpc_core::Transport*>
CreateClientServerBindersPairForTesting() {
ServerSetupTransportHelper helper;
std::unique_ptr<Binder> endpoint_binder = helper.GetEndpointBinderForClient();
grpc_transport* client_transport = nullptr;
grpc_core::Transport* client_transport = nullptr;
struct ThreadArgs {
std::unique_ptr<Binder> endpoint_binder;
grpc_transport** client_transport;
grpc_core::Transport** client_transport;
} args;
args.endpoint_binder = std::move(endpoint_binder);
@ -87,7 +87,7 @@ CreateClientServerBindersPairForTesting() {
},
&args);
client_thread.Start();
grpc_transport* server_transport = grpc_create_binder_transport_server(
grpc_core::Transport* server_transport = grpc_create_binder_transport_server(
helper.WaitForClientBinder(),
std::make_shared<grpc::experimental::binder::UntrustedSecurityPolicy>());
client_thread.Join();
@ -118,7 +118,7 @@ grpc_channel* grpc_binder_channel_create_for_testing(
auto client_args =
server_args.Set(GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
grpc_transport *client_transport, *server_transport;
grpc_core::Transport *client_transport, *server_transport;
std::tie(client_transport, server_transport) =
grpc_binder::end2end_testing::CreateClientServerBindersPairForTesting();
grpc_error_handle error = grpc_core::Server::FromC(server)->SetupTransport(

@ -26,7 +26,7 @@
namespace grpc_binder {
namespace end2end_testing {
std::pair<grpc_transport*, grpc_transport*>
std::pair<grpc_core::Transport*, grpc_core::Transport*>
CreateClientServerBindersPairForTesting();
std::shared_ptr<grpc::Channel> BinderChannelForTesting(

@ -32,7 +32,6 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/transport/transport.h"
#include "test/core/util/mock_endpoint.h"
#include "test/core/util/test_config.h"
@ -64,7 +63,7 @@ TEST_F(ConfigurationTest, ClientKeepaliveDefaults) {
EXPECT_EQ(t->keepalive_timeout, Duration::Infinity());
EXPECT_EQ(t->keepalive_permit_without_calls, false);
EXPECT_EQ(t->ping_rate_policy.TestOnlyMaxPingsWithoutData(), 2);
grpc_transport_destroy(&t->base);
t->Orphan();
}
TEST_F(ConfigurationTest, ClientKeepaliveExplicitArgs) {
@ -79,7 +78,7 @@ TEST_F(ConfigurationTest, ClientKeepaliveExplicitArgs) {
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));
EXPECT_EQ(t->keepalive_permit_without_calls, true);
EXPECT_EQ(t->ping_rate_policy.TestOnlyMaxPingsWithoutData(), 3);
grpc_transport_destroy(&t->base);
t->Orphan();
}
TEST_F(ConfigurationTest, ServerKeepaliveDefaults) {
@ -94,7 +93,7 @@ TEST_F(ConfigurationTest, ServerKeepaliveDefaults) {
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMinPingIntervalWithoutData(),
Duration::Minutes(5));
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMaxPingStrikes(), 2);
grpc_transport_destroy(&t->base);
t->Orphan();
}
TEST_F(ConfigurationTest, ServerKeepaliveExplicitArgs) {
@ -116,7 +115,7 @@ TEST_F(ConfigurationTest, ServerKeepaliveExplicitArgs) {
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMinPingIntervalWithoutData(),
Duration::Seconds(20));
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMaxPingStrikes(), 0);
grpc_transport_destroy(&t->base);
t->Orphan();
}
// This test modifies the defaults of the client side settings, so it would
@ -139,7 +138,7 @@ TEST_F(ConfigurationTest, ModifyClientDefaults) {
EXPECT_EQ(t->keepalive_timeout, Duration::Seconds(10));
EXPECT_EQ(t->keepalive_permit_without_calls, true);
EXPECT_EQ(t->ping_rate_policy.TestOnlyMaxPingsWithoutData(), 3);
grpc_transport_destroy(&t->base);
t->Orphan();
}
// This test modifies the defaults of the client side settings, so it would
@ -168,7 +167,7 @@ TEST_F(ConfigurationTest, ModifyServerDefaults) {
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMinPingIntervalWithoutData(),
Duration::Seconds(20));
EXPECT_EQ(t->ping_abuse_policy.TestOnlyMaxPingStrikes(), 0);
grpc_transport_destroy(&t->base);
t->Orphan();
}
} // namespace

@ -18,8 +18,6 @@
#include "src/core/ext/xds/xds_channel_stack_modifier.h"
#include <string.h>
#include <algorithm>
#include <string>
@ -31,11 +29,12 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channel_stack_builder_impl.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_fwd.h"
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/transport_fwd.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/core/lib/transport/transport.h"
#include "test/core/util/test_config.h"
namespace grpc_core {
@ -74,6 +73,23 @@ TEST(XdsChannelStackModifierTest, ChannelArgsCompare) {
constexpr char kTestFilter1[] = "test_filter_1";
constexpr char kTestFilter2[] = "test_filter_2";
namespace {
class FakeTransport final : public Transport {
public:
FilterStackTransport* filter_stack_transport() override { return nullptr; }
ClientTransport* client_transport() override { return nullptr; }
ServerTransport* server_transport() override { return nullptr; }
absl::string_view GetTransportName() const override { return "fake"; }
void SetPollset(grpc_stream* stream, grpc_pollset* pollset) override {}
void SetPollsetSet(grpc_stream* stream,
grpc_pollset_set* pollset_set) override {}
void PerformOp(grpc_transport_op* op) override {}
grpc_endpoint* GetEndpoint() override { return nullptr; }
void Orphan() override {}
};
} // namespace
// Test filters insertion
TEST(XdsChannelStackModifierTest, XdsHttpFiltersInsertion) {
CoreConfiguration::Reset();
@ -88,15 +104,12 @@ TEST(XdsChannelStackModifierTest, XdsHttpFiltersInsertion) {
auto channel_stack_modifier = MakeRefCounted<XdsChannelStackModifier>(
std::vector<const grpc_channel_filter*>{&test_filter_1, &test_filter_2});
grpc_arg arg = channel_stack_modifier->MakeChannelArg();
FakeTransport fake_transport;
// Create a phony ChannelStackBuilder object
grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
grpc_transport_vtable fake_transport_vtable;
memset(&fake_transport_vtable, 0, sizeof(grpc_transport_vtable));
fake_transport_vtable.name = "fake";
grpc_transport fake_transport = {&fake_transport_vtable};
ChannelStackBuilderImpl builder(
"test", GRPC_SERVER_CHANNEL,
ChannelArgs::FromC(args).SetObject<grpc_transport>(&fake_transport));
ChannelArgs::FromC(args).SetObject<Transport>(&fake_transport));
grpc_channel_args_destroy(args);
// Construct channel stack and verify that the test filters were successfully
// added

@ -226,22 +226,6 @@ grpc_cc_test(
deps = [":helpers"],
)
grpc_cc_test(
name = "bm_call_create",
srcs = ["bm_call_create.cc"],
args = grpc_benchmark_args(),
tags = [
"no_mac",
"no_windows",
],
uses_event_engine = False,
uses_polling = False,
deps = [
":helpers",
"//src/core:channel_args",
],
)
grpc_cc_test(
name = "bm_cq",
srcs = ["bm_cq.cc"],

@ -1,834 +0,0 @@
//
//
// Copyright 2017 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
// This benchmark exists to ensure that the benchmark integration is
// working
#include <string.h>
#include <sstream>
#include <benchmark/benchmark.h>
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include <grpcpp/channel.h>
#include <grpcpp/support/channel_arguments.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/ext/filters/deadline/deadline_filter.h"
#include "src/core/ext/filters/http/client/http_client_filter.h"
#include "src/core/ext/filters/http/message_compress/compression_filter.h"
#include "src/core/ext/filters/http/server/http_server_filter.h"
#include "src/core/ext/filters/message_size/message_size_filter.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/channel_stack_builder_impl.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/resource_quota/resource_quota.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/transport_impl.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/test_config.h"
#include "test/cpp/microbenchmarks/helpers.h"
#include "test/cpp/util/test_config.h"
void BM_Zalloc(benchmark::State& state) {
// speed of light for call creation is zalloc, so benchmark a few interesting
// sizes
size_t sz = state.range(0);
for (auto _ : state) {
gpr_free(gpr_zalloc(sz));
}
}
BENCHMARK(BM_Zalloc)
->Arg(64)
->Arg(128)
->Arg(256)
->Arg(512)
->Arg(1024)
->Arg(1536)
->Arg(2048)
->Arg(3072)
->Arg(4096)
->Arg(5120)
->Arg(6144)
->Arg(7168);
////////////////////////////////////////////////////////////////////////////////
// Benchmarks creating full stacks
class BaseChannelFixture {
public:
explicit BaseChannelFixture(grpc_channel* channel) : channel_(channel) {}
~BaseChannelFixture() { grpc_channel_destroy(channel_); }
grpc_channel* channel() const { return channel_; }
private:
grpc_channel* const channel_;
};
static grpc_channel* CreateChannel() {
grpc_channel_credentials* creds = grpc_insecure_credentials_create();
grpc_channel* channel = grpc_channel_create("localhost:1234", creds, nullptr);
grpc_channel_credentials_release(creds);
return channel;
}
class InsecureChannel : public BaseChannelFixture {
public:
InsecureChannel() : BaseChannelFixture(CreateChannel()) {}
};
class LameChannel : public BaseChannelFixture {
public:
LameChannel()
: BaseChannelFixture(grpc_lame_client_channel_create(
"localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah")) {}
};
template <class Fixture>
static void BM_CallCreateDestroy(benchmark::State& state) {
Fixture fixture;
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
for (auto _ : state) {
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, cq, method_hdl,
deadline, nullptr));
}
grpc_completion_queue_destroy(cq);
}
BENCHMARK_TEMPLATE(BM_CallCreateDestroy, InsecureChannel);
BENCHMARK_TEMPLATE(BM_CallCreateDestroy, LameChannel);
////////////////////////////////////////////////////////////////////////////////
// Benchmarks isolating individual filters
static void* tag(int i) {
return reinterpret_cast<void*>(static_cast<intptr_t>(i));
}
static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
auto stub =
grpc::testing::EchoTestService::NewStub(grpc::CreateChannelInternal(
"",
grpc_lame_client_channel_create("localhost:1234",
GRPC_STATUS_UNAUTHENTICATED, "blah"),
std::vector<std::unique_ptr<
grpc::experimental::ClientInterceptorFactoryInterface>>()));
grpc::CompletionQueue cq;
grpc::testing::EchoRequest send_request;
grpc::testing::EchoResponse recv_response;
grpc::Status recv_status;
for (auto _ : state) {
grpc::ClientContext cli_ctx;
auto reader = stub->AsyncEcho(&cli_ctx, send_request, &cq);
reader->Finish(&recv_response, &recv_status, tag(0));
void* t;
bool ok;
GPR_ASSERT(cq.Next(&t, &ok));
GPR_ASSERT(ok);
}
}
BENCHMARK(BM_LameChannelCallCreateCpp);
static void do_nothing(void* /*ignored*/) {}
static void BM_LameChannelCallCreateCore(benchmark::State& state) {
grpc_channel* channel;
grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_byte_buffer* response_payload_recv = nullptr;
grpc_status_code status;
grpc_slice details;
grpc::testing::EchoRequest send_request;
grpc_slice send_request_slice =
grpc_slice_new(&send_request, sizeof(send_request), do_nothing);
channel = grpc_lame_client_channel_create(
"localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah");
cq = grpc_completion_queue_create_for_next(nullptr);
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
for (auto _ : state) {
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_byte_buffer* request_payload_send =
grpc_raw_byte_buffer_create(&send_request_slice, 1);
// Fill in call ops
grpc_op ops[6];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload_send;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
&initial_metadata_recv;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &response_payload_recv;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
(void*)1, nullptr));
grpc_event ev = grpc_completion_queue_next(
cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success != 0);
grpc_call_unref(call);
grpc_byte_buffer_destroy(request_payload_send);
grpc_byte_buffer_destroy(response_payload_recv);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
}
grpc_channel_destroy(channel);
grpc_completion_queue_destroy(cq);
grpc_slice_unref(send_request_slice);
}
BENCHMARK(BM_LameChannelCallCreateCore);
static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
grpc_channel* channel;
grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_byte_buffer* response_payload_recv = nullptr;
grpc_status_code status;
grpc_slice details;
grpc::testing::EchoRequest send_request;
grpc_slice send_request_slice =
grpc_slice_new(&send_request, sizeof(send_request), do_nothing);
channel = grpc_lame_client_channel_create(
"localhost:1234", GRPC_STATUS_UNAUTHENTICATED, "blah");
cq = grpc_completion_queue_create_for_next(nullptr);
void* rc = grpc_channel_register_call(
channel, "/grpc.testing.EchoTestService/Echo", nullptr, nullptr);
for (auto _ : state) {
grpc_call* call = grpc_channel_create_registered_call(
channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq, rc,
gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_byte_buffer* request_payload_send =
grpc_raw_byte_buffer_create(&send_request_slice, 1);
// Fill in call ops
grpc_op ops[3];
memset(ops, 0, sizeof(ops));
grpc_op* op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message.send_message = request_payload_send;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
(void*)nullptr, nullptr));
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata =
&initial_metadata_recv;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message.recv_message = &response_payload_recv;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(call, ops,
(size_t)(op - ops),
(void*)1, nullptr));
grpc_event ev = grpc_completion_queue_next(
cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success == 0);
ev = grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
nullptr);
GPR_ASSERT(ev.type != GRPC_QUEUE_SHUTDOWN);
GPR_ASSERT(ev.success != 0);
grpc_call_unref(call);
grpc_byte_buffer_destroy(request_payload_send);
grpc_byte_buffer_destroy(response_payload_recv);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
}
grpc_channel_destroy(channel);
grpc_completion_queue_destroy(cq);
grpc_slice_unref(send_request_slice);
}
BENCHMARK(BM_LameChannelCallCreateCoreSeparateBatch);
static void FilterDestroy(void* arg, grpc_error_handle /*error*/) {
gpr_free(arg);
}
static void DoNothing(void* /*arg*/, grpc_error_handle /*error*/) {}
class FakeClientChannelFactory : public grpc_core::ClientChannelFactory {
public:
grpc_core::RefCountedPtr<grpc_core::Subchannel> CreateSubchannel(
const grpc_resolved_address& /*address*/,
const grpc_core::ChannelArgs& /*args*/) override {
return nullptr;
}
};
enum FixtureFlags : uint32_t {
CHECKS_NOT_LAST = 1,
REQUIRES_TRANSPORT = 2,
};
template <const grpc_channel_filter* kFilter, uint32_t kFlags>
struct Fixture {
const grpc_channel_filter* filter = kFilter;
const uint32_t flags = kFlags;
};
namespace phony_filter {
static void StartTransportStreamOp(grpc_call_element* /*elem*/,
grpc_transport_stream_op_batch* /*op*/) {}
static void StartTransportOp(grpc_channel_element* /*elem*/,
grpc_transport_op* /*op*/) {}
static grpc_error_handle InitCallElem(grpc_call_element* /*elem*/,
const grpc_call_element_args* /*args*/) {
return absl::OkStatus();
}
static void SetPollsetOrPollsetSet(grpc_call_element* /*elem*/,
grpc_polling_entity* /*pollent*/) {}
static void DestroyCallElem(grpc_call_element* /*elem*/,
const grpc_call_final_info* /*final_info*/,
grpc_closure* /*then_sched_closure*/) {}
grpc_error_handle InitChannelElem(grpc_channel_element* /*elem*/,
grpc_channel_element_args* /*args*/) {
return absl::OkStatus();
}
void DestroyChannelElem(grpc_channel_element* /*elem*/) {}
void GetChannelInfo(grpc_channel_element* /*elem*/,
const grpc_channel_info* /*channel_info*/) {}
static const grpc_channel_filter phony_filter = {
StartTransportStreamOp, nullptr,
StartTransportOp, 0,
InitCallElem, SetPollsetOrPollsetSet,
DestroyCallElem, 0,
InitChannelElem, grpc_channel_stack_no_post_init,
DestroyChannelElem, GetChannelInfo,
"phony_filter"};
} // namespace phony_filter
namespace phony_transport {
// Memory required for a single stream element - this is allocated by upper
// layers and initialized by the transport
size_t sizeof_stream; // = sizeof(transport stream)
// name of this transport implementation
const char* name;
// implementation of grpc_transport_init_stream
int InitStream(grpc_transport* /*self*/, grpc_stream* /*stream*/,
grpc_stream_refcount* /*refcount*/, const void* /*server_data*/,
grpc_core::Arena* /*arena*/) {
return 0;
}
// implementation of grpc_transport_set_pollset
void SetPollset(grpc_transport* /*self*/, grpc_stream* /*stream*/,
grpc_pollset* /*pollset*/) {}
// implementation of grpc_transport_set_pollset_set
void SetPollsetSet(grpc_transport* /*self*/, grpc_stream* /*stream*/,
grpc_pollset_set* /*pollset_set*/) {}
// implementation of grpc_transport_perform_stream_op
void PerformStreamOp(grpc_transport* /*self*/, grpc_stream* /*stream*/,
grpc_transport_stream_op_batch* op) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete, absl::OkStatus());
}
// implementation of grpc_transport_perform_op
void PerformOp(grpc_transport* /*self*/, grpc_transport_op* /*op*/) {}
// implementation of grpc_transport_destroy_stream
void DestroyStream(grpc_transport* /*self*/, grpc_stream* /*stream*/,
grpc_closure* /*then_sched_closure*/) {}
// implementation of grpc_transport_destroy
void Destroy(grpc_transport* /*self*/) {}
// implementation of grpc_transport_get_endpoint
grpc_endpoint* GetEndpoint(grpc_transport* /*self*/) { return nullptr; }
static const grpc_transport_vtable phony_transport_vtable = {
0, false, "phony_http2", InitStream,
nullptr, SetPollset, SetPollsetSet, PerformStreamOp,
PerformOp, DestroyStream, Destroy, GetEndpoint};
static grpc_transport phony_transport = {&phony_transport_vtable};
grpc_arg Arg() {
static const grpc_arg_pointer_vtable vtable = {
// copy
[](void* p) { return p; },
// destroy
[](void*) {},
// cmp
[](void* a, void* b) { return grpc_core::QsortCompare(a, b); },
};
return grpc_channel_arg_pointer_create(const_cast<char*>(GRPC_ARG_TRANSPORT),
&phony_transport, &vtable);
}
} // namespace phony_transport
class NoOp {
public:
class Op {
public:
Op(NoOp* /*p*/, grpc_call_stack* /*s*/, grpc_core::Arena*) {}
void Finish() {}
};
};
class SendEmptyMetadata {
public:
SendEmptyMetadata() : op_payload_(nullptr) {
op_ = {};
op_.on_complete = GRPC_CLOSURE_INIT(&closure_, DoNothing, nullptr,
grpc_schedule_on_exec_ctx);
op_.send_initial_metadata = true;
op_.payload = &op_payload_;
}
class Op {
public:
Op(SendEmptyMetadata* p, grpc_call_stack* /*s*/, grpc_core::Arena* arena)
: batch_(arena) {
p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_;
}
void Finish() {}
private:
grpc_metadata_batch batch_;
};
private:
const gpr_timespec deadline_ = gpr_inf_future(GPR_CLOCK_MONOTONIC);
const gpr_timespec start_time_ = gpr_now(GPR_CLOCK_MONOTONIC);
const grpc_slice method_ = grpc_slice_from_static_string("/foo/bar");
grpc_transport_stream_op_batch op_;
grpc_transport_stream_op_batch_payload op_payload_;
grpc_closure closure_;
};
// Test a filter in isolation. Fixture specifies the filter under test (use the
// Fixture<> template to specify this), and TestOp defines some unit of work to
// perform on said filter.
template <class Fixture, class TestOp>
static void BM_IsolatedFilter(benchmark::State& state) {
Fixture fixture;
std::ostringstream label;
FakeClientChannelFactory fake_client_channel_factory;
grpc_core::ChannelArgs channel_args =
grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(nullptr)
.SetObject(&fake_client_channel_factory)
.Set(GRPC_ARG_SERVER_URI, "localhost");
if (fixture.flags & REQUIRES_TRANSPORT) {
channel_args = channel_args.Set(phony_transport::Arg());
}
std::vector<const grpc_channel_filter*> filters;
if (fixture.filter != nullptr) {
filters.push_back(fixture.filter);
}
if (fixture.flags & CHECKS_NOT_LAST) {
filters.push_back(&phony_filter::phony_filter);
label << " #has_phony_filter";
}
grpc_core::ExecCtx exec_ctx;
size_t channel_size = grpc_channel_stack_size(
filters.empty() ? nullptr : &filters[0], filters.size());
grpc_channel_stack* channel_stack =
static_cast<grpc_channel_stack*>(gpr_zalloc(channel_size));
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"channel_stack_init",
grpc_channel_stack_init(1, FilterDestroy, channel_stack,
filters.empty() ? nullptr : &filters[0],
filters.size(), channel_args, "CHANNEL",
channel_stack)));
grpc_core::ExecCtx::Get()->Flush();
grpc_call_stack* call_stack =
static_cast<grpc_call_stack*>(gpr_zalloc(channel_stack->call_stack_size));
grpc_core::Timestamp deadline = grpc_core::Timestamp::InfFuture();
gpr_cycle_counter start_time = gpr_get_cycle_counter();
grpc_slice method = grpc_slice_from_static_string("/foo/bar");
grpc_call_final_info final_info;
TestOp test_op_data;
const int kArenaSize = 32 * 1024 * 1024;
grpc_call_context_element context[GRPC_CONTEXT_COUNT] = {};
grpc_core::MemoryAllocator memory_allocator =
grpc_core::MemoryAllocator(grpc_core::ResourceQuota::Default()
->memory_quota()
->CreateMemoryAllocator("test"));
grpc_call_element_args call_args{
call_stack,
nullptr,
context,
method,
start_time,
deadline,
grpc_core::Arena::Create(kArenaSize, &memory_allocator),
nullptr};
while (state.KeepRunning()) {
(void)grpc_call_stack_init(channel_stack, 1, DoNothing, nullptr,
&call_args);
typename TestOp::Op op(&test_op_data, call_stack, call_args.arena);
grpc_call_stack_destroy(call_stack, &final_info, nullptr);
op.Finish();
grpc_core::ExecCtx::Get()->Flush();
// recreate arena every 64k iterations to avoid oom
if (0 == (state.iterations() & 0xffff)) {
call_args.arena->Destroy();
call_args.arena = grpc_core::Arena::Create(kArenaSize, &memory_allocator);
}
}
call_args.arena->Destroy();
grpc_channel_stack_destroy(channel_stack);
grpc_core::ExecCtx::Get()->Flush();
gpr_free(channel_stack);
gpr_free(call_stack);
state.SetLabel(label.str());
}
typedef Fixture<nullptr, 0> NoFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, NoFilter, NoOp);
typedef Fixture<&phony_filter::phony_filter, 0> PhonyFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, PhonyFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, PhonyFilter, SendEmptyMetadata);
typedef Fixture<&grpc_core::ClientChannel::kFilterVtableWithoutPromises, 0>
ClientChannelFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ClientChannelFilter, NoOp);
typedef Fixture<&grpc_core::ClientCompressionFilter::kFilter, CHECKS_NOT_LAST>
ClientCompressFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ClientCompressFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ClientCompressFilter, SendEmptyMetadata);
typedef Fixture<&grpc_client_deadline_filter, CHECKS_NOT_LAST>
ClientDeadlineFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ClientDeadlineFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ClientDeadlineFilter, SendEmptyMetadata);
typedef Fixture<&grpc_server_deadline_filter, CHECKS_NOT_LAST>
ServerDeadlineFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ServerDeadlineFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ServerDeadlineFilter, SendEmptyMetadata);
typedef Fixture<&grpc_core::HttpClientFilter::kFilter,
CHECKS_NOT_LAST | REQUIRES_TRANSPORT>
HttpClientFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, HttpClientFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, HttpClientFilter, SendEmptyMetadata);
typedef Fixture<&grpc_core::HttpServerFilter::kFilter, CHECKS_NOT_LAST>
HttpServerFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, HttpServerFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, HttpServerFilter, SendEmptyMetadata);
typedef Fixture<&grpc_core::ServerCompressionFilter::kFilter, CHECKS_NOT_LAST>
ServerCompressFilter;
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ServerCompressFilter, NoOp);
BENCHMARK_TEMPLATE(BM_IsolatedFilter, ServerCompressFilter, SendEmptyMetadata);
// This cmake target is disabled for now because it depends on OpenCensus, which
// is Bazel-only.
// typedef Fixture<&grpc_server_load_reporting_filter, CHECKS_NOT_LAST>
// LoadReportingFilter;
// BENCHMARK_TEMPLATE(BM_IsolatedFilter, LoadReportingFilter, NoOp);
// BENCHMARK_TEMPLATE(BM_IsolatedFilter, LoadReportingFilter,
// SendEmptyMetadata);
////////////////////////////////////////////////////////////////////////////////
// Benchmarks isolating grpc_call
namespace isolated_call_filter {
typedef struct {
grpc_core::CallCombiner* call_combiner;
} call_data;
static void StartTransportStreamOp(grpc_call_element* elem,
grpc_transport_stream_op_batch* op) {
call_data* calld = static_cast<call_data*>(elem->call_data);
// Construct list of closures to return.
grpc_core::CallCombinerClosureList closures;
if (op->recv_initial_metadata) {
closures.Add(op->payload->recv_initial_metadata.recv_initial_metadata_ready,
absl::OkStatus(), "recv_initial_metadata");
}
if (op->recv_message) {
closures.Add(op->payload->recv_message.recv_message_ready, absl::OkStatus(),
"recv_message");
}
if (op->recv_trailing_metadata) {
closures.Add(
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
absl::OkStatus(), "recv_trailing_metadata");
}
if (op->on_complete != nullptr) {
closures.Add(op->on_complete, absl::OkStatus(), "on_complete");
}
// Execute closures.
closures.RunClosures(calld->call_combiner);
}
static void StartTransportOp(grpc_channel_element* /*elem*/,
grpc_transport_op* op) {
if (!op->disconnect_with_error.ok()) {
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, absl::OkStatus());
}
static grpc_error_handle InitCallElem(grpc_call_element* elem,
const grpc_call_element_args* args) {
call_data* calld = static_cast<call_data*>(elem->call_data);
calld->call_combiner = args->call_combiner;
return absl::OkStatus();
}
static void SetPollsetOrPollsetSet(grpc_call_element* /*elem*/,
grpc_polling_entity* /*pollent*/) {}
static void DestroyCallElem(grpc_call_element* /*elem*/,
const grpc_call_final_info* /*final_info*/,
grpc_closure* then_sched_closure) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, then_sched_closure, absl::OkStatus());
}
grpc_error_handle InitChannelElem(grpc_channel_element* /*elem*/,
grpc_channel_element_args* /*args*/) {
return absl::OkStatus();
}
void DestroyChannelElem(grpc_channel_element* /*elem*/) {}
void GetChannelInfo(grpc_channel_element* /*elem*/,
const grpc_channel_info* /*channel_info*/) {}
static const grpc_channel_filter isolated_call_filter = {
StartTransportStreamOp, nullptr,
StartTransportOp, sizeof(call_data),
InitCallElem, SetPollsetOrPollsetSet,
DestroyCallElem, 0,
InitChannelElem, grpc_channel_stack_no_post_init,
DestroyChannelElem, GetChannelInfo,
"isolated_call_filter"};
} // namespace isolated_call_filter
class IsolatedCallFixture {
public:
IsolatedCallFixture() {
// We are calling grpc_channel_stack_builder_create() instead of
// grpc_channel_create() here, which means we're not getting the
// grpc_init() called by grpc_channel_create(), but we are getting
// the grpc_shutdown() run by grpc_channel_destroy(). So we need to
// call grpc_init() manually here to balance things out.
grpc_init();
grpc_core::ChannelStackBuilderImpl builder(
"phony", GRPC_CLIENT_CHANNEL,
grpc_core::CoreConfiguration::Get()
.channel_args_preconditioning()
.PreconditionChannelArgs(nullptr));
builder.SetTarget("phony_target");
builder.AppendFilter(&isolated_call_filter::isolated_call_filter);
{
grpc_core::ExecCtx exec_ctx;
channel_ =
grpc_core::Channel::CreateWithBuilder(&builder)->release()->c_ptr();
}
cq_ = grpc_completion_queue_create_for_next(nullptr);
}
void Finish(benchmark::State&) {
grpc_completion_queue_destroy(cq_);
grpc_channel_destroy(channel_);
}
grpc_channel* channel() const { return channel_; }
grpc_completion_queue* cq() const { return cq_; }
private:
grpc_completion_queue* cq_;
grpc_channel* channel_;
};
static void BM_IsolatedCall_NoOp(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
for (auto _ : state) {
grpc_call_unref(grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, nullptr));
}
fixture.Finish(state);
}
BENCHMARK(BM_IsolatedCall_NoOp);
static void BM_IsolatedCall_Unary(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
grpc_slice slice = grpc_slice_from_static_string("hello world");
grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1);
grpc_byte_buffer* recv_message = nullptr;
grpc_status_code status_code;
grpc_slice status_details = grpc_empty_slice();
grpc_metadata_array recv_initial_metadata;
grpc_metadata_array_init(&recv_initial_metadata);
grpc_metadata_array recv_trailing_metadata;
grpc_metadata_array_init(&recv_trailing_metadata);
grpc_op ops[6];
memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[1].op = GRPC_OP_SEND_MESSAGE;
ops[1].data.send_message.send_message = send_message;
ops[2].op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
ops[3].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[3].data.recv_initial_metadata.recv_initial_metadata =
&recv_initial_metadata;
ops[4].op = GRPC_OP_RECV_MESSAGE;
ops[4].data.recv_message.recv_message = &recv_message;
ops[5].op = GRPC_OP_RECV_STATUS_ON_CLIENT;
ops[5].data.recv_status_on_client.status = &status_code;
ops[5].data.recv_status_on_client.status_details = &status_details;
ops[5].data.recv_status_on_client.trailing_metadata = &recv_trailing_metadata;
for (auto _ : state) {
grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, nullptr);
grpc_call_start_batch(call, ops, 6, tag(1), nullptr);
grpc_completion_queue_next(fixture.cq(),
gpr_inf_future(GPR_CLOCK_MONOTONIC), nullptr);
grpc_call_unref(call);
}
fixture.Finish(state);
grpc_metadata_array_destroy(&recv_initial_metadata);
grpc_metadata_array_destroy(&recv_trailing_metadata);
grpc_byte_buffer_destroy(send_message);
}
BENCHMARK(BM_IsolatedCall_Unary);
static void BM_IsolatedCall_StreamingSend(benchmark::State& state) {
IsolatedCallFixture fixture;
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
void* method_hdl = grpc_channel_register_call(fixture.channel(), "/foo/bar",
nullptr, nullptr);
grpc_slice slice = grpc_slice_from_static_string("hello world");
grpc_byte_buffer* send_message = grpc_raw_byte_buffer_create(&slice, 1);
grpc_metadata_array recv_initial_metadata;
grpc_metadata_array_init(&recv_initial_metadata);
grpc_metadata_array recv_trailing_metadata;
grpc_metadata_array_init(&recv_trailing_metadata);
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_INITIAL_METADATA;
ops[1].op = GRPC_OP_RECV_INITIAL_METADATA;
ops[1].data.recv_initial_metadata.recv_initial_metadata =
&recv_initial_metadata;
grpc_call* call = grpc_channel_create_registered_call(
fixture.channel(), nullptr, GRPC_PROPAGATE_DEFAULTS, fixture.cq(),
method_hdl, deadline, nullptr);
grpc_call_start_batch(call, ops, 2, tag(1), nullptr);
grpc_completion_queue_next(fixture.cq(), gpr_inf_future(GPR_CLOCK_MONOTONIC),
nullptr);
memset(ops, 0, sizeof(ops));
ops[0].op = GRPC_OP_SEND_MESSAGE;
ops[0].data.send_message.send_message = send_message;
for (auto _ : state) {
grpc_call_start_batch(call, ops, 1, tag(2), nullptr);
grpc_completion_queue_next(fixture.cq(),
gpr_inf_future(GPR_CLOCK_MONOTONIC), nullptr);
}
grpc_call_unref(call);
fixture.Finish(state);
grpc_metadata_array_destroy(&recv_initial_metadata);
grpc_metadata_array_destroy(&recv_trailing_metadata);
grpc_byte_buffer_destroy(send_message);
}
BENCHMARK(BM_IsolatedCall_StreamingSend);
// Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
// and others do not. This allows us to support both modes.
namespace benchmark {
void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); }
} // namespace benchmark
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(&argc, argv);
LibraryInitializer libInit;
::benchmark::Initialize(&argc, argv);
grpc::testing::InitTest(&argc, &argv, false);
benchmark::RunTheBenchmarksNamespaced();
return 0;
}

@ -144,18 +144,18 @@ class Fixture {
void FlushExecCtx() { grpc_core::ExecCtx::Get()->Flush(); }
~Fixture() { grpc_transport_destroy(t_); }
~Fixture() { t_->Orphan(); }
grpc_chttp2_transport* chttp2_transport() {
return reinterpret_cast<grpc_chttp2_transport*>(t_);
}
grpc_transport* transport() { return t_; }
grpc_core::Transport* transport() { return t_; }
void PushInput(grpc_slice slice) { ep_->PushInput(slice); }
private:
PhonyEndpoint* ep_;
grpc_transport* t_;
grpc_core::Transport* t_;
};
class TestClosure : public grpc_closure {
@ -194,7 +194,7 @@ grpc_closure* MakeOnceClosure(F f) {
class Stream {
public:
explicit Stream(Fixture* f) : f_(f) {
stream_size_ = grpc_transport_stream_size(f->transport());
stream_size_ = f->transport()->filter_stack_transport()->SizeOfStream();
stream_ = gpr_malloc(stream_size_);
arena_ = grpc_core::Arena::Create(4096, &memory_allocator_);
}
@ -214,9 +214,8 @@ class Stream {
arena_->Destroy();
arena_ = grpc_core::Arena::Create(4096, &memory_allocator_);
}
grpc_transport_init_stream(f_->transport(),
static_cast<grpc_stream*>(stream_), &refcount_,
nullptr, arena_);
f_->transport()->filter_stack_transport()->InitStream(
static_cast<grpc_stream*>(stream_), &refcount_, nullptr, arena_);
}
void DestroyThen(grpc_closure* closure) {
@ -229,8 +228,8 @@ class Stream {
}
void Op(grpc_transport_stream_op_batch* op) {
grpc_transport_perform_stream_op(f_->transport(),
static_cast<grpc_stream*>(stream_), op);
f_->transport()->filter_stack_transport()->PerformStreamOp(
static_cast<grpc_stream*>(stream_), op);
}
grpc_chttp2_stream* chttp2_stream() {
@ -240,9 +239,8 @@ class Stream {
private:
static void FinishDestroy(void* arg, grpc_error_handle /*error*/) {
auto stream = static_cast<Stream*>(arg);
grpc_transport_destroy_stream(stream->f_->transport(),
static_cast<grpc_stream*>(stream->stream_),
stream->destroy_closure_);
stream->f_->transport()->filter_stack_transport()->DestroyStream(
static_cast<grpc_stream*>(stream->stream_), stream->destroy_closure_);
gpr_event_set(&stream->done_, reinterpret_cast<void*>(1));
}
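In the bm_chttp2_transport.cc hunks above, the benchmark's Stream helper now drives per-stream work through transport()->filter_stack_transport() methods (SizeOfStream, InitStream, PerformStreamOp, DestroyStream) instead of the former grpc_transport_* free functions. The sketch below is a rough, self-contained illustration of that call pattern; MiniFilterStackTransport is a hypothetical stand-in with simplified signatures, not the real gRPC class, and the real methods also take refcounts, op batches, and closures as the diff shows.
// Hypothetical stand-in modeling only the calls visible in the diff above;
// signatures are simplified relative to the real filter-stack transport.
#include <cstddef>
#include <cstdlib>
#include <iostream>
class MiniFilterStackTransport {
 public:
  virtual ~MiniFilterStackTransport() = default;
  virtual std::size_t SizeOfStream() const = 0;
  virtual void InitStream(void* stream) = 0;
  virtual void PerformStreamOp(void* stream) = 0;
  virtual void DestroyStream(void* stream) = 0;
};
class NoopFilterStackTransport final : public MiniFilterStackTransport {
 public:
  std::size_t SizeOfStream() const override { return 64; }
  void InitStream(void*) override {}
  void PerformStreamOp(void*) override {}
  void DestroyStream(void*) override {}
};
int main() {
  NoopFilterStackTransport t;
  // Mirrors the benchmark's lifecycle: query the stream size, allocate the
  // stream storage, initialize it, run one op, then destroy it, all through
  // methods on the transport rather than grpc_transport_* free functions.
  void* stream = std::malloc(t.SizeOfStream());
  t.InitStream(stream);
  t.PerformStreamOp(stream);
  t.DestroyStream(stream);
  std::free(stream);
  std::cout << "stream lifecycle complete" << std::endl;
  return 0;
}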

@ -235,8 +235,8 @@ class EndpointPairFixture : public BaseFixture {
protected:
grpc_endpoint_pair endpoint_pair_;
grpc_transport* client_transport_;
grpc_transport* server_transport_;
grpc_core::Transport* client_transport_;
grpc_core::Transport* server_transport_;
private:
std::unique_ptr<Server> server_;

@ -74,7 +74,7 @@ class EndpointPairFixture {
{
grpc_core::Server* core_server =
grpc_core::Server::FromC(server_->c_server());
grpc_transport* transport = grpc_create_chttp2_transport(
grpc_core::Transport* transport = grpc_create_chttp2_transport(
core_server->channel_args(), endpoints.server, false /* is_client */);
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(endpoints.server, pollset);
@ -96,7 +96,7 @@ class EndpointPairFixture {
.Set(GRPC_ARG_DEFAULT_AUTHORITY, "test.authority");
ApplyCommonChannelArguments(&args);
grpc_transport* transport =
grpc_core::Transport* transport =
grpc_create_chttp2_transport(args, endpoints.client, true);
GPR_ASSERT(transport);
grpc_channel* channel =

@ -2722,7 +2722,6 @@ src/core/lib/transport/timeout_encoding.h \
src/core/lib/transport/transport.cc \
src/core/lib/transport/transport.h \
src/core/lib/transport/transport_fwd.h \
src/core/lib/transport/transport_impl.h \
src/core/lib/transport/transport_op_string.cc \
src/core/lib/uri/uri_parser.cc \
src/core/lib/uri/uri_parser.h \

@ -2504,7 +2504,6 @@ src/core/lib/transport/timeout_encoding.h \
src/core/lib/transport/transport.cc \
src/core/lib/transport/transport.h \
src/core/lib/transport/transport_fwd.h \
src/core/lib/transport/transport_impl.h \
src/core/lib/transport/transport_op_string.cc \
src/core/lib/uri/uri_parser.cc \
src/core/lib/uri/uri_parser.h \

@ -23,7 +23,7 @@ cd $(dirname $0)/../../..
time python3 -m pip install --user -r tools/internal_ci/helper_scripts/requirements.linux_perf.txt
# List of benchmarks that provide good signal for analyzing performance changes in pull requests
BENCHMARKS_TO_RUN="bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong bm_fullstack_streaming_pump bm_closure bm_cq bm_call_create bm_chttp2_hpack bm_chttp2_transport bm_pollset"
BENCHMARKS_TO_RUN="bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong bm_fullstack_streaming_pump bm_closure bm_cq bm_chttp2_hpack bm_chttp2_transport bm_pollset"
tools/run_tests/start_port_server.py

@ -21,7 +21,6 @@ _AVAILABLE_BENCHMARK_TESTS = [
"bm_fullstack_streaming_pump",
"bm_closure",
"bm_cq",
"bm_call_create",
"bm_chttp2_hpack",
"bm_chttp2_transport",
"bm_pollset",
