Move resource_user ownership into chttp2 transport/server/connector v2 (#27032)

Reintroducing PR #26643, which was reverted in #27029

Fixed a memory leak and added a test that would have caught it (ASAN build): ca0c8c4
AJ Heller committed 4 years ago (via GitHub)
parent 46547d5690
commit d10617edb5
Files changed (change counts in parentheses):

  1. CMakeLists.txt (3)
  2. build_autogenerated.yaml (6)
  3. gRPC-Core.podspec (2)
  4. grpc.gyp (2)
  5. include/grpc/event_engine/slice_allocator.h (7)
  6. include/grpc/grpc_posix.h (3)
  7. src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc (1)
  8. src/core/ext/transport/chttp2/client/chttp2_connector.cc (29)
  9. src/core/ext/transport/chttp2/client/chttp2_connector.h (2)
  10. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (2)
  11. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc (16)
  12. src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (2)
  13. src/core/ext/transport/chttp2/server/chttp2_server.cc (84)
  14. src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc (18)
  15. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (25)
  16. src/core/ext/transport/chttp2/transport/chttp2_transport.h (5)
  17. src/core/ext/transport/chttp2/transport/flow_control.cc (5)
  18. src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc (4)
  19. src/core/ext/transport/inproc/inproc_transport.cc (3)
  20. src/core/lib/channel/channel_stack_builder.cc (12)
  21. src/core/lib/channel/channel_stack_builder.h (8)
  22. src/core/lib/http/httpcli.cc (18)
  23. src/core/lib/http/httpcli.h (3)
  24. src/core/lib/iomgr/endpoint.cc (4)
  25. src/core/lib/iomgr/endpoint.h (3)
  26. src/core/lib/iomgr/endpoint_cfstream.cc (27)
  27. src/core/lib/iomgr/endpoint_cfstream.h (2)
  28. src/core/lib/iomgr/endpoint_pair.h (1)
  29. src/core/lib/iomgr/endpoint_pair_posix.cc (18)
  30. src/core/lib/iomgr/endpoint_pair_uv.cc (6)
  31. src/core/lib/iomgr/endpoint_pair_windows.cc (18)
  32. src/core/lib/iomgr/event_engine/endpoint.cc (21)
  33. src/core/lib/iomgr/event_engine/endpoint.h (3)
  34. src/core/lib/iomgr/event_engine/tcp.cc (77)
  35. src/core/lib/iomgr/resource_quota.cc (167)
  36. src/core/lib/iomgr/resource_quota.h (83)
  37. src/core/lib/iomgr/tcp_client.cc (6)
  38. src/core/lib/iomgr/tcp_client.h (3)
  39. src/core/lib/iomgr/tcp_client_cfstream.cc (24)
  40. src/core/lib/iomgr/tcp_client_custom.cc (23)
  41. src/core/lib/iomgr/tcp_client_posix.cc (30)
  42. src/core/lib/iomgr/tcp_client_posix.h (7)
  43. src/core/lib/iomgr/tcp_client_windows.cc (13)
  44. src/core/lib/iomgr/tcp_custom.cc (26)
  45. src/core/lib/iomgr/tcp_custom.h (3)
  46. src/core/lib/iomgr/tcp_posix.cc (54)
  47. src/core/lib/iomgr/tcp_posix.h (20)
  48. src/core/lib/iomgr/tcp_server.cc (10)
  49. src/core/lib/iomgr/tcp_server.h (17)
  50. src/core/lib/iomgr/tcp_server_custom.cc (41)
  51. src/core/lib/iomgr/tcp_server_posix.cc (33)
  52. src/core/lib/iomgr/tcp_server_utils_posix.h (3)
  53. src/core/lib/iomgr/tcp_server_windows.cc (19)
  54. src/core/lib/iomgr/tcp_windows.cc (28)
  55. src/core/lib/iomgr/tcp_windows.h (3)
  56. src/core/lib/security/credentials/external/aws_external_account_credentials.cc (3)
  57. src/core/lib/security/credentials/external/external_account_credentials.cc (2)
  58. src/core/lib/security/credentials/external/url_external_account_credentials.cc (1)
  59. src/core/lib/security/credentials/google_default/google_default_credentials.cc (1)
  60. src/core/lib/security/credentials/jwt/jwt_verifier.cc (2)
  61. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (3)
  62. src/core/lib/security/transport/secure_endpoint.cc (7)
  63. src/core/lib/surface/channel.cc (31)
  64. src/core/lib/surface/channel.h (16)
  65. src/core/lib/surface/lame_client.cc (4)
  66. src/core/lib/surface/server.cc (24)
  67. src/core/lib/surface/server.h (8)
  68. test/core/bad_client/bad_client.cc (14)
  69. test/core/bad_connection/close_fd_test.cc (12)
  70. test/core/end2end/fixtures/h2_sockpair+trace.cc (44)
  71. test/core/end2end/fixtures/h2_sockpair.cc (44)
  72. test/core/end2end/fixtures/h2_sockpair_1byte.cc (42)
  73. test/core/end2end/fixtures/http_proxy_fixture.cc (9)
  74. test/core/end2end/fuzzers/client_fuzzer.cc (21)
  75. test/core/end2end/fuzzers/server_fuzzer.cc (16)
  76. test/core/http/httpcli_test.cc (2)
  77. test/core/http/httpscli_test.cc (2)
  78. test/core/iomgr/endpoint_pair_test.cc (1)
  79. test/core/iomgr/fd_conservation_posix_test.cc (5)
  80. test/core/iomgr/ios/CFStreamTests/CFStreamClientTests.mm (9)
  81. test/core/iomgr/ios/CFStreamTests/CFStreamEndpointTests.mm (4)
  82. test/core/iomgr/ios/CFStreamTests/Podfile (1)
  83. test/core/iomgr/resource_quota_test.cc (177)
  84. test/core/iomgr/tcp_client_posix_test.cc (67)
  85. test/core/iomgr/tcp_client_uv_test.cc (13)
  86. test/core/iomgr/tcp_posix_test.cc (26)
  87. test/core/iomgr/tcp_server_posix_test.cc (30)
  88. test/core/iomgr/tcp_server_uv_test.cc (30)
  89. test/core/security/secure_endpoint_test.cc (1)
  90. test/core/security/ssl_server_fuzzer.cc (9)
  91. test/core/surface/concurrent_connectivity_test.cc (5)
  92. test/core/transport/chttp2/context_list_test.cc (24)
  93. test/core/transport/chttp2/settings_timeout_test.cc (8)
  94. test/core/util/BUILD (3)
  95. test/core/util/mock_authorization_endpoint.h (4)
  96. test/core/util/mock_endpoint.cc (19)
  97. test/core/util/mock_endpoint.h (2)
  98. test/core/util/passthru_endpoint.cc (23)
  99. test/core/util/passthru_endpoint.h (7)
  100. test/core/util/port_server_client.cc (3)

(Some files were not shown because too many files have changed in this diff.)
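
For orientation before the per-file diffs: the heart of this change is that a chttp2 transport now receives, and takes ownership of, an explicit grpc_resource_user from whoever creates it. A minimal sketch of the new caller-side contract (hypothetical code; `endpoint` and `channel_args` are assumed to already exist, and the names follow the signatures introduced in this diff):

    grpc_resource_quota* quota =
        grpc_resource_quota_from_channel_args(channel_args, /*create=*/true);
    // The transport takes ownership of this resource_user ref; a caller that
    // still needs the resource_user afterwards must take another ref.
    grpc_transport* transport = grpc_create_chttp2_transport(
        channel_args, endpoint, /*is_client=*/true,
        grpc_resource_user_create(quota, "example:transport"));
    // The caller is left holding only its own quota ref, which it drops.
    grpc_resource_quota_unref_internal(quota);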

CMakeLists.txt (generated; 3 changes)

@ -2241,6 +2241,7 @@ add_library(grpc_test_util
test/core/util/port_server_client.cc
test/core/util/reconnect_server.cc
test/core/util/resolve_localhost_ip46.cc
test/core/util/resource_user_util.cc
test/core/util/slice_splitter.cc
test/core/util/stack_tracer.cc
test/core/util/subprocess_posix.cc
@ -2310,6 +2311,7 @@ add_library(grpc_test_util_unsecure
test/core/util/port_server_client.cc
test/core/util/reconnect_server.cc
test/core/util/resolve_localhost_ip46.cc
test/core/util/resource_user_util.cc
test/core/util/slice_splitter.cc
test/core/util/stack_tracer.cc
test/core/util/subprocess_posix.cc
@ -15413,6 +15415,7 @@ if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
test/core/util/port_server_client.cc
test/core/util/reconnect_server.cc
test/core/util/resolve_localhost_ip46.cc
test/core/util/resource_user_util.cc
test/core/util/slice_splitter.cc
test/core/util/stack_tracer.cc
test/core/util/subprocess_posix.cc

build_autogenerated.yaml (6 changes)

@ -1549,6 +1549,7 @@ libs:
- test/core/util/port_server_client.h
- test/core/util/reconnect_server.h
- test/core/util/resolve_localhost_ip46.h
- test/core/util/resource_user_util.h
- test/core/util/slice_splitter.h
- test/core/util/stack_tracer.h
- test/core/util/subprocess.h
@ -1571,6 +1572,7 @@ libs:
- test/core/util/port_server_client.cc
- test/core/util/reconnect_server.cc
- test/core/util/resolve_localhost_ip46.cc
- test/core/util/resource_user_util.cc
- test/core/util/slice_splitter.cc
- test/core/util/stack_tracer.cc
- test/core/util/subprocess_posix.cc
@ -1604,6 +1606,7 @@ libs:
- test/core/util/port_server_client.h
- test/core/util/reconnect_server.h
- test/core/util/resolve_localhost_ip46.h
- test/core/util/resource_user_util.h
- test/core/util/slice_splitter.h
- test/core/util/stack_tracer.h
- test/core/util/subprocess.h
@ -1625,6 +1628,7 @@ libs:
- test/core/util/port_server_client.cc
- test/core/util/reconnect_server.cc
- test/core/util/resolve_localhost_ip46.cc
- test/core/util/resource_user_util.cc
- test/core/util/slice_splitter.cc
- test/core/util/stack_tracer.cc
- test/core/util/subprocess_posix.cc
@ -6926,6 +6930,7 @@ targets:
- test/core/util/port_server_client.h
- test/core/util/reconnect_server.h
- test/core/util/resolve_localhost_ip46.h
- test/core/util/resource_user_util.h
- test/core/util/slice_splitter.h
- test/core/util/stack_tracer.h
- test/core/util/subprocess.h
@ -6950,6 +6955,7 @@ targets:
- test/core/util/port_server_client.cc
- test/core/util/reconnect_server.cc
- test/core/util/resolve_localhost_ip46.cc
- test/core/util/resource_user_util.cc
- test/core/util/slice_splitter.cc
- test/core/util/stack_tracer.cc
- test/core/util/subprocess_posix.cc

gRPC-Core.podspec (generated; 2 changes)

@ -2230,6 +2230,8 @@ Pod::Spec.new do |s|
'test/core/util/reconnect_server.h',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/resolve_localhost_ip46.h',
'test/core/util/resource_user_util.cc',
'test/core/util/resource_user_util.h',
'test/core/util/slice_splitter.cc',
'test/core/util/slice_splitter.h',
'test/core/util/stack_tracer.cc',

grpc.gyp (generated; 2 changes)

@ -1082,6 +1082,7 @@
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/resource_user_util.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
@ -1116,6 +1117,7 @@
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/resource_user_util.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',

include/grpc/event_engine/slice_allocator.h (7 changes)

@ -28,11 +28,16 @@ struct grpc_slice_buffer;
namespace grpc_event_engine {
namespace experimental {
// TODO(hork): stubbed out here, to be replaced with a real version in next PR.
// TODO(nnoble): needs implementation
class SliceBuffer {
public:
SliceBuffer() { abort(); }
explicit SliceBuffer(grpc_slice_buffer*) { abort(); }
grpc_slice_buffer* RawSliceBuffer() { return slice_buffer_; }
private:
grpc_slice_buffer* slice_buffer_;
};
class SliceAllocator {

include/grpc/grpc_posix.h (3 changes)

@ -48,6 +48,9 @@ GRPCAPI grpc_channel* grpc_insecure_channel_create_from_fd(
grpc_server_register_completion_queue API).
The 'reserved' pointer MUST be NULL.
TODO(hork): add channel_args to this API to allow endpoints and transports
created in this function to participate in the resource quota feature.
*/
GRPCAPI void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
void* reserved, int fd);

src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc (1 change)

@ -132,7 +132,6 @@ GoogleCloud2ProdResolver::MetadataQuery::MetadataQuery(
grpc_httpcli_get(&context_, pollent, resource_quota, &request,
ExecCtx::Get()->Now() + 10000, // 10s timeout
&on_done_, &response_);
grpc_resource_quota_unref_internal(resource_quota);
}
GoogleCloud2ProdResolver::MetadataQuery::~MetadataQuery() {

src/core/ext/transport/chttp2/client/chttp2_connector.cc (29 changes)

@ -32,6 +32,7 @@
#include "src/core/ext/filters/client_channel/http_connect_handshaker.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/address_utils/sockaddr_utils.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/channel/handshaker_registry.h"
@ -45,7 +46,12 @@ Chttp2Connector::Chttp2Connector() {
}
Chttp2Connector::~Chttp2Connector() {
if (endpoint_ != nullptr) grpc_endpoint_destroy(endpoint_);
if (resource_quota_ != nullptr) {
grpc_resource_quota_unref_internal(resource_quota_);
}
if (endpoint_ != nullptr) {
grpc_endpoint_destroy(endpoint_);
}
}
void Chttp2Connector::Connect(const Args& args, Result* result,
@ -63,6 +69,11 @@ void Chttp2Connector::Connect(const Args& args, Result* result,
connecting_ = true;
GPR_ASSERT(endpoint_ == nullptr);
ep = &endpoint_;
if (resource_quota_ != nullptr) {
grpc_resource_quota_unref_internal(resource_quota_);
}
resource_quota_ =
grpc_resource_quota_from_channel_args(args.channel_args, true);
}
// In some implementations, the closure can be flushed before
// grpc_tcp_client_connect() returns, and since the closure requires access
@ -71,8 +82,12 @@ void Chttp2Connector::Connect(const Args& args, Result* result,
// grpc_tcp_client_connect() will fill endpoint_ with proper contents, and we
// make sure that we still exist at that point by taking a ref.
Ref().release(); // Ref held by callback.
grpc_tcp_client_connect(&connected_, ep, args.interested_parties,
args.channel_args, &addr, args.deadline);
grpc_tcp_client_connect(
&connected_, ep,
grpc_slice_allocator_create(resource_quota_,
grpc_sockaddr_to_string(&addr, false),
args.channel_args),
args.interested_parties, args.channel_args, &addr, args.deadline);
}
void Chttp2Connector::Shutdown(grpc_error_handle error) {
@ -165,8 +180,12 @@ void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
self->result_->Reset();
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
} else if (args->endpoint != nullptr) {
self->result_->transport =
grpc_create_chttp2_transport(args->args, args->endpoint, true);
self->result_->transport = grpc_create_chttp2_transport(
args->args, args->endpoint, true,
grpc_resource_user_create(
self->resource_quota_,
absl::StrCat(grpc_endpoint_get_peer(args->endpoint),
":connector_transport")));
self->result_->socket_node =
grpc_chttp2_transport_get_socket_node(self->result_->transport);
self->result_->channel_args = args->args;
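
The quota itself reaches the connector through the channel args. A hedged sketch of the application-side setup this relies on (hypothetical variable names; the arg-construction helpers are the same ones used in the httpcli hunk later in this diff):

    grpc_resource_quota* quota = grpc_resource_quota_create("client_quota");
    grpc_arg quota_arg = grpc_channel_arg_pointer_create(
        const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA), quota,
        grpc_resource_quota_arg_vtable());
    grpc_channel_args one_arg = {1, &quota_arg};
    // Channels created with these args let the connector find the quota via
    // grpc_resource_quota_from_channel_args(); drop the local ref afterwards.
    grpc_resource_quota_unref(quota);

With that in place, Connect() mints a slice allocator for the endpoint and OnHandshakeDone() mints a resource user for the transport, so buffers and transports are all accounted against the same quota.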

src/core/ext/transport/chttp2/client/chttp2_connector.h (2 changes)

@ -24,6 +24,7 @@
#include "src/core/ext/filters/client_channel/connector.h"
#include "src/core/lib/channel/handshaker.h"
#include "src/core/lib/channel/handshaker_registry.h"
#include "src/core/lib/iomgr/resource_quota.h"
namespace grpc_core {
@ -68,6 +69,7 @@ class Chttp2Connector : public SubchannelConnector {
grpc_closure on_timeout_;
absl::optional<grpc_error_handle> notify_error_;
RefCountedPtr<HandshakeManager> handshake_mgr_;
grpc_resource_quota* resource_quota_ = nullptr;
};
} // namespace grpc_core

src/core/ext/transport/chttp2/client/insecure/channel_create.cc (2 changes)

@ -68,7 +68,7 @@ grpc_channel* CreateChannel(const char* target, const grpc_channel_args* args,
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel = grpc_channel_create(
target, new_args, GRPC_CLIENT_CHANNEL, nullptr, nullptr, error);
target, new_args, GRPC_CLIENT_CHANNEL, nullptr, nullptr, 0, error);
grpc_channel_args_destroy(new_args);
return channel;
}

src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc (16 changes)

@ -49,17 +49,21 @@ grpc_channel* grpc_insecure_channel_create_from_fd(
int flags = fcntl(fd, F_GETFL, 0);
GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
grpc_resource_quota* resource_quota =
grpc_resource_quota_from_channel_args(args, true);
grpc_slice_allocator* allocator = grpc_slice_allocator_create(
resource_quota, "fd-client:endpoint", final_args);
grpc_endpoint* client = grpc_tcp_client_create_from_fd(
grpc_fd_create(fd, "client", true), args, "fd-client");
grpc_transport* transport =
grpc_create_chttp2_transport(final_args, client, true);
grpc_fd_create(fd, "client", true), args, "fd-client", allocator);
grpc_transport* transport = grpc_create_chttp2_transport(
final_args, client, true,
grpc_resource_user_create(resource_quota, "fd-client:transport"));
grpc_resource_quota_unref_internal(resource_quota);
GPR_ASSERT(transport);
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_channel* channel =
grpc_channel_create(target, final_args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, &error);
transport, nullptr, 0, &error);
grpc_channel_args_destroy(final_args);
if (channel != nullptr) {
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);

src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc (2 changes)

@ -146,7 +146,7 @@ grpc_channel* CreateChannel(const char* target, const grpc_channel_args* args,
grpc_channel_args* new_args =
grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
grpc_channel* channel = grpc_channel_create(
target, new_args, GRPC_CLIENT_CHANNEL, nullptr, nullptr, error);
target, new_args, GRPC_CLIENT_CHANNEL, nullptr, nullptr, 0, error);
grpc_channel_args_destroy(new_args);
return channel;
}

src/core/ext/transport/chttp2/server/chttp2_server.cc (84 changes)

@ -72,7 +72,8 @@ class Chttp2ServerListener : public Server::ListenerInterface {
// Do not instantiate directly. Use one of the factory methods above.
Chttp2ServerListener(Server* server, grpc_channel_args* args,
Chttp2ServerArgsModifier args_modifier);
Chttp2ServerArgsModifier args_modifier,
grpc_resource_quota* resource_quota);
~Chttp2ServerListener() override;
void Start(Server* server,
@ -110,7 +111,8 @@ class Chttp2ServerListener : public Server::ListenerInterface {
HandshakingState(RefCountedPtr<ActiveConnection> connection_ref,
grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor,
grpc_channel_args* args);
grpc_channel_args* args,
grpc_resource_user* channel_resource_user);
~HandshakingState() override;
@ -136,11 +138,13 @@ class Chttp2ServerListener : public Server::ListenerInterface {
grpc_closure on_timeout_ ABSL_GUARDED_BY(&connection_->mu_);
grpc_closure on_receive_settings_ ABSL_GUARDED_BY(&connection_->mu_);
grpc_pollset_set* const interested_parties_;
grpc_resource_user* channel_resource_user_;
};
ActiveConnection(grpc_pollset* accepting_pollset,
grpc_tcp_server_acceptor* acceptor,
grpc_channel_args* args);
grpc_channel_args* args,
grpc_resource_user* channel_resource_user);
~ActiveConnection() override;
void Orphan() override;
@ -234,6 +238,7 @@ class Chttp2ServerListener : public Server::ListenerInterface {
grpc_closure tcp_server_shutdown_complete_ ABSL_GUARDED_BY(mu_);
grpc_closure* on_destroy_done_ ABSL_GUARDED_BY(mu_) = nullptr;
RefCountedPtr<channelz::ListenSocketNode> channelz_listen_socket_;
grpc_resource_quota* resource_quota_;
};
//
@ -305,13 +310,14 @@ grpc_millis GetConnectionDeadline(const grpc_channel_args* args) {
Chttp2ServerListener::ActiveConnection::HandshakingState::HandshakingState(
RefCountedPtr<ActiveConnection> connection_ref,
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
grpc_channel_args* args)
grpc_channel_args* args, grpc_resource_user* channel_resource_user)
: connection_(std::move(connection_ref)),
accepting_pollset_(accepting_pollset),
acceptor_(acceptor),
handshake_mgr_(MakeRefCounted<HandshakeManager>()),
deadline_(GetConnectionDeadline(args)),
interested_parties_(grpc_pollset_set_create()) {
interested_parties_(grpc_pollset_set_create()),
channel_resource_user_(channel_resource_user) {
grpc_pollset_set_add_pollset(interested_parties_, accepting_pollset_);
HandshakerRegistry::AddHandshakers(HANDSHAKER_SERVER, args,
interested_parties_, handshake_mgr_.get());
@ -320,6 +326,9 @@ Chttp2ServerListener::ActiveConnection::HandshakingState::HandshakingState(
Chttp2ServerListener::ActiveConnection::HandshakingState::~HandshakingState() {
grpc_pollset_set_del_pollset(interested_parties_, accepting_pollset_);
grpc_pollset_set_destroy(interested_parties_);
if (channel_resource_user_ != nullptr) {
grpc_resource_user_unref(channel_resource_user_);
}
gpr_free(acceptor_);
}
@ -380,16 +389,12 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
OrphanablePtr<HandshakingState> handshaking_state_ref;
RefCountedPtr<HandshakeManager> handshake_mgr;
bool cleanup_connection = false;
bool free_resource_quota = false;
grpc_resource_user* resource_user =
self->connection_->listener_->server_->default_resource_user();
{
MutexLock connection_lock(&self->connection_->mu_);
if (error != GRPC_ERROR_NONE || self->connection_->shutdown_) {
std::string error_str = grpc_error_std_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str.c_str());
cleanup_connection = true;
free_resource_quota = true;
if (error == GRPC_ERROR_NONE && args->endpoint != nullptr) {
// We were shut down or stopped serving after handshaking completed
// successfully, so destroy the endpoint here.
@ -409,12 +414,17 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// code, so we can just clean up here without creating a transport.
if (args->endpoint != nullptr) {
grpc_transport* transport = grpc_create_chttp2_transport(
args->args, args->endpoint, false, resource_user);
args->args, args->endpoint, false,
grpc_resource_user_create(
self->connection_->listener_->resource_quota_,
absl::StrCat(grpc_endpoint_get_peer(args->endpoint),
":chttp2_server_transport")));
grpc_error_handle channel_init_err =
self->connection_->listener_->server_->SetupTransport(
transport, self->accepting_pollset_, args->args,
grpc_chttp2_transport_get_socket_node(transport),
resource_user);
self->channel_resource_user_, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
self->channel_resource_user_ = nullptr;
if (channel_init_err == GRPC_ERROR_NONE) {
// Use notify_on_receive_settings callback to enforce the
// handshake deadline.
@ -462,12 +472,10 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
grpc_slice_buffer_destroy_internal(args->read_buffer);
gpr_free(args->read_buffer);
cleanup_connection = true;
free_resource_quota = true;
grpc_channel_args_destroy(args->args);
}
} else {
cleanup_connection = true;
free_resource_quota = true;
}
}
// Since the handshake manager is done, the connection no longer needs to
@ -480,8 +488,9 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
gpr_free(self->acceptor_);
self->acceptor_ = nullptr;
OrphanablePtr<ActiveConnection> connection;
if (free_resource_quota && resource_user != nullptr) {
grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
if (self->channel_resource_user_ != nullptr) {
grpc_resource_user_free(self->channel_resource_user_,
GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
}
if (cleanup_connection) {
MutexLock listener_lock(&self->connection_->listener_->mu_);
@ -501,9 +510,9 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
Chttp2ServerListener::ActiveConnection::ActiveConnection(
grpc_pollset* accepting_pollset, grpc_tcp_server_acceptor* acceptor,
grpc_channel_args* args)
grpc_channel_args* args, grpc_resource_user* channel_resource_user)
: handshaking_state_(MakeOrphanable<HandshakingState>(
Ref(), accepting_pollset, acceptor, args)) {
Ref(), accepting_pollset, acceptor, args, channel_resource_user)) {
GRPC_CLOSURE_INIT(&on_close_, ActiveConnection::OnClose, this,
grpc_schedule_on_exec_ctx);
}
@ -587,9 +596,14 @@ grpc_error_handle Chttp2ServerListener::Create(
// easier without using goto.
grpc_error_handle error = [&]() {
// Create Chttp2ServerListener.
listener = new Chttp2ServerListener(server, args, args_modifier);
error = grpc_tcp_server_create(&listener->tcp_server_shutdown_complete_,
args, &listener->tcp_server_);
listener = new Chttp2ServerListener(
server, args, args_modifier,
grpc_resource_quota_from_channel_args(args, true));
grpc_resource_quota_ref_internal(listener->resource_quota_);
error = grpc_tcp_server_create(
&listener->tcp_server_shutdown_complete_, args,
grpc_slice_allocator_factory_create(listener->resource_quota_),
&listener->tcp_server_);
if (error != GRPC_ERROR_NONE) return error;
if (server->config_fetcher() != nullptr) {
listener->resolved_address_ = *addr;
@ -630,10 +644,14 @@ grpc_error_handle Chttp2ServerListener::Create(
grpc_error_handle Chttp2ServerListener::CreateWithAcceptor(
Server* server, const char* name, grpc_channel_args* args,
Chttp2ServerArgsModifier args_modifier) {
Chttp2ServerListener* listener =
new Chttp2ServerListener(server, args, args_modifier);
Chttp2ServerListener* listener = new Chttp2ServerListener(
server, args, args_modifier,
grpc_resource_quota_from_channel_args(args, true));
grpc_resource_quota_ref_internal(listener->resource_quota_);
grpc_error_handle error = grpc_tcp_server_create(
&listener->tcp_server_shutdown_complete_, args, &listener->tcp_server_);
&listener->tcp_server_shutdown_complete_, args,
grpc_slice_allocator_factory_create(listener->resource_quota_),
&listener->tcp_server_);
if (error != GRPC_ERROR_NONE) {
delete listener;
return error;
@ -648,8 +666,11 @@ grpc_error_handle Chttp2ServerListener::CreateWithAcceptor(
Chttp2ServerListener::Chttp2ServerListener(
Server* server, grpc_channel_args* args,
Chttp2ServerArgsModifier args_modifier)
: server_(server), args_modifier_(args_modifier), args_(args) {
Chttp2ServerArgsModifier args_modifier, grpc_resource_quota* resource_quota)
: server_(server),
args_modifier_(args_modifier),
args_(args),
resource_quota_(resource_quota) {
GRPC_CLOSURE_INIT(&tcp_server_shutdown_complete_, TcpServerShutdownComplete,
this, grpc_schedule_on_exec_ctx);
}
@ -662,6 +683,7 @@ Chttp2ServerListener::~Chttp2ServerListener() {
ExecCtx::Run(DEBUG_LOCATION, on_destroy_done_, GRPC_ERROR_NONE);
ExecCtx::Get()->Flush();
}
grpc_resource_quota_unref_internal(resource_quota_);
grpc_channel_args_destroy(args_);
}
@ -744,8 +766,11 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
return;
}
}
auto connection =
MakeOrphanable<ActiveConnection>(accepting_pollset, acceptor, args);
grpc_resource_user* channel_resource_user = grpc_resource_user_create(
self->resource_quota_,
absl::StrCat(grpc_endpoint_get_peer(tcp), ":server_channel"));
auto connection = MakeOrphanable<ActiveConnection>(
accepting_pollset, acceptor, args, channel_resource_user);
// We no longer own acceptor
acceptor = nullptr;
// Hold a ref to connection to allow starting handshake outside the
@ -756,10 +781,7 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
MutexLock lock(&self->mu_);
// Shutdown the connection if the listener has stopped serving.
if (!self->shutdown_ && self->is_serving_) {
grpc_resource_user* resource_user =
self->server_->default_resource_user();
if (resource_user != nullptr &&
!grpc_resource_user_safe_alloc(resource_user,
if (!grpc_resource_user_safe_alloc(channel_resource_user,
GRPC_RESOURCE_QUOTA_CHANNEL_SIZE)) {
gpr_log(
GPR_INFO,
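
Distilling the server-side flow above into one hedged sketch (member names as in the hunks): every accepted connection gets its own named resource user, and admission is gated by a safe allocation against the channel-size quota before any channel is built:

    grpc_resource_user* channel_resource_user = grpc_resource_user_create(
        self->resource_quota_,
        absl::StrCat(grpc_endpoint_get_peer(tcp), ":server_channel"));
    if (!grpc_resource_user_safe_alloc(channel_resource_user,
                                       GRPC_RESOURCE_QUOTA_CHANNEL_SIZE)) {
      // Quota exhausted: refuse the connection instead of building a call
      // stack, which would be far more expensive to tear back down.
    }

On success, ownership of channel_resource_user travels with the HandshakingState into SetupTransport(); on every failure path the listener frees the allocation and unrefs the user, which is why both the destructor and OnHandshakeDone() null-check channel_resource_user_.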

src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc (18 changes)

@ -45,14 +45,20 @@ void grpc_server_add_insecure_channel_from_fd(grpc_server* server,
const grpc_channel_args* server_args = core_server->channel_args();
std::string name = absl::StrCat("fd:", fd);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create(name.c_str());
grpc_endpoint* server_endpoint = grpc_tcp_create(
grpc_fd_create(fd, name.c_str(), true), server_args, name.c_str());
grpc_fd_create(fd, name.c_str(), true), server_args, name.c_str(),
grpc_slice_allocator_create(resource_quota, name, server_args));
grpc_transport* transport = grpc_create_chttp2_transport(
server_args, server_endpoint, false /* is_client */);
grpc_error_handle error =
core_server->SetupTransport(transport, nullptr, server_args, nullptr);
server_args, server_endpoint, false /* is_client */,
grpc_resource_user_create(resource_quota,
absl::StrCat(name, ":transport")));
grpc_error_handle error = core_server->SetupTransport(
transport, nullptr, server_args, nullptr,
grpc_resource_user_create(resource_quota,
absl::StrCat(name, ":channel")));
grpc_resource_quota_unref_internal(resource_quota);
if (error == GRPC_ERROR_NONE) {
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(server_endpoint, pollset);

src/core/ext/transport/chttp2/transport/chttp2_transport.cc (25 changes)

@ -538,6 +538,8 @@ grpc_chttp2_transport::grpc_chttp2_transport(
static void destroy_transport_locked(void* tp, grpc_error_handle /*error*/) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->destroying = 1;
grpc_resource_user_shutdown(t->resource_user);
grpc_resource_user_unref(t->resource_user);
close_transport_locked(
t, grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
@ -712,13 +714,10 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
GRPC_ERROR_UNREF(read_closed_error);
GRPC_ERROR_UNREF(write_closed_error);
GRPC_ERROR_UNREF(byte_stream_error);
flow_control.Destroy();
if (t->resource_user != nullptr) {
if (!t->is_client) {
grpc_resource_user_free(t->resource_user, GRPC_RESOURCE_QUOTA_CALL_SIZE);
}
GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream");
grpc_core::ExecCtx::Run(DEBUG_LOCATION, destroy_stream_arg, GRPC_ERROR_NONE);
}
@ -770,8 +769,8 @@ grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
// Don't accept the stream if memory quota doesn't allow. Note that we should
// simply refuse the stream here instead of canceling the stream after it's
// accepted since the latter will create the call which costs much memory.
if (t->resource_user != nullptr &&
!grpc_resource_user_safe_alloc(t->resource_user,
GPR_ASSERT(t->resource_user != nullptr);
if (!grpc_resource_user_safe_alloc(t->resource_user,
GRPC_RESOURCE_QUOTA_CALL_SIZE)) {
gpr_log(GPR_INFO, "Memory exhausted, rejecting the stream.");
grpc_chttp2_add_rst_stream_to_next_write(t, id, GRPC_HTTP2_REFUSED_STREAM,
@ -3161,8 +3160,8 @@ static void post_benign_reclaimer(grpc_chttp2_transport* t) {
GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer, t,
grpc_schedule_on_exec_ctx);
grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
false, &t->benign_reclaimer_locked);
grpc_resource_user_post_reclaimer(t->resource_user, false,
&t->benign_reclaimer_locked);
}
}
@ -3172,8 +3171,8 @@ static void post_destructive_reclaimer(grpc_chttp2_transport* t) {
GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked, destructive_reclaimer,
t, grpc_schedule_on_exec_ctx);
grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
true, &t->destructive_reclaimer_locked);
grpc_resource_user_post_reclaimer(t->resource_user, true,
&t->destructive_reclaimer_locked);
}
}
@ -3208,8 +3207,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error_handle error) {
}
t->benign_reclaimer_registered = false;
if (error != GRPC_ERROR_CANCELLED) {
grpc_resource_user_finish_reclamation(
grpc_endpoint_get_resource_user(t->ep));
grpc_resource_user_finish_reclamation(t->resource_user);
}
GRPC_CHTTP2_UNREF_TRANSPORT(t, "benign_reclaimer");
}
@ -3246,8 +3244,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error_handle error) {
}
}
if (error != GRPC_ERROR_CANCELLED) {
grpc_resource_user_finish_reclamation(
grpc_endpoint_get_resource_user(t->ep));
grpc_resource_user_finish_reclamation(t->resource_user);
}
GRPC_CHTTP2_UNREF_TRANSPORT(t, "destructive_reclaimer");
}
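
Since the transport now owns its resource user, the reclaimer protocol runs against t->resource_user instead of the endpoint's. A compressed sketch of the cycle (closure names taken from the hunks above):

    // Offer memory back to the quota: benign first (flush caches, shrink
    // buffers), destructive second (reject work) if pressure persists.
    grpc_resource_user_post_reclaimer(t->resource_user, /*destructive=*/false,
                                      &t->benign_reclaimer_locked);
    grpc_resource_user_post_reclaimer(t->resource_user, /*destructive=*/true,
                                      &t->destructive_reclaimer_locked);
    // Each reclaimer, unless cancelled, must report completion so the quota
    // can resume handing out memory:
    grpc_resource_user_finish_reclamation(t->resource_user);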

src/core/ext/transport/chttp2/transport/chttp2_transport.h (5 changes)

@ -34,9 +34,12 @@ extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_hpack_parser;
extern bool g_flow_control_enabled;
/// Creates a CHTTP2 Transport. This takes ownership of a \a resource_user ref
/// from the caller; if the caller still needs the resource_user after creating
/// a transport, the caller must take another ref.
grpc_transport* grpc_create_chttp2_transport(
const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client,
grpc_resource_user* resource_user = nullptr);
grpc_resource_user* resource_user);
grpc_core::RefCountedPtr<grpc_core::channelz::SocketNode>
grpc_chttp2_transport_get_socket_node(grpc_transport* transport);

src/core/ext/transport/chttp2/transport/flow_control.cc (5 changes)

@ -345,9 +345,8 @@ static double AdjustForMemoryPressure(grpc_resource_quota* quota,
}
double TransportFlowControl::TargetLogBdp() {
return AdjustForMemoryPressure(
grpc_resource_user_quota(grpc_endpoint_get_resource_user(t_->ep)),
1 + log2(bdp_estimator_.EstimateBdp()));
return AdjustForMemoryPressure(grpc_resource_user_quota(t_->resource_user),
1 + log2(bdp_estimator_.EstimateBdp()));
}
double TransportFlowControl::SmoothLogBdp(double value) {

src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc (4 changes)

@ -58,8 +58,8 @@ GRPCAPI grpc_channel* grpc_cronet_secure_channel_create(
grpc_create_cronet_transport(engine, target, new_args, reserved);
grpc_core::ExecCtx exec_ctx;
grpc_channel* channel =
grpc_channel_create(target, new_args, GRPC_CLIENT_DIRECT_CHANNEL, ct);
grpc_channel* channel = grpc_channel_create(
target, new_args, GRPC_CLIENT_DIRECT_CHANNEL, ct, nullptr, 0, nullptr);
grpc_channel_args_destroy(new_args);
return channel;
}

src/core/ext/transport/inproc/inproc_transport.cc (3 changes)

@ -1290,7 +1290,6 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
const grpc_channel_args* server_args = grpc_channel_args_copy_and_remove(
server->core_server->channel_args(), args_to_remove,
GPR_ARRAY_SIZE(args_to_remove));
// Add a default authority channel argument for the client
grpc_arg default_authority_arg;
default_authority_arg.type = GRPC_ARG_STRING;
@ -1311,7 +1310,7 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
if (error == GRPC_ERROR_NONE) {
channel =
grpc_channel_create("inproc", client_args, GRPC_CLIENT_DIRECT_CHANNEL,
client_transport, nullptr, &error);
client_transport, nullptr, 0, &error);
if (error != GRPC_ERROR_NONE) {
GPR_ASSERT(!channel);
gpr_log(GPR_ERROR, "Failed to create client channel: %s",

src/core/lib/channel/channel_stack_builder.cc (12 changes)

@ -41,6 +41,7 @@ struct grpc_channel_stack_builder {
grpc_channel_args* args;
grpc_transport* transport;
grpc_resource_user* resource_user;
size_t preallocated_bytes;
char* target;
const char* name;
};
@ -174,17 +175,6 @@ grpc_transport* grpc_channel_stack_builder_get_transport(
return builder->transport;
}
void grpc_channel_stack_builder_set_resource_user(
grpc_channel_stack_builder* builder, grpc_resource_user* resource_user) {
GPR_ASSERT(builder->resource_user == nullptr);
builder->resource_user = resource_user;
}
grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
grpc_channel_stack_builder* builder) {
return builder->resource_user;
}
bool grpc_channel_stack_builder_append_filter(
grpc_channel_stack_builder* builder, const grpc_channel_filter* filter,
grpc_post_filter_create_init_func post_init_func, void* user_data) {

src/core/lib/channel/channel_stack_builder.h (8 changes)

@ -54,14 +54,6 @@ void grpc_channel_stack_builder_set_transport(
grpc_transport* grpc_channel_stack_builder_get_transport(
grpc_channel_stack_builder* builder);
/// Attach \a resource_user to the builder (does not take ownership)
void grpc_channel_stack_builder_set_resource_user(
grpc_channel_stack_builder* builder, grpc_resource_user* resource_user);
/// Fetch attached resource user
grpc_resource_user* grpc_channel_stack_builder_get_resource_user(
grpc_channel_stack_builder* builder);
/// Set channel arguments: copies args
void grpc_channel_stack_builder_set_channel_arguments(
grpc_channel_stack_builder* builder, const grpc_channel_args* args);

src/core/lib/http/httpcli.cc (18 changes)

@ -31,7 +31,6 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/address_utils/sockaddr_utils.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/http/format_request.h"
@ -48,6 +47,7 @@ struct internal_request {
grpc_resolved_addresses* addresses;
size_t next_address;
grpc_endpoint* ep;
grpc_resource_quota* resource_quota;
char* host;
char* ssl_host_override;
grpc_millis deadline;
@ -63,7 +63,6 @@ struct internal_request {
grpc_closure done_write;
grpc_closure connected;
grpc_error_handle overall_error;
grpc_resource_quota* resource_quota;
};
static grpc_httpcli_get_override g_get_override = nullptr;
static grpc_httpcli_post_override g_post_override = nullptr;
@ -208,12 +207,11 @@ static void next_address(internal_request* req, grpc_error_handle error) {
addr = &req->addresses->addrs[req->next_address++];
GRPC_CLOSURE_INIT(&req->connected, on_connected, req,
grpc_schedule_on_exec_ctx);
grpc_arg arg = grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_RESOURCE_QUOTA), req->resource_quota,
grpc_resource_quota_arg_vtable());
grpc_channel_args args = {1, &arg};
grpc_tcp_client_connect(&req->connected, &req->ep, req->context->pollset_set,
&args, addr, req->deadline);
grpc_tcp_client_connect(&req->connected, &req->ep,
grpc_slice_allocator_create(
req->resource_quota, grpc_sockaddr_to_uri(addr)),
req->context->pollset_set, nullptr, addr,
req->deadline);
}
static void on_resolved(void* arg, grpc_error_handle error) {
@ -246,7 +244,7 @@ static void internal_request_begin(grpc_httpcli_context* context,
req->context = context;
req->pollent = pollent;
req->overall_error = GRPC_ERROR_NONE;
req->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
req->resource_quota = resource_quota;
GRPC_CLOSURE_INIT(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&req->done_write, done_write, req,
grpc_schedule_on_exec_ctx);
@ -271,6 +269,7 @@ void grpc_httpcli_get(grpc_httpcli_context* context,
const grpc_httpcli_request* request, grpc_millis deadline,
grpc_closure* on_done, grpc_httpcli_response* response) {
if (g_get_override && g_get_override(request, deadline, on_done, response)) {
grpc_resource_quota_unref_internal(resource_quota);
return;
}
std::string name =
@ -289,6 +288,7 @@ void grpc_httpcli_post(grpc_httpcli_context* context,
grpc_httpcli_response* response) {
if (g_post_override && g_post_override(request, body_bytes, body_size,
deadline, on_done, response)) {
grpc_resource_quota_unref_internal(resource_quota);
return;
}
std::string name =
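
The new httpcli contract is ownership transfer: grpc_httpcli_get()/grpc_httpcli_post() consume one resource_quota ref, which is why internal_request_begin() no longer takes its own ref and why the override early-returns must now unref. A hedged caller-side sketch (context, pollent, request, and closures are assumed to be initialized, mirroring the google_c2p_resolver hunk earlier in this diff):

    grpc_resource_quota* quota = grpc_resource_quota_create("httpcli_example");
    grpc_httpcli_get(&context, pollent, quota, &request,
                     grpc_core::ExecCtx::Get()->Now() + 10000,  // 10s deadline
                     &on_done, &response);
    // No unref here: the ref was handed to httpcli, which releases it when
    // the request is destroyed.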

src/core/lib/http/httpcli.h (3 changes)

@ -30,6 +30,7 @@
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resource_quota.h"
/* User agent this library reports */
#define GRPC_HTTPCLI_USER_AGENT "grpc-httpcli/0.0"
@ -75,6 +76,7 @@ void grpc_httpcli_context_destroy(grpc_httpcli_context* context);
'pollset' indicates a grpc_pollset that is interested in the result
of the get - work on this pollset may be used to progress the get
operation
'resource_quota' - this function takes ownership of a ref from the caller
'request' contains request parameters - these are caller owned and can be
destroyed once the call returns
'deadline' contains a deadline for the request (or gpr_inf_future)
@ -90,6 +92,7 @@ void grpc_httpcli_get(grpc_httpcli_context* context,
'pollset' indicates a grpc_pollset that is interested in the result
of the post - work on this pollset may be used to progress the post
operation
'resource_quota' - this function takes ownership of a ref from the caller.
'request' contains request parameters - these are caller owned and can be
destroyed once the call returns
'body_bytes' and 'body_size' specify the payload for the post.

src/core/lib/iomgr/endpoint.cc (4 changes)

@ -62,10 +62,6 @@ absl::string_view grpc_endpoint_get_local_address(grpc_endpoint* ep) {
int grpc_endpoint_get_fd(grpc_endpoint* ep) { return ep->vtable->get_fd(ep); }
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep) {
return ep->vtable->get_resource_user(ep);
}
bool grpc_endpoint_can_track_err(grpc_endpoint* ep) {
return ep->vtable->can_track_err(ep);
}

src/core/lib/iomgr/endpoint.h (3 changes)

@ -46,7 +46,6 @@ struct grpc_endpoint_vtable {
void (*delete_from_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset);
void (*shutdown)(grpc_endpoint* ep, grpc_error_handle why);
void (*destroy)(grpc_endpoint* ep);
grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
absl::string_view (*get_peer)(grpc_endpoint* ep);
absl::string_view (*get_local_address)(grpc_endpoint* ep);
int (*get_fd)(grpc_endpoint* ep);
@ -99,8 +98,6 @@ void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep,
void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
grpc_pollset_set* pollset_set);
grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* ep);
bool grpc_endpoint_can_track_err(grpc_endpoint* ep);
struct grpc_endpoint {

src/core/lib/iomgr/endpoint_cfstream.cc (27 changes)

@ -59,11 +59,10 @@ struct CFStreamEndpoint {
std::string peer_string;
std::string local_address;
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
grpc_slice_allocator* slice_allocator;
};
static void CFStreamFree(CFStreamEndpoint* ep) {
grpc_resource_user_unref(ep->resource_user);
grpc_slice_allocator_destroy(ep->slice_allocator);
CFRelease(ep->read_stream);
CFRelease(ep->write_stream);
CFSTREAM_HANDLE_UNREF(ep->stream_sync, "free");
@ -263,9 +262,10 @@ static void CFStreamRead(grpc_endpoint* ep, grpc_slice_buffer* slices,
ep_impl->read_slices = slices;
grpc_slice_buffer_reset_and_unref_internal(slices);
EP_REF(ep_impl, "read");
if (grpc_resource_user_alloc_slices(&ep_impl->slice_allocator,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
ep_impl->read_slices)) {
if (grpc_slice_allocator_allocate(
ep_impl->slice_allocator, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
grpc_slice_allocator_intent::kReadBuffer, ep_impl->read_slices,
CFStreamReadAllocationDone, ep_impl)) {
ep_impl->stream_sync->NotifyOnRead(&ep_impl->read_action);
}
}
@ -292,7 +292,6 @@ void CFStreamShutdown(grpc_endpoint* ep, grpc_error_handle why) {
CFReadStreamClose(ep_impl->read_stream);
CFWriteStreamClose(ep_impl->write_stream);
ep_impl->stream_sync->Shutdown(why);
grpc_resource_user_shutdown(ep_impl->resource_user);
if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CFStream endpoint:%p shutdown DONE (%p)", ep_impl, why);
}
@ -306,11 +305,6 @@ void CFStreamDestroy(grpc_endpoint* ep) {
EP_UNREF(ep_impl, "destroy");
}
grpc_resource_user* CFStreamGetResourceUser(grpc_endpoint* ep) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
return ep_impl->resource_user;
}
absl::string_view CFStreamGetPeer(grpc_endpoint* ep) {
CFStreamEndpoint* ep_impl = reinterpret_cast<CFStreamEndpoint*>(ep);
return ep_impl->peer_string;
@ -337,7 +331,6 @@ static const grpc_endpoint_vtable vtable = {CFStreamRead,
CFStreamDeleteFromPollsetSet,
CFStreamShutdown,
CFStreamDestroy,
CFStreamGetResourceUser,
CFStreamGetPeer,
CFStreamGetLocalAddress,
CFStreamGetFD,
@ -345,7 +338,7 @@ static const grpc_endpoint_vtable vtable = {CFStreamRead,
grpc_endpoint* grpc_cfstream_endpoint_create(
CFReadStreamRef read_stream, CFWriteStreamRef write_stream,
const char* peer_string, grpc_resource_quota* resource_quota,
const char* peer_string, grpc_slice_allocator* slice_allocator,
CFStreamHandle* stream_sync) {
CFStreamEndpoint* ep_impl = new CFStreamEndpoint;
if (grpc_tcp_trace.enabled()) {
@ -387,11 +380,7 @@ grpc_endpoint* grpc_cfstream_endpoint_create(
static_cast<void*>(ep_impl), grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&ep_impl->write_action, WriteAction,
static_cast<void*>(ep_impl), grpc_schedule_on_exec_ctx);
ep_impl->resource_user =
grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(&ep_impl->slice_allocator,
ep_impl->resource_user,
CFStreamReadAllocationDone, ep_impl);
ep_impl->slice_allocator = slice_allocator;
return &ep_impl->base;
}

src/core/lib/iomgr/endpoint_cfstream.h (2 changes)

@ -41,7 +41,7 @@
grpc_endpoint* grpc_cfstream_endpoint_create(
CFReadStreamRef read_stream, CFWriteStreamRef write_stream,
const char* peer_string, grpc_resource_quota* resource_quota,
const char* peer_string, grpc_slice_allocator* slice_allocator,
CFStreamHandle* stream_sync);
#endif /* GRPC_CFSTREAM */

src/core/lib/iomgr/endpoint_pair.h (1 change)

@ -27,6 +27,7 @@ struct grpc_endpoint_pair {
grpc_endpoint* client;
grpc_endpoint* server;
};
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
grpc_channel_args* args);

src/core/lib/iomgr/endpoint_pair_posix.cc (18 changes)

@ -57,16 +57,20 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
int sv[2];
grpc_endpoint_pair p;
create_sockets(sv);
grpc_core::ExecCtx exec_ctx;
std::string final_name = absl::StrCat(name, ":client");
p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name.c_str(), false),
args, "socketpair-server");
grpc_resource_quota* resource_quota =
grpc_resource_quota_from_channel_args(args, true);
p.client = grpc_tcp_create(
grpc_fd_create(sv[1], final_name.c_str(), false), args,
"socketpair-server",
grpc_slice_allocator_create(resource_quota, "server_endpoint", args));
final_name = absl::StrCat(name, ":server");
p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name.c_str(), false),
args, "socketpair-client");
p.server = grpc_tcp_create(
grpc_fd_create(sv[0], final_name.c_str(), false), args,
"socketpair-client",
grpc_slice_allocator_create(resource_quota, "client_endpoint", args));
grpc_resource_quota_unref_internal(resource_quota);
return p;
}
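
The same pattern recurs in the windows pair factory below: the factory resolves one quota from the channel args, hands each endpoint its own slice allocator bound to that quota, and then drops its own quota ref, since each allocator holds what it needs. Schematically (a sketch only; client_fd and server_fd are hypothetical grpc_fd* values):

    grpc_resource_quota* quota =
        grpc_resource_quota_from_channel_args(args, /*create=*/true);
    grpc_endpoint* client = grpc_tcp_create(
        client_fd, args, "socketpair-client",
        grpc_slice_allocator_create(quota, "client_endpoint", args));
    grpc_endpoint* server = grpc_tcp_create(
        server_fd, args, "socketpair-server",
        grpc_slice_allocator_create(quota, "server_endpoint", args));
    grpc_resource_quota_unref_internal(quota);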

src/core/lib/iomgr/endpoint_pair_uv.cc (6 changes)

@ -28,13 +28,13 @@
#include "src/core/lib/iomgr/endpoint_pair.h"
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
grpc_channel_args* args) {
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
const char* /* name */, grpc_channel_args* /* args */) {
grpc_endpoint_pair endpoint_pair;
// TODO(mlumish): implement this properly under libuv
GPR_ASSERT(false &&
"grpc_iomgr_create_endpoint_pair is not suppoted with libuv");
return endpoint_pair;
GPR_UNREACHABLE_CODE(return endpoint_pair);
}
#endif /* GRPC_UV */

src/core/lib/iomgr/endpoint_pair_windows.cc (18 changes)

@ -76,11 +76,19 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(
grpc_endpoint_pair p;
create_sockets(sv);
grpc_core::ExecCtx exec_ctx;
p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
channel_args, "endpoint:server");
p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
channel_args, "endpoint:client");
grpc_resource_quota* resource_quota =
grpc_resource_quota_from_channel_args(channel_args, true);
p.client =
grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
channel_args, "endpoint:server",
grpc_slice_allocator_create(
resource_quota, "endpoint:server", channel_args));
p.server =
grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
channel_args, "endpoint:client",
grpc_slice_allocator_create(
resource_quota, "endpoint:client", channel_args));
grpc_resource_quota_unref_internal(resource_quota);
return p;
}

src/core/lib/iomgr/event_engine/endpoint.cc (21 changes)

@ -39,6 +39,7 @@ namespace {
using ::grpc_event_engine::experimental::EventEngine;
using ::grpc_event_engine::experimental::ResolvedAddressToURI;
using ::grpc_event_engine::experimental::SliceAllocator;
using ::grpc_event_engine::experimental::SliceBuffer;
void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
@ -102,21 +103,14 @@ void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
gpr_log(GPR_INFO, "TCP Endpoint %p shutdown why=%s", eeep->endpoint.get(),
str);
}
grpc_resource_user_shutdown(eeep->ru);
eeep->endpoint.reset();
}
void endpoint_destroy(grpc_endpoint* ep) {
auto* eeep = reinterpret_cast<grpc_event_engine_endpoint*>(ep);
grpc_resource_user_unref(eeep->ru);
delete eeep;
}
grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
auto* eeep = reinterpret_cast<grpc_event_engine_endpoint*>(ep);
return eeep->ru;
}
absl::string_view endpoint_get_peer(grpc_endpoint* ep) {
auto* eeep = reinterpret_cast<grpc_event_engine_endpoint*>(ep);
if (eeep->endpoint == nullptr) {
@ -154,7 +148,6 @@ grpc_endpoint_vtable grpc_event_engine_endpoint_vtable = {
endpoint_delete_from_pollset_set,
endpoint_shutdown,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
endpoint_get_local_address,
endpoint_get_fd,
@ -166,7 +159,6 @@ grpc_event_engine_endpoint* grpc_tcp_server_endpoint_create(
std::unique_ptr<EventEngine::Endpoint> ee_endpoint) {
auto endpoint = new grpc_event_engine_endpoint;
endpoint->base.vtable = &grpc_event_engine_endpoint_vtable;
// TODO(hork): populate endpoint->ru from the uvEngine's subclass
endpoint->endpoint = std::move(ee_endpoint);
return endpoint;
}
@ -175,17 +167,6 @@ grpc_endpoint* grpc_tcp_create(const grpc_channel_args* channel_args,
absl::string_view peer_address) {
auto endpoint = new grpc_event_engine_endpoint;
endpoint->base.vtable = &grpc_event_engine_endpoint_vtable;
grpc_resource_quota* resource_quota =
grpc_channel_args_find_pointer<grpc_resource_quota>(
channel_args, GRPC_ARG_RESOURCE_QUOTA);
if (resource_quota != nullptr) {
grpc_resource_quota_ref_internal(resource_quota);
} else {
resource_quota = grpc_resource_quota_create(nullptr);
}
endpoint->ru = grpc_resource_user_create(resource_quota,
std::string(peer_address).c_str());
grpc_resource_quota_unref_internal(resource_quota);
return &endpoint->base;
}

src/core/lib/iomgr/event_engine/endpoint.h (3 changes)

@ -28,7 +28,6 @@ struct grpc_event_engine_endpoint {
endpoint;
std::string peer_address;
std::string local_address;
grpc_resource_user* ru = nullptr;
std::aligned_storage<
sizeof(grpc_event_engine::experimental::SliceBuffer),
alignof(grpc_event_engine::experimental::SliceBuffer)>::type read_buffer;
@ -45,7 +44,7 @@ grpc_event_engine_endpoint* grpc_tcp_server_endpoint_create(
/// Creates a new internal grpc_endpoint struct, when no EventEngine Endpoint
/// has yet been created. This is used in client code before connections are
/// established.
/// established. Takes ownership of the slice_allocator.
grpc_endpoint* grpc_tcp_create(const grpc_channel_args* channel_args,
absl::string_view peer_address);

src/core/lib/iomgr/event_engine/tcp.cc (77 changes)

@ -41,23 +41,53 @@ using ::grpc_event_engine::experimental::SliceAllocatorFactory;
using ::grpc_event_engine::experimental::SliceBuffer;
} // namespace
// TODO(hork): remove these classes in PR #26643, when the iomgr APIs change to
// accept SliceAllocators and SliceAllocatorFactory(ie)s. In the meantime, the
// libuv work has temporary implementations as well.
class NoopSliceAllocator : public SliceAllocator {
class WrappedInternalSliceAllocator : public SliceAllocator {
public:
explicit WrappedInternalSliceAllocator(grpc_slice_allocator* slice_allocator)
: slice_allocator_(slice_allocator) {}
~WrappedInternalSliceAllocator() {
grpc_slice_allocator_destroy(slice_allocator_);
}
absl::Status Allocate(size_t size, SliceBuffer* dest,
SliceAllocator::AllocateCallback cb) {
SliceAllocator::AllocateCallback cb) override {
// TODO(nnoble): requires the SliceBuffer definition.
grpc_slice_allocator_allocate(
slice_allocator_, size, 1, grpc_slice_allocator_intent::kReadBuffer,
dest->RawSliceBuffer(),
[](void* arg, grpc_error_handle error) {
auto cb = static_cast<SliceAllocator::AllocateCallback*>(arg);
(*cb)(grpc_error_to_absl_status(error));
delete cb;
},
new SliceAllocator::AllocateCallback(cb));
return absl::OkStatus();
}
private:
grpc_slice_allocator* slice_allocator_;
};
class NoopSliceAllocatorFactory : public SliceAllocatorFactory {
class WrappedInternalSliceAllocatorFactory : public SliceAllocatorFactory {
public:
explicit WrappedInternalSliceAllocatorFactory(
grpc_slice_allocator_factory* slice_allocator_factory)
: slice_allocator_factory_(slice_allocator_factory) {}
~WrappedInternalSliceAllocatorFactory() {
grpc_slice_allocator_factory_destroy(slice_allocator_factory_);
}
std::unique_ptr<SliceAllocator> CreateSliceAllocator(
absl::string_view peer_name) {
return absl::make_unique<NoopSliceAllocator>();
absl::string_view peer_name) override {
return absl::make_unique<WrappedInternalSliceAllocator>(
grpc_slice_allocator_factory_create_slice_allocator(
slice_allocator_factory_, peer_name));
};
private:
grpc_slice_allocator_factory* slice_allocator_factory_;
};
struct grpc_tcp_server {
@ -105,6 +135,7 @@ EventEngine::OnConnectCallback GrpcClosureToOnConnectCallback(
/// Usage note: this method does not take ownership of any pointer arguments.
void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* /* interested_parties */,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr, grpc_millis deadline) {
@ -114,16 +145,16 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
*endpoint = &ee_endpoint->base;
EventEngine::OnConnectCallback ee_on_connect =
GrpcClosureToOnConnectCallback(on_connect, endpoint);
// TODO(hork): tcp_connect will change to accept a SliceAllocator. This is
// temporary.
auto sa = absl::make_unique<NoopSliceAllocator>();
auto ee_slice_allocator =
absl::make_unique<WrappedInternalSliceAllocator>(slice_allocator);
EventEngine::ResolvedAddress ra(reinterpret_cast<const sockaddr*>(addr->addr),
addr->len);
absl::Time ee_deadline = grpc_core::ToAbslTime(
grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC));
ChannelArgsEndpointConfig endpoint_config(channel_args);
absl::Status connected = grpc_iomgr_event_engine()->Connect(
ee_on_connect, ra, endpoint_config, std::move(sa), ee_deadline);
ee_on_connect, ra, endpoint_config, std::move(ee_slice_allocator),
ee_deadline);
if (!connected.ok()) {
// EventEngine failed to start an asynchronous connect.
grpc_endpoint_destroy(*endpoint);
@ -133,17 +164,14 @@ void tcp_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
}
}
grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
grpc_error* tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server) {
ChannelArgsEndpointConfig endpoint_config(args);
grpc_resource_quota* rq = grpc_resource_quota_from_channel_args(args);
if (rq == nullptr) {
rq = grpc_resource_quota_create(nullptr);
}
// TODO(hork): tcp_server_create will change to accept a
// SliceAllocatorFactory. This is temporary.
auto saf = absl::make_unique<NoopSliceAllocatorFactory>();
auto ee_slice_allocator_factory =
absl::make_unique<WrappedInternalSliceAllocatorFactory>(
slice_allocator_factory);
EventEngine* event_engine = grpc_iomgr_event_engine();
absl::StatusOr<std::unique_ptr<EventEngine::Listener>> listener =
event_engine->CreateListener(
@ -164,7 +192,7 @@ grpc_error* tcp_server_create(grpc_closure* shutdown_complete,
grpc_pollset_ee_broadcast_event();
},
GrpcClosureToCallback(shutdown_complete, GRPC_ERROR_NONE),
endpoint_config, std::move(saf));
endpoint_config, std::move(ee_slice_allocator_factory));
if (!listener.ok()) {
return absl_status_to_grpc_error(listener.status());
}
@ -256,7 +284,8 @@ grpc_fd* grpc_fd_create(int /* fd */, const char* /* name */,
grpc_endpoint* grpc_tcp_client_create_from_fd(
grpc_fd* /* fd */, const grpc_channel_args* /* channel_args */,
const char* /* addr_str */) {
const char* /* addr_str */, grpc_slice_allocator* slice_allocator) {
grpc_slice_allocator_destroy(slice_allocator);
return nullptr;
}
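
One detail of WrappedInternalSliceAllocator::Allocate() worth calling out: because grpc_slice_allocator_allocate() reports completion through a C-style closure, the C++ AllocateCallback is copied onto the heap and a captureless lambda (which converts to a plain function pointer) reclaims it after running it. The trampoline in isolation, as a sketch:

    // `cb` is the caller's SliceAllocator::AllocateCallback (a std::function).
    auto* heap_cb = new SliceAllocator::AllocateCallback(cb);
    void (*trampoline)(void*, grpc_error_handle) =
        [](void* arg, grpc_error_handle error) {
          auto* done = static_cast<SliceAllocator::AllocateCallback*>(arg);
          (*done)(grpc_error_to_absl_status(error));
          delete done;  // one-shot: free the heap copy after invoking it
        };
    // trampoline and heap_cb are then passed as the (cb, arg) pair above.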

src/core/lib/iomgr/resource_quota.cc (167 changes)

@ -33,6 +33,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/iomgr/combiner.h"
#include "src/core/lib/slice/slice_internal.h"
@ -484,6 +485,7 @@ static grpc_slice ru_slice_create(grpc_resource_user* resource_user,
* the combiner
*/
// TODO(hork): rename all ru variables to resource_user
static void ru_allocate(void* ru, grpc_error_handle /*error*/) {
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
if (rulist_empty(resource_user->resource_quota,
@ -590,11 +592,14 @@ static void ru_destroy(void* ru, grpc_error_handle /*error*/) {
}
grpc_resource_quota_unref_internal(resource_user->resource_quota);
gpr_mu_destroy(&resource_user->mu);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) destroyed", resource_user->name.c_str(),
resource_user);
}
delete resource_user;
}
static void ru_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator) {
static void ru_alloc_slices(grpc_slice_allocator* slice_allocator) {
for (size_t i = 0; i < slice_allocator->count; i++) {
grpc_slice_buffer_add_indexed(
slice_allocator->dest, ru_slice_create(slice_allocator->resource_user,
@ -603,8 +608,8 @@ static void ru_alloc_slices(
}
static void ru_allocated_slices(void* arg, grpc_error_handle error) {
grpc_resource_user_slice_allocator* slice_allocator =
static_cast<grpc_resource_user_slice_allocator*>(arg);
grpc_slice_allocator* slice_allocator =
static_cast<grpc_slice_allocator*>(arg);
if (error == GRPC_ERROR_NONE) ru_alloc_slices(slice_allocator);
grpc_core::Closure::Run(DEBUG_LOCATION, &slice_allocator->on_done,
GRPC_ERROR_REF(error));
@ -740,16 +745,10 @@ size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
grpc_resource_quota* grpc_resource_quota_from_channel_args(
const grpc_channel_args* channel_args, bool create) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
if (channel_args->args[i].type == GRPC_ARG_POINTER) {
return grpc_resource_quota_ref_internal(
static_cast<grpc_resource_quota*>(
channel_args->args[i].value.pointer.p));
} else {
gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
}
}
auto* resource_quota = grpc_channel_args_find_pointer<grpc_resource_quota>(
channel_args, GRPC_ARG_RESOURCE_QUOTA);
if (resource_quota != nullptr) {
return grpc_resource_quota_ref_internal(resource_quota);
}
return create ? grpc_resource_quota_create(nullptr) : nullptr;
}
@ -775,7 +774,7 @@ const grpc_arg_pointer_vtable* grpc_resource_quota_arg_vtable(void) {
*/
grpc_resource_user* grpc_resource_user_create(
grpc_resource_quota* resource_quota, const char* name) {
grpc_resource_quota* resource_quota, absl::string_view name) {
grpc_resource_user* resource_user = new grpc_resource_user;
resource_user->resource_quota =
grpc_resource_quota_ref_internal(resource_quota);
@ -805,14 +804,16 @@ grpc_resource_user* grpc_resource_user_create(
for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
resource_user->links[i].next = resource_user->links[i].prev = nullptr;
}
// TODO(hork): the RU should own a copy of the name. See Craig's comments on
// the EventEngine gRFC for justification.
if (name != nullptr) {
resource_user->name = name;
resource_user->name = std::string(name);
} else {
resource_user->name = absl::StrCat(
"anonymous_resource_user_", reinterpret_cast<intptr_t>(resource_user));
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) created", resource_user->name.c_str(),
resource_user);
}
return resource_user;
}
@ -823,13 +824,22 @@ grpc_resource_quota* grpc_resource_user_quota(
static void ru_ref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
gpr_atm prior = gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) reffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, prior, prior + amount);
}
GPR_ASSERT(prior != 0);
}
static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) {
GPR_ASSERT(amount > 0);
gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
GPR_ASSERT(old >= amount);
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO, "RU '%s' (%p) unreffing: %" PRIdPTR " -> %" PRIdPTR,
resource_user->name.c_str(), resource_user, old, old - amount);
}
if (old == amount) {
resource_user->resource_quota->combiner->Run(
&resource_user->destroy_closure, GRPC_ERROR_NONE);
@ -857,9 +867,10 @@ bool grpc_resource_user_allocate_threads(grpc_resource_user* resource_user,
GPR_ASSERT(thread_count >= 0);
bool is_success = false;
gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
grpc_resource_quota* rq = resource_user->resource_quota;
if (rq->num_threads_allocated + thread_count <= rq->max_threads) {
rq->num_threads_allocated += thread_count;
grpc_resource_quota* resource_quota = resource_user->resource_quota;
if (resource_quota->num_threads_allocated + thread_count <=
resource_quota->max_threads) {
resource_quota->num_threads_allocated += thread_count;
gpr_atm_no_barrier_fetch_add(&resource_user->num_threads_allocated,
thread_count);
is_success = true;
@ -872,15 +883,16 @@ void grpc_resource_user_free_threads(grpc_resource_user* resource_user,
int thread_count) {
GPR_ASSERT(thread_count >= 0);
gpr_mu_lock(&resource_user->resource_quota->thread_count_mu);
grpc_resource_quota* rq = resource_user->resource_quota;
rq->num_threads_allocated -= thread_count;
grpc_resource_quota* resource_quota = resource_user->resource_quota;
resource_quota->num_threads_allocated -= thread_count;
int old_count = static_cast<int>(gpr_atm_no_barrier_fetch_add(
&resource_user->num_threads_allocated, -thread_count));
if (old_count < thread_count || rq->num_threads_allocated < 0) {
if (old_count < thread_count || resource_quota->num_threads_allocated < 0) {
gpr_log(GPR_ERROR,
"Releasing more threads (%d) than currently allocated (rq threads: "
"%d, ru threads: %d)",
thread_count, rq->num_threads_allocated + thread_count, old_count);
"Releasing more threads (%d) than currently allocated "
"(resource_quota threads: %d, ru threads: %d)",
thread_count, resource_quota->num_threads_allocated + thread_count,
old_count);
abort();
}
gpr_mu_unlock(&resource_user->resource_quota->thread_count_mu);
@ -988,19 +1000,69 @@ void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
GRPC_ERROR_NONE);
}
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator* slice_allocator,
grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p) {
grpc_slice_allocator* grpc_slice_allocator_create(
grpc_resource_quota* resource_quota, absl::string_view name,
const grpc_channel_args* args) {
grpc_slice_allocator* slice_allocator = new grpc_slice_allocator;
slice_allocator->min_length = grpc_channel_args_find_integer(
args, GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE,
{GRPC_SLICE_ALLOCATOR_MIN_ALLOCATE_SIZE, -1, INT_MAX});
slice_allocator->max_length = grpc_channel_args_find_integer(
args, GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE,
{GRPC_SLICE_ALLOCATOR_MAX_ALLOCATE_SIZE, -1, INT_MAX});
slice_allocator->resource_user =
grpc_resource_user_create(resource_quota, name);
GRPC_CLOSURE_INIT(&slice_allocator->on_allocated, ru_allocated_slices,
slice_allocator, grpc_schedule_on_exec_ctx);
GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p,
grpc_schedule_on_exec_ctx);
slice_allocator->resource_user = resource_user;
return slice_allocator;
}
void grpc_slice_allocator_destroy(grpc_slice_allocator* slice_allocator) {
ru_unref_by(slice_allocator->resource_user, 1);
delete slice_allocator;
}
bool grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest) {
static size_t grpc_slice_allocator_adjust_allocation_length(
grpc_slice_allocator* slice_allocator, size_t requested_length,
grpc_slice_allocator_intent intent) {
if (intent == grpc_slice_allocator_intent::kDefault) {
return requested_length;
}
GPR_ASSERT(intent == grpc_slice_allocator_intent::kReadBuffer);
double pressure = grpc_resource_quota_get_memory_pressure(
slice_allocator->resource_user->resource_quota);
// Reduce the allocation size in proportion to memory pressure once usage
// exceeds 80%.
size_t target =
requested_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
// Round the clamped target up to a multiple of 256 bytes.
target = ((static_cast<size_t> GPR_CLAMP(target, slice_allocator->min_length,
slice_allocator->max_length)) +
255) &
~static_cast<size_t>(255);
// Don't use more than 1/16th of the overall resource quota for a single
// read alloc
size_t rqmax = grpc_resource_quota_peek_size(
slice_allocator->resource_user->resource_quota);
if (target > rqmax / 16 && rqmax > 1024) {
target = rqmax / 16;
}
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(
GPR_INFO,
"SliceAllocator(%p) requested %zu bytes for (%s) intent, adjusted "
"allocation size to %zu",
slice_allocator, requested_length,
intent == grpc_slice_allocator_intent::kDefault ? "default" : "read",
target);
}
return target;
}
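As a worked example of the adjustment above (values assumed, not from the patch): a read-buffer request for 16384 bytes under memory pressure 0.9 is scaled by (1.0 - 0.9) / 0.2 = 0.5 to 8192 bytes; clamping to [min_length, max_length] and rounding up to a multiple of 256 leaves 8192; if the quota holds 64 KiB, the 1/16 cap (65536 / 16 = 4096) then brings the final allocation down to 4096 bytes.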
bool grpc_slice_allocator_allocate(grpc_slice_allocator* slice_allocator,
size_t length, size_t count,
grpc_slice_allocator_intent intent,
grpc_slice_buffer* dest,
grpc_iomgr_cb_func cb, void* p) {
if (GPR_UNLIKELY(
gpr_atm_no_barrier_load(&slice_allocator->resource_user->shutdown))) {
grpc_core::ExecCtx::Run(
@ -1008,12 +1070,35 @@ bool grpc_resource_user_alloc_slices(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resource user shutdown"));
return false;
}
slice_allocator->length = length;
GRPC_CLOSURE_INIT(&slice_allocator->on_done, cb, p,
grpc_schedule_on_exec_ctx);
slice_allocator->length = grpc_slice_allocator_adjust_allocation_length(
slice_allocator, length, intent);
slice_allocator->count = count;
slice_allocator->dest = dest;
const bool ret =
grpc_resource_user_alloc(slice_allocator->resource_user, count * length,
&slice_allocator->on_allocated);
const bool ret = grpc_resource_user_alloc(slice_allocator->resource_user,
count * slice_allocator->length,
&slice_allocator->on_allocated);
if (ret) ru_alloc_slices(slice_allocator);
return ret;
}
grpc_slice_allocator_factory* grpc_slice_allocator_factory_create(
grpc_resource_quota* resource_quota) {
grpc_slice_allocator_factory* factory = new grpc_slice_allocator_factory;
factory->resource_quota = resource_quota;
return factory;
}
grpc_slice_allocator* grpc_slice_allocator_factory_create_slice_allocator(
grpc_slice_allocator_factory* slice_allocator_factory,
absl::string_view name, grpc_channel_args* args) {
return grpc_slice_allocator_create(slice_allocator_factory->resource_quota,
name, args);
}
void grpc_slice_allocator_factory_destroy(
grpc_slice_allocator_factory* slice_allocator_factory) {
grpc_resource_quota_unref_internal(slice_allocator_factory->resource_quota);
delete slice_allocator_factory;
}

@ -69,6 +69,8 @@ extern grpc_core::TraceFlag grpc_resource_quota_trace;
// hard coding.
constexpr size_t GRPC_RESOURCE_QUOTA_CALL_SIZE = 15 * 1024;
constexpr size_t GRPC_RESOURCE_QUOTA_CHANNEL_SIZE = 50 * 1024;
constexpr size_t GRPC_SLICE_ALLOCATOR_MIN_ALLOCATE_SIZE = 256;
constexpr size_t GRPC_SLICE_ALLOCATOR_MAX_ALLOCATE_SIZE = 4 * 1024 * 1024;
grpc_resource_quota* grpc_resource_quota_ref_internal(
grpc_resource_quota* resource_quota);
@ -87,7 +89,7 @@ size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota);
typedef struct grpc_resource_user grpc_resource_user;
grpc_resource_user* grpc_resource_user_create(
grpc_resource_quota* resource_quota, const char* name);
grpc_resource_quota* resource_quota, absl::string_view name);
/* Returns a borrowed reference to the underlying resource quota for this
resource user. */
@ -145,7 +147,7 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user);
/* Helper to allocate slices from a resource user */
typedef struct grpc_resource_user_slice_allocator {
typedef struct grpc_slice_allocator {
/* Closure for when a resource user allocation completes */
grpc_closure on_allocated;
/* Closure to call when slices have been allocated */
@ -154,24 +156,71 @@ typedef struct grpc_resource_user_slice_allocator {
size_t length;
/* Number of slices to allocate on the current request */
size_t count;
/* Minimum size to allocate under memory pressure. */
size_t min_length;
/* Maximum size that can be allocated. */
size_t max_length;
/* Destination for slices to allocate on the current request */
grpc_slice_buffer* dest;
/* Parent resource user */
grpc_resource_user* resource_user;
} grpc_resource_user_slice_allocator;
/* Initialize a slice allocator.
When an allocation is completed, calls \a cb with arg \p. */
void grpc_resource_user_slice_allocator_init(
grpc_resource_user_slice_allocator* slice_allocator,
grpc_resource_user* resource_user, grpc_iomgr_cb_func cb, void* p);
/* Allocate \a count slices of length \a length into \a dest. Only one request
can be outstanding at a time.
Returns whether the slice was allocated inline in the function. If true,
the \a slice_allocator->on_allocated callback will not be called. */
bool grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator* slice_allocator, size_t length,
size_t count, grpc_slice_buffer* dest) GRPC_MUST_USE_RESULT;
} grpc_slice_allocator;
/// Constructs a slice allocator using configuration from \a args.
///
/// Minimum and maximum limits for memory allocation size can be defined in
/// \a args, and used to configure an allocator. See
/// \a grpc_slice_allocator_allocate for details on how those values are used.
///
/// Caller is responsible for calling \a grpc_slice_allocator_destroy.
grpc_slice_allocator* grpc_slice_allocator_create(
grpc_resource_quota* resource_quota, absl::string_view name,
const grpc_channel_args* args = nullptr);
/* Cleans up after a slice_allocator. */
void grpc_slice_allocator_destroy(grpc_slice_allocator* slice_allocator);
enum class grpc_slice_allocator_intent {
kDefault, // Default intent allocates exactly the memory required.
kReadBuffer // ReadBuffer intent may return a smaller slice than requested if
// memory pressure is high.
};
/** Allocate \a count slices of length \a length into \a dest. Only one request
can be outstanding at a time. When an allocation is completed, calls \a cb
with arg \a p. Returns whether the slices were allocated inline in the
function; if true, \a cb will not be called. The \a intent argument
allows allocation of smaller slices when memory pressure is high; the exact
size is implementation-dependent. */
bool grpc_slice_allocator_allocate(grpc_slice_allocator* slice_allocator,
size_t length, size_t count,
grpc_slice_allocator_intent intent,
grpc_slice_buffer* dest,
grpc_iomgr_cb_func cb,
void* p) GRPC_MUST_USE_RESULT;
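A hedged usage sketch of this API, assuming an ExecCtx is on the stack, `q` is a caller-owned quota, and on_slices_ready is a hypothetical caller-supplied callback:

static void on_slices_ready(void* arg, grpc_error_handle error) {
  // Slices (if any) are now in the destination buffer passed to allocate().
}

grpc_slice_buffer dest;
grpc_slice_buffer_init(&dest);
grpc_slice_allocator* alloc = grpc_slice_allocator_create(q, "example_reader");
if (grpc_slice_allocator_allocate(alloc, 8192, /*count=*/1,
                                  grpc_slice_allocator_intent::kReadBuffer,
                                  &dest, on_slices_ready, /*p=*/nullptr)) {
  // Allocated inline; the callback will not run.
}
// ... later, once no allocation is outstanding ...
grpc_slice_allocator_destroy(alloc);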
/* Allows creation of slice_allocators (and thus resource_users) without
 * requiring calling code to understand resource_user concepts. */
typedef struct grpc_slice_allocator_factory {
/* Parent resource quota */
grpc_resource_quota* resource_quota;
} grpc_slice_allocator_factory;
/* Constructs a slice allocator factory. Takes ownership of a ref on
* \a resource_quota from the caller. Caller is responsible for calling \a
* grpc_slice_allocator_factory_destroy. */
grpc_slice_allocator_factory* grpc_slice_allocator_factory_create(
grpc_resource_quota* resource_quota);
/* Cleans up after a slice_allocator_factory. */
void grpc_slice_allocator_factory_destroy(
grpc_slice_allocator_factory* slice_allocator_factory);
/** A factory method to create and initialize a slice_allocator using the
factory's resource quota. \a name is the resulting resource_user name. \a args
are used to configure the \a slice_allocator */
grpc_slice_allocator* grpc_slice_allocator_factory_create_slice_allocator(
grpc_slice_allocator_factory* slice_allocator_factory,
absl::string_view name, grpc_channel_args* args = nullptr);
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
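A hedged sketch of the factory pattern these declarations enable: a listener holds one factory and mints a per-connection slice_allocator. `q` is a caller-owned quota and the names are illustrative.

// The factory takes over the ref created here; the caller keeps its own.
grpc_slice_allocator_factory* factory =
    grpc_slice_allocator_factory_create(grpc_resource_quota_ref_internal(q));
grpc_slice_allocator* per_conn =
    grpc_slice_allocator_factory_create_slice_allocator(
        factory, "tcp_server:127.0.0.1:50051");
// Hand per_conn to the new endpoint, which takes ownership of it.
grpc_slice_allocator_factory_destroy(factory);  // when the listener shuts down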

@ -23,12 +23,14 @@
grpc_tcp_client_vtable* grpc_tcp_client_impl;
void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
grpc_millis deadline) {
grpc_tcp_client_impl->connect(on_connect, endpoint, interested_parties,
channel_args, addr, deadline);
grpc_tcp_client_impl->connect(on_connect, endpoint, slice_allocator,
interested_parties, channel_args, addr,
deadline);
}
void grpc_set_tcp_client_impl(grpc_tcp_client_vtable* impl) {

@ -26,9 +26,11 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/resource_quota.h"
typedef struct grpc_tcp_client_vtable {
void (*connect)(grpc_closure* on_connect, grpc_endpoint** endpoint,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr, grpc_millis deadline);
@ -40,6 +42,7 @@ typedef struct grpc_tcp_client_vtable {
interested_parties points to a set of pollsets that would be interested
in this connection being established (in order to continue their work) */
void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,

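A hedged call-site sketch for the widened connect signature; identifiers prefixed ex_ are hypothetical and `q` is a caller-owned quota. On success the resulting endpoint owns the allocator; on failure the connect path destroys it.

grpc_endpoint* ex_endpoint = nullptr;
grpc_closure ex_on_connect;
GRPC_CLOSURE_INIT(&ex_on_connect, ex_on_connected, /*arg=*/nullptr,
                  grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&ex_on_connect, &ex_endpoint,
                        grpc_slice_allocator_create(q, "ex_client_conn"),
                        ex_interested_parties, ex_channel_args, &ex_addr,
                        ex_deadline);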
@ -66,14 +66,16 @@ struct CFStreamConnect {
grpc_endpoint** endpoint;
int refs;
std::string addr_name;
grpc_resource_quota* resource_quota;
grpc_slice_allocator* slice_allocator;
};
static void CFStreamConnectCleanup(CFStreamConnect* connect) {
grpc_resource_quota_unref_internal(connect->resource_quota);
CFSTREAM_HANDLE_UNREF(connect->stream_handle, "async connect clean up");
CFRelease(connect->read_stream);
CFRelease(connect->write_stream);
if (connect->slice_allocator != nullptr) {
grpc_slice_allocator_destroy(connect->slice_allocator);
}
gpr_mu_destroy(&connect->mu);
delete connect;
}
@ -130,8 +132,9 @@ static void OnOpen(void* arg, grpc_error_handle error) {
if (error == GRPC_ERROR_NONE) {
*endpoint = grpc_cfstream_endpoint_create(
connect->read_stream, connect->write_stream,
connect->addr_name.c_str(), connect->resource_quota,
connect->addr_name.c_str(), connect->slice_allocator,
connect->stream_handle);
connect->slice_allocator = nullptr;
}
} else {
GRPC_ERROR_REF(error);
@ -153,6 +156,7 @@ static void ParseResolvedAddress(const grpc_resolved_address* addr,
}
static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* resolved_addr,
@ -161,7 +165,6 @@ static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep,
connect->closure = closure;
connect->endpoint = ep;
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
// connect->resource_quota = resource_quota;
connect->refs = 2; // One for the connect operation, one for the timer.
gpr_ref_init(&connect->refcount, 1);
gpr_mu_init(&connect->mu);
@ -170,18 +173,7 @@ static void CFStreamClientConnect(grpc_closure* closure, grpc_endpoint** ep,
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p, %s: asynchronously connecting",
connect, connect->addr_name.c_str());
}
grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
}
}
connect->resource_quota = resource_quota;
connect->slice_allocator = slice_allocator;
CFReadStreamRef read_stream;
CFWriteStreamRef write_stream;

@ -43,12 +43,14 @@ struct grpc_custom_tcp_connect {
grpc_endpoint** endpoint;
int refs;
std::string addr_name;
grpc_resource_quota* resource_quota;
grpc_slice_allocator* slice_allocator;
};
static void custom_tcp_connect_cleanup(grpc_custom_tcp_connect* connect) {
if (connect->slice_allocator != nullptr) {
grpc_slice_allocator_destroy(connect->slice_allocator);
}
grpc_custom_socket* socket = connect->socket;
grpc_resource_quota_unref_internal(connect->resource_quota);
delete connect;
socket->refs--;
if (socket->refs == 0) {
@ -87,7 +89,8 @@ static void custom_connect_callback_internal(grpc_custom_socket* socket,
grpc_timer_cancel(&connect->alarm);
if (error == GRPC_ERROR_NONE) {
*connect->endpoint = custom_tcp_endpoint_create(
socket, connect->resource_quota, connect->addr_name.c_str());
socket, connect->slice_allocator, connect->addr_name.c_str());
connect->slice_allocator = nullptr;
}
done = (--connect->refs == 0);
if (done) {
@ -111,6 +114,7 @@ static void custom_connect_callback(grpc_custom_socket* socket,
}
static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* resolved_addr,
@ -118,17 +122,6 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
(void)channel_args;
(void)interested_parties;
grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
if (channel_args != nullptr) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota =
grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
channel_args->args[i].value.pointer.p));
}
}
}
grpc_custom_socket* socket =
static_cast<grpc_custom_socket*>(gpr_malloc(sizeof(grpc_custom_socket)));
socket->refs = 2;
@ -137,7 +130,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
connect->closure = closure;
connect->endpoint = ep;
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
connect->resource_quota = resource_quota;
connect->slice_allocator = slice_allocator;
connect->socket = socket;
socket->connector = connect;
socket->endpoint = nullptr;

@ -63,6 +63,7 @@ struct async_connect {
grpc_endpoint** ep;
grpc_closure* closure;
grpc_channel_args* channel_args;
grpc_slice_allocator* slice_allocator;
};
static grpc_error_handle prepare_socket(const grpc_resolved_address* addr,
@ -118,14 +119,18 @@ static void tc_on_alarm(void* acp, grpc_error_handle error) {
gpr_mu_unlock(&ac->mu);
if (done) {
gpr_mu_destroy(&ac->mu);
if (ac->slice_allocator != nullptr) {
grpc_slice_allocator_destroy(ac->slice_allocator);
}
grpc_channel_args_destroy(ac->channel_args);
delete ac;
}
}
grpc_endpoint* grpc_tcp_client_create_from_fd(
grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str) {
return grpc_tcp_create(fd, channel_args, addr_str);
grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str,
grpc_slice_allocator* slice_allocator) {
return grpc_tcp_create(fd, channel_args, addr_str, slice_allocator);
}
static void on_writable(void* acp, grpc_error_handle error) {
@ -174,8 +179,9 @@ static void on_writable(void* acp, grpc_error_handle error) {
switch (so_error) {
case 0:
grpc_pollset_set_del_fd(ac->interested_parties, fd);
*ep = grpc_tcp_client_create_from_fd(fd, ac->channel_args,
ac->addr_str.c_str());
*ep = grpc_tcp_client_create_from_fd(
fd, ac->channel_args, ac->addr_str.c_str(), ac->slice_allocator);
ac->slice_allocator = nullptr;
fd = nullptr;
break;
case ENOBUFS:
@ -237,6 +243,10 @@ finish:
// This is safe even outside the lock, because "done", the sentinel, is
// populated *inside* the lock.
gpr_mu_destroy(&ac->mu);
if (ac->slice_allocator != nullptr) {
grpc_slice_allocator_destroy(ac->slice_allocator);
ac->slice_allocator = nullptr;
}
grpc_channel_args_destroy(ac->channel_args);
delete ac;
}
@ -279,7 +289,8 @@ grpc_error_handle grpc_tcp_client_prepare_fd(
void grpc_tcp_client_create_from_prepared_fd(
grpc_pollset_set* interested_parties, grpc_closure* closure, const int fd,
const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
grpc_millis deadline, grpc_endpoint** ep) {
grpc_millis deadline, grpc_endpoint** ep,
grpc_slice_allocator* slice_allocator) {
int err;
do {
err = connect(fd, reinterpret_cast<const grpc_sockaddr*>(addr->addr),
@ -291,11 +302,13 @@ void grpc_tcp_client_create_from_prepared_fd(
if (err >= 0) {
*ep = grpc_tcp_client_create_from_fd(fdobj, channel_args,
grpc_sockaddr_to_uri(addr).c_str());
grpc_sockaddr_to_uri(addr).c_str(),
slice_allocator);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_NONE);
return;
}
if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
grpc_slice_allocator_destroy(slice_allocator);
grpc_error_handle error = GRPC_OS_ERROR(errno, "connect");
error = grpc_error_set_str(
error, GRPC_ERROR_STR_TARGET_ADDRESS,
@ -315,6 +328,7 @@ void grpc_tcp_client_create_from_prepared_fd(
ac->addr_str = grpc_sockaddr_to_uri(addr);
gpr_mu_init(&ac->mu);
ac->refs = 2;
ac->slice_allocator = slice_allocator;
GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac,
grpc_schedule_on_exec_ctx);
ac->channel_args = grpc_channel_args_copy(channel_args);
@ -332,6 +346,7 @@ void grpc_tcp_client_create_from_prepared_fd(
}
static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
@ -342,12 +357,13 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
*ep = nullptr;
if ((error = grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr,
&fd)) != GRPC_ERROR_NONE) {
grpc_slice_allocator_destroy(slice_allocator);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, error);
return;
}
grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fd,
channel_args, &mapped_addr, deadline,
ep);
ep, slice_allocator);
}
grpc_tcp_client_vtable grpc_posix_tcp_client_vtable = {tcp_connect};

@ -30,10 +30,12 @@
fd: a connected FD. Ownership is taken.
channel_args: may contain custom settings for the endpoint
addr_str: destination address in printable format
slice_allocator: ownership is taken by the created endpoint.
Returns: a new endpoint
*/
grpc_endpoint* grpc_tcp_client_create_from_fd(
grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str);
grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str,
grpc_slice_allocator* slice_allocator);
/* Return a configured, unbound, unconnected TCP client fd.
@ -62,6 +64,7 @@ grpc_error_handle grpc_tcp_client_prepare_fd(
void grpc_tcp_client_create_from_prepared_fd(
grpc_pollset_set* interested_parties, grpc_closure* closure, const int fd,
const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
grpc_millis deadline, grpc_endpoint** ep);
grpc_millis deadline, grpc_endpoint** ep,
grpc_slice_allocator* slice_allocator);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */

@ -52,6 +52,7 @@ struct async_connect {
grpc_closure on_connect;
grpc_endpoint** endpoint;
grpc_channel_args* channel_args;
grpc_slice_allocator* slice_allocator;
};
static void async_connect_unlock_and_cleanup(async_connect* ac,
@ -61,6 +62,9 @@ static void async_connect_unlock_and_cleanup(async_connect* ac,
if (done) {
grpc_channel_args_destroy(ac->channel_args);
gpr_mu_destroy(&ac->mu);
if (ac->slice_allocator != nullptr) {
grpc_slice_allocator_destroy(ac->slice_allocator);
}
delete ac;
}
if (socket != NULL) grpc_winsocket_destroy(socket);
@ -106,8 +110,10 @@ static void on_connect(void* acp, grpc_error_handle error) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
closesocket(socket->socket);
} else {
*ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name.c_str());
socket = NULL;
*ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name.c_str(),
ac->slice_allocator);
ac->slice_allocator = nullptr;
socket = nullptr;
}
} else {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("socket is null");
@ -123,6 +129,7 @@ static void on_connect(void* acp, grpc_error_handle error) {
/* Tries to issue one async connection, then schedules both an IOCP
notification request for the connection, and one timeout alert. */
static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
grpc_slice_allocator* slice_allocator,
grpc_pollset_set* interested_parties,
const grpc_channel_args* channel_args,
const grpc_resolved_address* addr,
@ -202,6 +209,7 @@ static void tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
ac->refs = 2;
ac->addr_name = grpc_sockaddr_to_uri(addr);
ac->endpoint = endpoint;
ac->slice_allocator = slice_allocator;
ac->channel_args = grpc_channel_args_copy(channel_args);
GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
@ -221,6 +229,7 @@ failure:
GRPC_ERROR_STR_TARGET_ADDRESS,
grpc_slice_from_cpp_string(std::move(target_uri)));
GRPC_ERROR_UNREF(error);
grpc_slice_allocator_destroy(slice_allocator);
if (socket != NULL) {
grpc_winsocket_destroy(socket);
} else if (sock != INVALID_SOCKET) {

@ -64,8 +64,7 @@ struct custom_tcp_endpoint {
grpc_slice_buffer* read_slices = nullptr;
grpc_slice_buffer* write_slices = nullptr;
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
grpc_slice_allocator* slice_allocator;
bool shutting_down;
@ -75,7 +74,7 @@ struct custom_tcp_endpoint {
static void tcp_free(grpc_custom_socket* s) {
custom_tcp_endpoint* tcp =
reinterpret_cast<custom_tcp_endpoint*>(s->endpoint);
grpc_resource_user_unref(tcp->resource_user);
grpc_slice_allocator_destroy(tcp->slice_allocator);
delete tcp;
s->refs--;
if (s->refs == 0) {
@ -203,9 +202,10 @@ static void endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
tcp->read_slices = read_slices;
grpc_slice_buffer_reset_and_unref_internal(read_slices);
TCP_REF(tcp, "read");
if (grpc_resource_user_alloc_slices(&tcp->slice_allocator,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
tcp->read_slices)) {
if (grpc_slice_allocator_allocate(
tcp->slice_allocator, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
grpc_slice_allocator_intent::kReadBuffer, tcp->read_slices,
tcp_read_allocation_done, tcp)) {
tcp_read_allocation_done(tcp, GRPC_ERROR_NONE);
}
}
@ -297,7 +297,6 @@ static void endpoint_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
// GRPC_ERROR_REF(why));
// grpc_core::ExecCtx::Run(DEBUG_LOCATION,tcp->write_cb,
// GRPC_ERROR_REF(why)); tcp->read_cb = nullptr; tcp->write_cb = nullptr;
grpc_resource_user_shutdown(tcp->resource_user);
grpc_custom_socket_vtable->shutdown(tcp->socket);
}
GRPC_ERROR_UNREF(why);
@ -332,11 +331,6 @@ static absl::string_view endpoint_get_local_address(grpc_endpoint* ep) {
return tcp->local_address;
}
static grpc_resource_user* endpoint_get_resource_user(grpc_endpoint* ep) {
custom_tcp_endpoint* tcp = reinterpret_cast<custom_tcp_endpoint*>(ep);
return tcp->resource_user;
}
static int endpoint_get_fd(grpc_endpoint* /*ep*/) { return -1; }
static bool endpoint_can_track_err(grpc_endpoint* /*ep*/) { return false; }
@ -348,14 +342,13 @@ static grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_delete_from_pollset_set,
endpoint_shutdown,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
endpoint_get_local_address,
endpoint_get_fd,
endpoint_can_track_err};
grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_resource_quota* resource_quota,
grpc_slice_allocator* slice_allocator,
const char* peer_string) {
custom_tcp_endpoint* tcp = new custom_tcp_endpoint;
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
@ -381,9 +374,6 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
tcp->local_address = grpc_sockaddr_to_uri(&resolved_local_addr);
}
tcp->shutting_down = false;
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
&tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
tcp->slice_allocator = slice_allocator;
return &tcp->base;
}

@ -78,8 +78,9 @@ void grpc_custom_endpoint_init(grpc_socket_vtable* impl);
void grpc_custom_close_server_callback(grpc_tcp_listener* listener);
/// Takes ownership of \a slice_allocator.
grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_resource_quota* resource_quota,
grpc_slice_allocator* slice_allocator,
const char* peer_string);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_CUSTOM_H */

@ -55,6 +55,7 @@
#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/slice/slice_internal.h"
@ -392,8 +393,7 @@ struct grpc_tcp {
std::string peer_string;
std::string local_address;
grpc_resource_user* resource_user;
grpc_resource_user_slice_allocator slice_allocator;
grpc_slice_allocator* slice_allocator;
grpc_core::TracedBuffer* tb_head; /* List of traced buffers */
gpr_mu tb_mu; /* Lock for access to list of traced buffers */
@ -584,24 +584,6 @@ static void finish_estimate(grpc_tcp* tcp) {
tcp->bytes_read_this_round = 0;
}
static size_t get_target_read_size(grpc_tcp* tcp) {
grpc_resource_quota* rq = grpc_resource_user_quota(tcp->resource_user);
double pressure = grpc_resource_quota_get_memory_pressure(rq);
double target =
tcp->target_length * (pressure > 0.8 ? (1.0 - pressure) / 0.2 : 1.0);
size_t sz = ((static_cast<size_t> GPR_CLAMP(target, tcp->min_read_chunk_size,
tcp->max_read_chunk_size)) +
255) &
~static_cast<size_t>(255);
/* don't use more than 1/16th of the overall resource quota for a single read
* alloc */
size_t rqmax = grpc_resource_quota_peek_size(rq);
if (sz > rqmax / 16 && rqmax > 1024) {
sz = rqmax / 16;
}
return sz;
}
static grpc_error_handle tcp_annotate_error(grpc_error_handle src_error,
grpc_tcp* tcp) {
return grpc_error_set_str(
@ -621,14 +603,13 @@ static void tcp_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
ZerocopyDisableAndWaitForRemaining(tcp);
grpc_fd_shutdown(tcp->em_fd, why);
grpc_resource_user_shutdown(tcp->resource_user);
}
static void tcp_free(grpc_tcp* tcp) {
grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
"tcp_unref_orphan");
grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
grpc_resource_user_unref(tcp->resource_user);
grpc_slice_allocator_destroy(tcp->slice_allocator);
/* The lock is not really necessary here, since all refs have been released */
gpr_mu_lock(&tcp->tb_mu);
grpc_core::TracedBuffer::Shutdown(
@ -864,16 +845,16 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error_handle error) {
}
static void tcp_continue_read(grpc_tcp* tcp) {
size_t target_read_size = get_target_read_size(tcp);
/* Wait for allocation only when there is no buffer left. */
if (tcp->incoming_buffer->length == 0 &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
}
if (GPR_UNLIKELY(!grpc_resource_user_alloc_slices(&tcp->slice_allocator,
target_read_size, 1,
tcp->incoming_buffer))) {
if (GPR_UNLIKELY(!grpc_slice_allocator_allocate(
tcp->slice_allocator, tcp->target_length, 1,
grpc_slice_allocator_intent::kReadBuffer, tcp->incoming_buffer,
tcp_read_allocation_done, tcp))) {
// Wait for allocation.
return;
}
@ -1656,11 +1637,6 @@ static int tcp_get_fd(grpc_endpoint* ep) {
return tcp->fd;
}
static grpc_resource_user* tcp_get_resource_user(grpc_endpoint* ep) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
return tcp->resource_user;
}
static bool tcp_can_track_err(grpc_endpoint* ep) {
grpc_tcp* tcp = reinterpret_cast<grpc_tcp*>(ep);
if (!grpc_event_engine_can_track_errors()) {
@ -1681,7 +1657,6 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
tcp_delete_from_pollset_set,
tcp_shutdown,
tcp_destroy,
tcp_get_resource_user,
tcp_get_peer,
tcp_get_local_address,
tcp_get_fd,
@ -1691,7 +1666,8 @@ static const grpc_endpoint_vtable vtable = {tcp_read,
grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
const grpc_channel_args* channel_args,
const char* peer_string) {
const char* peer_string,
grpc_slice_allocator* slice_allocator) {
static constexpr bool kZerocpTxEnabledDefault = false;
int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
int tcp_max_read_chunk_size = 4 * 1024 * 1024;
@ -1701,7 +1677,6 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
grpc_core::TcpZerocopySendCtx::kDefaultSendBytesThreshold;
int tcp_tx_zerocopy_max_simult_sends =
grpc_core::TcpZerocopySendCtx::kDefaultMaxSends;
grpc_resource_quota* resource_quota = grpc_resource_quota_create(nullptr);
if (channel_args != nullptr) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 ==
@ -1719,12 +1694,6 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
grpc_integer_options options = {tcp_read_chunk_size, 1, MAX_CHUNK_SIZE};
tcp_max_read_chunk_size =
grpc_channel_arg_get_integer(&channel_args->args[i], options);
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota =
grpc_resource_quota_ref_internal(static_cast<grpc_resource_quota*>(
channel_args->args[i].value.pointer.p));
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_TCP_TX_ZEROCOPY_ENABLED)) {
tcp_tx_zerocopy_enabled = grpc_channel_arg_get_bool(
@ -1757,6 +1726,7 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
tcp->base.vtable = &vtable;
tcp->peer_string = peer_string;
tcp->fd = grpc_fd_wrapped_fd(em_fd);
tcp->slice_allocator = slice_allocator;
grpc_resolved_address resolved_local_addr;
memset(&resolved_local_addr, 0, sizeof(resolved_local_addr));
resolved_local_addr.len = sizeof(resolved_local_addr.addr);
@ -1801,10 +1771,6 @@ grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
tcp->em_fd = em_fd;
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_user_slice_allocator_init(
&tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_init(&tcp->tb_mu);
tcp->tb_head = nullptr;
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,

@ -37,23 +37,23 @@
#include "src/core/lib/iomgr/buffer_list.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/resource_quota.h"
extern grpc_core::TraceFlag grpc_tcp_trace;
/* Create a tcp endpoint given a file descriptor and a read slice size.
Takes ownership of fd. */
/// Create a tcp endpoint given a file descriptor and a read slice size.
/// Takes ownership of \a fd. Takes ownership of the \a slice_allocator.
grpc_endpoint* grpc_tcp_create(grpc_fd* fd, const grpc_channel_args* args,
const char* peer_string);
const char* peer_string,
grpc_slice_allocator* slice_allocator);
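A hedged sketch of wrapping an already-connected fd with the new parameter; ex_* names are hypothetical and `q` is a caller-owned quota. The endpoint takes ownership of both the fd and the allocator.

grpc_endpoint* ex_ep = grpc_tcp_create(
    grpc_fd_create(ex_connected_fd, "ex_tcp", /*track_err=*/false),
    ex_channel_args, "ipv4:127.0.0.1:50051",
    grpc_slice_allocator_create(q, "ipv4:127.0.0.1:50051", ex_channel_args));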
/* Return the tcp endpoint's fd, or -1 if this is not available. Does not
release the fd.
Requires: ep must be a tcp endpoint.
*/
/// Return the tcp endpoint's fd, or -1 if this is not available. Does not
/// release the fd. Requires: \a ep must be a tcp endpoint.
int grpc_tcp_fd(grpc_endpoint* ep);
/* Destroy the tcp endpoint without closing its fd. *fd will be set and done
* will be called when the endpoint is destroyed.
* Requires: ep must be a tcp endpoint and fd must not be NULL. */
/// Destroy the tcp endpoint without closing its fd. *fd will be set and done
/// will be called when the endpoint is destroyed. Requires: \a ep must be a tcp
/// endpoint and fd must not be NULL.
void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
grpc_closure* done);

@ -22,10 +22,12 @@
grpc_tcp_server_vtable* grpc_tcp_server_impl;
grpc_error_handle grpc_tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
return grpc_tcp_server_impl->create(shutdown_complete, args, server);
grpc_error_handle grpc_tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server) {
return grpc_tcp_server_impl->create(shutdown_complete, args,
slice_allocator_factory, server);
}
void grpc_tcp_server_start(grpc_tcp_server* server,

@ -63,9 +63,10 @@ class TcpServerFdHandler {
} // namespace grpc_core
typedef struct grpc_tcp_server_vtable {
grpc_error_handle (*create)(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server);
grpc_error_handle (*create)(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server);
void (*start)(grpc_tcp_server* server,
const std::vector<grpc_pollset*>* pollsets,
grpc_tcp_server_cb on_accept_cb, void* cb_arg);
@ -84,10 +85,12 @@ typedef struct grpc_tcp_server_vtable {
/* Create a server, initially not bound to any ports. The caller owns one ref.
If shutdown_complete is not NULL, it will be used by
grpc_tcp_server_unref() when the ref count reaches zero. */
grpc_error_handle grpc_tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server);
grpc_tcp_server_unref() when the ref count reaches zero.
Takes ownership of the slice_allocator_factory. */
grpc_error_handle grpc_tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server);
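A hedged creation sketch under the new signature; the factory ref passed in is owned by the server from this point, `q` is a caller-owned quota, and ex_* names are hypothetical.

grpc_tcp_server* ex_server = nullptr;
grpc_error_handle ex_err = grpc_tcp_server_create(
    /*shutdown_complete=*/nullptr, ex_channel_args,
    grpc_slice_allocator_factory_create(grpc_resource_quota_ref_internal(q)),
    &ex_server);
GPR_ASSERT(ex_err == GRPC_ERROR_NONE);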
/* Start listening to bound ports */
void grpc_tcp_server_start(grpc_tcp_server* server,

@ -77,34 +77,17 @@ struct grpc_tcp_server {
bool shutdown;
bool so_reuseport;
grpc_resource_quota* resource_quota;
grpc_slice_allocator_factory* slice_allocator_factory;
};
static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
static grpc_error_handle tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server) {
grpc_tcp_server* s =
static_cast<grpc_tcp_server*>(gpr_malloc(sizeof(grpc_tcp_server)));
// Let the implementation decide if so_reuseport can be enabled or not.
s->so_reuseport = true;
s->resource_quota = grpc_resource_quota_create(nullptr);
for (size_t i = 0; i < (args == nullptr ? 0 : args->num_args); i++) {
if (!grpc_channel_args_find_bool(args, GRPC_ARG_ALLOW_REUSEPORT, true)) {
s->so_reuseport = false;
}
if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
grpc_resource_quota_unref_internal(s->resource_quota);
s->resource_quota = grpc_resource_quota_ref_internal(
static_cast<grpc_resource_quota*>(args->args[i].value.pointer.p));
} else {
grpc_resource_quota_unref_internal(s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
}
}
}
s->so_reuseport =
grpc_channel_args_find_bool(args, GRPC_ARG_ALLOW_REUSEPORT, true);
gpr_ref_init(&s->refs, 1);
s->on_accept_cb = nullptr;
s->on_accept_cb_arg = nullptr;
@ -115,6 +98,7 @@ static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
s->shutdown_starting.tail = nullptr;
s->shutdown_complete = shutdown_complete;
s->shutdown = false;
s->slice_allocator_factory = slice_allocator_factory;
*server = s;
return GRPC_ERROR_NONE;
}
@ -144,7 +128,7 @@ static void finish_shutdown(grpc_tcp_server* s) {
sp->next = nullptr;
gpr_free(sp);
}
grpc_resource_quota_unref_internal(s->resource_quota);
grpc_slice_allocator_factory_destroy(s->slice_allocator_factory);
gpr_free(s);
}
@ -235,8 +219,11 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s", sp->server,
peer_name_string.c_str());
}
ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
peer_name_string.c_str());
ep = custom_tcp_endpoint_create(
socket,
grpc_slice_allocator_factory_create_slice_allocator(
sp->server->slice_allocator_factory, peer_name_string),
peer_name_string.c_str());
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;

@ -60,9 +60,10 @@
#include "src/core/lib/iomgr/tcp_server_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
static grpc_error_handle tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server) {
grpc_tcp_server* s =
static_cast<grpc_tcp_server*>(gpr_zalloc(sizeof(grpc_tcp_server)));
s->so_reuseport = grpc_is_socket_reuse_port_supported();
@ -74,6 +75,7 @@ static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
(args->args[i].value.integer != 0);
} else {
gpr_free(s);
grpc_slice_allocator_factory_destroy(slice_allocator_factory);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(GRPC_ARG_ALLOW_REUSEPORT
" must be an integer");
}
@ -82,6 +84,7 @@ static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
s->expand_wildcard_addrs = (args->args[i].value.integer != 0);
} else {
gpr_free(s);
grpc_slice_allocator_factory_destroy(slice_allocator_factory);
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
GRPC_ARG_EXPAND_WILDCARD_ADDRS " must be an integer");
}
@ -102,6 +105,7 @@ static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
s->nports = 0;
s->channel_args = grpc_channel_args_copy(args);
s->fd_handler = nullptr;
s->slice_allocator_factory = slice_allocator_factory;
gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0);
*server = s;
return GRPC_ERROR_NONE;
@ -115,17 +119,15 @@ static void finish_shutdown(grpc_tcp_server* s) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, s->shutdown_complete,
GRPC_ERROR_NONE);
}
gpr_mu_destroy(&s->mu);
while (s->head) {
grpc_tcp_listener* sp = s->head;
s->head = sp->next;
gpr_free(sp);
}
grpc_slice_allocator_factory_destroy(s->slice_allocator_factory);
grpc_channel_args_destroy(s->channel_args);
delete s->fd_handler;
gpr_free(s);
}
@ -169,10 +171,8 @@ static void deactivated_all_ports(grpc_tcp_server* s) {
static void tcp_server_destroy(grpc_tcp_server* s) {
gpr_mu_lock(&s->mu);
GPR_ASSERT(!s->shutdown);
s->shutdown = true;
/* shutdown all fd's */
if (s->active_ports) {
grpc_tcp_listener* sp;
@ -267,10 +267,13 @@ static void on_read(void* arg, grpc_error_handle err) {
acceptor->port_index = sp->port_index;
acceptor->fd_index = sp->fd_index;
acceptor->external_connection = false;
sp->server->on_accept_cb(
sp->server->on_accept_cb_arg,
grpc_tcp_create(fdobj, sp->server->channel_args, addr_str.c_str()),
grpc_tcp_create(fdobj, sp->server->channel_args, addr_str.c_str(),
grpc_slice_allocator_factory_create_slice_allocator(
sp->server->slice_allocator_factory,
absl::StrCat("tcp_server_posix:", addr_str),
sp->server->channel_args)),
read_notifier_pollset, acceptor);
}
@ -613,9 +616,13 @@ class ExternalConnectionHandler : public grpc_core::TcpServerFdHandler {
acceptor->external_connection = true;
acceptor->listener_fd = listener_fd;
acceptor->pending_data = buf;
s_->on_accept_cb(s_->on_accept_cb_arg,
grpc_tcp_create(fdobj, s_->channel_args, addr_str.c_str()),
read_notifier_pollset, acceptor);
s_->on_accept_cb(
s_->on_accept_cb_arg,
grpc_tcp_create(
fdobj, s_->channel_args, addr_str.c_str(),
grpc_slice_allocator_factory_create_slice_allocator(
s_->slice_allocator_factory, addr_str, s_->channel_args)),
read_notifier_pollset, acceptor);
}
private:

@ -94,6 +94,9 @@ struct grpc_tcp_server {
/* a handler for external connections, owned */
grpc_core::TcpServerFdHandler* fd_handler;
/* used to create slice allocators for endpoints, owned */
grpc_slice_allocator_factory* slice_allocator_factory;
};
/* If successful, add a listener to \a s for \a addr, set \a dsmode for the

@ -97,13 +97,15 @@ struct grpc_tcp_server {
grpc_closure* shutdown_complete;
grpc_channel_args* channel_args;
grpc_slice_allocator_factory* slice_allocator_factory;
};
/* Public function. Allocates the proper data structures to hold a
grpc_tcp_server. */
static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
const grpc_channel_args* args,
grpc_tcp_server** server) {
static grpc_error_handle tcp_server_create(
grpc_closure* shutdown_complete, const grpc_channel_args* args,
grpc_slice_allocator_factory* slice_allocator_factory,
grpc_tcp_server** server) {
grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
s->channel_args = grpc_channel_args_copy(args);
gpr_ref_init(&s->refs, 1);
@ -116,6 +118,7 @@ static grpc_error_handle tcp_server_create(grpc_closure* shutdown_complete,
s->shutdown_starting.head = NULL;
s->shutdown_starting.tail = NULL;
s->shutdown_complete = shutdown_complete;
s->slice_allocator_factory = slice_allocator_factory;
*server = s;
return GRPC_ERROR_NONE;
}
@ -166,7 +169,7 @@ static void tcp_server_shutdown_starting_add(grpc_tcp_server* s,
static void tcp_server_destroy(grpc_tcp_server* s) {
grpc_tcp_listener* sp;
gpr_mu_lock(&s->mu);
grpc_slice_allocator_factory_destroy(s->slice_allocator_factory);
/* First, shutdown all fd's. This will queue abortion calls for all
of the pending accepts due to the normal operation mechanism. */
if (s->active_ports == 0) {
@ -324,7 +327,6 @@ static void on_accept(void* arg, grpc_error_handle error) {
gpr_mu_unlock(&sp->server->mu);
return;
}
/* The IOCP notified us of a completed operation. Let's grab the results,
and act accordingly. */
transfered_bytes = 0;
@ -358,8 +360,11 @@ static void on_accept(void* arg, grpc_error_handle error) {
gpr_free(utf8_message);
}
std::string fd_name = absl::StrCat("tcp_server:", peer_name_string);
ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name.c_str()),
sp->server->channel_args, peer_name_string.c_str());
ep = grpc_tcp_create(
grpc_winsocket_create(sock, fd_name.c_str()),
sp->server->channel_args, peer_name_string.c_str(),
grpc_slice_allocator_factory_create_slice_allocator(
sp->server->slice_allocator_factory, peer_name_string));
} else {
closesocket(sock);
}

@ -117,7 +117,7 @@ typedef struct grpc_tcp {
grpc_slice_buffer* write_slices;
grpc_slice_buffer* read_slices;
grpc_resource_user* resource_user;
grpc_slice_allocator* slice_allocator;
/* The IO Completion Port runs from another thread. We need some mechanism
to protect ourselves when requesting a shutdown. */
@ -133,7 +133,7 @@ static void tcp_free(grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
grpc_resource_user_unref(tcp->resource_user);
grpc_slice_allocator_destroy(tcp->slice_allocator);
if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
delete tcp;
}
@ -467,7 +467,6 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
}
grpc_winsocket_shutdown(tcp->socket);
gpr_mu_unlock(&tcp->mu);
grpc_resource_user_shutdown(tcp->resource_user);
}
static void win_destroy(grpc_endpoint* ep) {
@ -486,11 +485,6 @@ static absl::string_view win_get_local_address(grpc_endpoint* ep) {
return tcp->local_address;
}
static grpc_resource_user* win_get_resource_user(grpc_endpoint* ep) {
grpc_tcp* tcp = (grpc_tcp*)ep;
return tcp->resource_user;
}
static int win_get_fd(grpc_endpoint* ep) { return -1; }
static bool win_can_track_err(grpc_endpoint* ep) { return false; }
@ -502,7 +496,6 @@ static grpc_endpoint_vtable vtable = {win_read,
win_delete_from_pollset_set,
win_shutdown,
win_destroy,
win_get_resource_user,
win_get_peer,
win_get_local_address,
win_get_fd,
@ -510,17 +503,8 @@ static grpc_endpoint_vtable vtable = {win_read,
grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
grpc_channel_args* channel_args,
const char* peer_string) {
grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_unref_internal(resource_quota);
resource_quota = grpc_resource_quota_ref_internal(
(grpc_resource_quota*)channel_args->args[i].value.pointer.p);
}
}
}
const char* peer_string,
grpc_slice_allocator* slice_allocator) {
grpc_tcp* tcp = new grpc_tcp;
memset(tcp, 0, sizeof(grpc_tcp));
tcp->base.vtable = &vtable;
@ -540,9 +524,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
}
tcp->peer_string = peer_string;
grpc_slice_buffer_init(&tcp->last_read_buffer);
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
grpc_resource_quota_unref_internal(resource_quota);
tcp->slice_allocator = slice_allocator;
return &tcp->base;
}

@ -42,7 +42,8 @@
*/
grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
grpc_channel_args* channel_args,
const char* peer_string);
const char* peer_string,
grpc_slice_allocator* slice_allocator);
grpc_error_handle grpc_tcp_prepare_socket(SOCKET sock);

@ -174,7 +174,6 @@ void AwsExternalAccountCredentials::RetrieveRegion() {
GRPC_CLOSURE_INIT(&ctx_->closure, OnRetrieveRegion, this, nullptr);
grpc_httpcli_get(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, ctx_->deadline, &ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}
@ -224,7 +223,6 @@ void AwsExternalAccountCredentials::RetrieveRoleName() {
GRPC_CLOSURE_INIT(&ctx_->closure, OnRetrieveRoleName, this, nullptr);
grpc_httpcli_get(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, ctx_->deadline, &ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}
@ -287,7 +285,6 @@ void AwsExternalAccountCredentials::RetrieveSigningKeys() {
GRPC_CLOSURE_INIT(&ctx_->closure, OnRetrieveSigningKeys, this, nullptr);
grpc_httpcli_get(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, ctx_->deadline, &ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}

@ -297,7 +297,6 @@ void ExternalAccountCredentials::ExchangeToken(
grpc_httpcli_post(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, body.c_str(), body.size(), ctx_->deadline,
&ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}
@ -387,7 +386,6 @@ void ExternalAccountCredentials::ImpersenateServiceAccount() {
grpc_httpcli_post(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, body.c_str(), body.size(), ctx_->deadline,
&ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}

@ -147,7 +147,6 @@ void UrlExternalAccountCredentials::RetrieveSubjectToken(
GRPC_CLOSURE_INIT(&ctx_->closure, OnRetrieveSubjectToken, this, nullptr);
grpc_httpcli_get(ctx_->httpcli_context, ctx_->pollent, resource_quota,
&request, ctx_->deadline, &ctx_->closure, &ctx_->response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_http_request_destroy(&request.http);
}

@ -193,7 +193,6 @@ static int is_metadata_server_reachable() {
GRPC_CLOSURE_CREATE(on_metadata_server_detection_http_response, &detector,
grpc_schedule_on_exec_ctx),
&detector.response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_core::ExecCtx::Get()->Flush();
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */

@ -709,7 +709,6 @@ static void on_openid_config_retrieved(void* user_data,
grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay,
GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
&ctx->responses[HTTP_RESPONSE_KEYS]);
grpc_resource_quota_unref_internal(resource_quota);
gpr_free(req.host);
return;
@ -834,7 +833,6 @@ static void retrieve_key_and_verify(verifier_cb_ctx* ctx) {
&ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay, http_cb,
&ctx->responses[rsp_idx]);
grpc_resource_quota_unref_internal(resource_quota);
gpr_free(req.host);
gpr_free(req.http.path);
return;

@ -401,7 +401,6 @@ class grpc_compute_engine_token_fetcher_credentials
GRPC_CLOSURE_INIT(&http_get_cb_closure_, response_cb,
metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response);
grpc_resource_quota_unref_internal(resource_quota);
}
std::string debug_string() override {
@ -462,7 +461,6 @@ void grpc_google_refresh_token_credentials::fetch_oauth2(
GRPC_CLOSURE_INIT(&http_post_cb_closure_, response_cb,
metadata_req, grpc_schedule_on_exec_ctx),
&metadata_req->response);
grpc_resource_quota_unref_internal(resource_quota);
}
grpc_google_refresh_token_credentials::grpc_google_refresh_token_credentials(
@ -595,7 +593,6 @@ class StsTokenFetcherCredentials
GRPC_CLOSURE_INIT(&http_post_cb_closure_, response_cb, metadata_req,
grpc_schedule_on_exec_ctx),
&metadata_req->response);
grpc_resource_quota_unref_internal(resource_quota);
gpr_free(body);
}

@ -416,12 +416,6 @@ static int endpoint_get_fd(grpc_endpoint* secure_ep) {
return grpc_endpoint_get_fd(ep->wrapped_ep);
}
static grpc_resource_user* endpoint_get_resource_user(
grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_get_resource_user(ep->wrapped_ep);
}
static bool endpoint_can_track_err(grpc_endpoint* secure_ep) {
secure_endpoint* ep = reinterpret_cast<secure_endpoint*>(secure_ep);
return grpc_endpoint_can_track_err(ep->wrapped_ep);
@ -434,7 +428,6 @@ static const grpc_endpoint_vtable vtable = {endpoint_read,
endpoint_delete_from_pollset_set,
endpoint_shutdown,
endpoint_destroy,
endpoint_get_resource_user,
endpoint_get_peer,
endpoint_get_local_address,
endpoint_get_fd,

@ -58,12 +58,12 @@ static void destroy_channel(void* arg, grpc_error_handle error);
grpc_channel* grpc_channel_create_with_builder(
grpc_channel_stack_builder* builder,
grpc_channel_stack_type channel_stack_type, grpc_error_handle* error) {
grpc_channel_stack_type channel_stack_type,
grpc_resource_user* resource_user, size_t preallocated_bytes,
grpc_error_handle* error) {
char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
grpc_channel_args* args = grpc_channel_args_copy(
grpc_channel_stack_builder_get_channel_arguments(builder));
grpc_resource_user* resource_user =
grpc_channel_stack_builder_get_resource_user(builder);
grpc_channel* channel;
if (channel_stack_type == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
@ -84,10 +84,17 @@ grpc_channel* grpc_channel_create_with_builder(
}
gpr_free(target);
grpc_channel_args_destroy(args);
if (resource_user != nullptr) {
if (preallocated_bytes > 0) {
grpc_resource_user_free(resource_user, preallocated_bytes);
}
grpc_resource_user_unref(resource_user);
}
return nullptr;
}
channel->target = target;
channel->resource_user = resource_user;
channel->preallocated_bytes = preallocated_bytes;
channel->is_client = grpc_channel_stack_type_is_client(channel_stack_type);
channel->registration_table.Init();
@ -225,6 +232,7 @@ grpc_channel* grpc_channel_create(const char* target,
grpc_channel_stack_type channel_stack_type,
grpc_transport* optional_transport,
grpc_resource_user* resource_user,
size_t preallocated_bytes,
grpc_error_handle* error) {
// We need to make sure that grpc_shutdown() does not shut things down
// until after the channel is destroyed. However, the channel may not
@ -259,11 +267,13 @@ grpc_channel* grpc_channel_create(const char* target,
grpc_channel_args_destroy(args);
grpc_channel_stack_builder_set_target(builder, target);
grpc_channel_stack_builder_set_transport(builder, optional_transport);
grpc_channel_stack_builder_set_resource_user(builder, resource_user);
if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
grpc_channel_stack_builder_destroy(builder);
if (resource_user != nullptr) {
grpc_resource_user_free(resource_user, GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
if (preallocated_bytes > 0) {
grpc_resource_user_free(resource_user, preallocated_bytes);
}
grpc_resource_user_unref(resource_user);
}
grpc_shutdown(); // Since we won't call destroy_channel().
return nullptr;
@ -273,8 +283,8 @@ grpc_channel* grpc_channel_create(const char* target,
if (grpc_channel_stack_type_is_client(channel_stack_type)) {
CreateChannelzNode(builder);
}
grpc_channel* channel =
grpc_channel_create_with_builder(builder, channel_stack_type, error);
grpc_channel* channel = grpc_channel_create_with_builder(
builder, channel_stack_type, resource_user, preallocated_bytes, error);
if (channel == nullptr) {
grpc_shutdown(); // Since we won't call destroy_channel().
}
@ -508,8 +518,11 @@ static void destroy_channel(void* arg, grpc_error_handle /*error*/) {
grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
channel->registration_table.Destroy();
if (channel->resource_user != nullptr) {
grpc_resource_user_free(channel->resource_user,
GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
if (channel->preallocated_bytes > 0) {
grpc_resource_user_free(channel->resource_user,
channel->preallocated_bytes);
}
grpc_resource_user_unref(channel->resource_user);
}
gpr_free(channel->target);
gpr_free(channel);

@ -30,20 +30,31 @@
#include "src/core/lib/surface/channel_stack_type.h"
#include "src/core/lib/transport/metadata.h"
/// Creates a grpc_channel.
///
/// If the \a resource_user is not null, \a preallocated_bytes may have been
/// allocated on that resource_user for use by the channel. These bytes will be
/// freed from the resource_user upon channel destruction.
///
/// Takes ownership of a \a resource_user ref.
grpc_channel* grpc_channel_create(const char* target,
const grpc_channel_args* args,
grpc_channel_stack_type channel_stack_type,
grpc_transport* optional_transport,
grpc_resource_user* resource_user = nullptr,
grpc_error_handle* error = nullptr);
grpc_resource_user* resource_user,
size_t preallocated_bytes,
grpc_error_handle* error);
/** The same as grpc_channel_destroy, but doesn't create an ExecCtx, and so
* is safe to use from within core. */
void grpc_channel_destroy_internal(grpc_channel* channel);
/// Creates a grpc_channel with a builder. See the description of
/// \a grpc_channel_create for variable definitions.
grpc_channel* grpc_channel_create_with_builder(
grpc_channel_stack_builder* builder,
grpc_channel_stack_type channel_stack_type,
grpc_resource_user* resource_user, size_t preallocated_bytes,
grpc_error_handle* error = nullptr);
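
A minimal sketch of the new call-site contract (illustrative only, not part of this change; the quota and user names are made up, and the lame-channel path is borrowed from lame_client.cc below because it needs no transport):

// The channel takes ownership of one `user` ref. On destruction it frees
// `preallocated_bytes` (0 here) back to the quota and then drops the ref.
grpc_resource_quota* quota = grpc_resource_quota_create("example_quota");
grpc_resource_user* user = grpc_resource_user_create(quota, "example_channel");
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_channel* channel = grpc_channel_create(
    "example-target", /*args=*/nullptr, GRPC_CLIENT_LAME_CHANNEL,
    /*transport=*/nullptr, user, /*preallocated_bytes=*/0, &error);
grpc_resource_quota_unref(quota);  // the channel's resource_user keeps it alive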
/** Create a call given a grpc_channel, in order to call \a method.
@ -108,6 +119,7 @@ struct grpc_channel {
gpr_atm call_size_estimate;
grpc_resource_user* resource_user;
size_t preallocated_bytes;
// TODO(vjpai): Once the grpc_channel is allocated via new rather than malloc,
// expand the members of the CallRegistrationTable directly into

@ -177,8 +177,8 @@ grpc_channel* grpc_lame_client_channel_create(const char* target,
grpc_slice_from_static_string(error_message));
grpc_arg error_arg = grpc_core::MakeLameClientErrorArg(error);
grpc_channel_args args = {1, &error_arg};
grpc_channel* channel =
grpc_channel_create(target, &args, GRPC_CLIENT_LAME_CHANNEL, nullptr);
grpc_channel* channel = grpc_channel_create(
target, &args, GRPC_CLIENT_LAME_CHANNEL, nullptr, nullptr, 0, nullptr);
GRPC_ERROR_UNREF(error);
return channel;
}

@ -513,17 +513,6 @@ const grpc_channel_filter Server::kServerTopFilter = {
namespace {
grpc_resource_user* CreateDefaultResourceUser(const grpc_channel_args* args) {
if (args != nullptr) {
grpc_resource_quota* resource_quota =
grpc_resource_quota_from_channel_args(args, false /* create */);
if (resource_quota != nullptr) {
return grpc_resource_user_create(resource_quota, "default");
}
}
return nullptr;
}
RefCountedPtr<channelz::ServerNode> CreateChannelzNode(
const grpc_channel_args* args) {
RefCountedPtr<channelz::ServerNode> channelz_node;
@ -545,7 +534,6 @@ RefCountedPtr<channelz::ServerNode> CreateChannelzNode(
Server::Server(const grpc_channel_args* args)
: channel_args_(grpc_channel_args_copy(args)),
default_resource_user_(CreateDefaultResourceUser(args)),
channelz_node_(CreateChannelzNode(args)) {}
Server::~Server() {
@ -613,11 +601,12 @@ grpc_error_handle Server::SetupTransport(
grpc_transport* transport, grpc_pollset* accepting_pollset,
const grpc_channel_args* args,
const RefCountedPtr<grpc_core::channelz::SocketNode>& socket_node,
grpc_resource_user* resource_user) {
grpc_resource_user* resource_user, size_t preallocated_bytes) {
// Create channel.
grpc_error_handle error = GRPC_ERROR_NONE;
grpc_channel* channel = grpc_channel_create(
nullptr, args, GRPC_SERVER_CHANNEL, transport, resource_user, &error);
grpc_channel* channel =
grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport,
resource_user, preallocated_bytes, &error);
if (channel == nullptr) {
return error;
}
@ -866,11 +855,6 @@ void Server::Orphan() {
GPR_ASSERT(ShutdownCalled() || listeners_.empty());
GPR_ASSERT(listeners_destroyed_ == listeners_.size());
}
if (default_resource_user_ != nullptr) {
grpc_resource_quota_unref(grpc_resource_user_quota(default_resource_user_));
grpc_resource_user_shutdown(default_resource_user_);
grpc_resource_user_unref(default_resource_user_);
}
Unref();
}

@ -97,9 +97,6 @@ class Server : public InternallyRefCounted<Server> {
void Orphan() ABSL_LOCKS_EXCLUDED(mu_global_) override;
const grpc_channel_args* channel_args() const { return channel_args_; }
grpc_resource_user* default_resource_user() const {
return default_resource_user_;
}
channelz::ServerNode* channelz_node() const { return channelz_node_.get(); }
// Do not call this before Start(). Returns the pollsets. The
@ -128,11 +125,13 @@ class Server : public InternallyRefCounted<Server> {
// Sets up a transport. Creates a channel stack and binds the transport to
// the server. Called from the listener when a new connection is accepted.
// Takes ownership of a ref on resource_user from the caller.
grpc_error_handle SetupTransport(
grpc_transport* transport, grpc_pollset* accepting_pollset,
const grpc_channel_args* args,
const RefCountedPtr<channelz::SocketNode>& socket_node,
grpc_resource_user* resource_user = nullptr);
grpc_resource_user* resource_user = nullptr,
size_t preallocated_bytes = 0);
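
A hedged sketch of the listener side (`accepting_pollset` and `connection_resource_user` are placeholder names): the listener that accepted the connection hands its resource_user ref, plus whatever bytes it preallocated while handshaking, to the server.

grpc_error_handle error = server->SetupTransport(
    transport, accepting_pollset, server->channel_args(),
    /*socket_node=*/nullptr, connection_resource_user,
    /*preallocated_bytes=*/GRPC_RESOURCE_QUOTA_CHANNEL_SIZE);
// On success the server channel owns the ref and frees the preallocated
// bytes when it is destroyed; see the channel.cc changes above.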
void RegisterCompletionQueue(grpc_completion_queue* cq);
@ -398,7 +397,6 @@ class Server : public InternallyRefCounted<Server> {
}
grpc_channel_args* const channel_args_;
grpc_resource_user* default_resource_user_ = nullptr;
RefCountedPtr<channelz::ServerNode> channelz_node_;
std::unique_ptr<grpc_server_config_fetcher> config_fetcher_;

@ -35,6 +35,7 @@
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/util/resource_user_util.h"
#define MIN_HTTP2_FRAME_SIZE 9
@ -65,8 +66,10 @@ static void set_done_write(void* arg, grpc_error_handle /*error*/) {
static void server_setup_transport(void* ts, grpc_transport* transport) {
thd_args* a = static_cast<thd_args*>(ts);
grpc_core::ExecCtx exec_ctx;
a->server->core_server->SetupTransport(
transport, nullptr, a->server->core_server->channel_args(), nullptr);
a->server->core_server->SetupTransport(transport,
/*accepting_pollset=*/nullptr,
a->server->core_server->channel_args(),
/*socket_node=*/nullptr);
}
/* Sets the read_done event */
@ -196,21 +199,19 @@ void grpc_run_bad_client_test(
/* Init grpc */
grpc_init();
/* Create endpoints */
sfd = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
/* Create server, completion events */
a.server = grpc_server_create(nullptr, nullptr);
a.cq = grpc_completion_queue_create_for_next(nullptr);
client_cq = grpc_completion_queue_create_for_next(nullptr);
grpc_server_register_completion_queue(a.server, a.cq, nullptr);
a.registered_method =
grpc_server_register_method(a.server, GRPC_BAD_CLIENT_REGISTERED_METHOD,
GRPC_BAD_CLIENT_REGISTERED_HOST,
GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER, 0);
grpc_server_start(a.server);
transport = grpc_create_chttp2_transport(nullptr, sfd.server, false);
transport = grpc_create_chttp2_transport(
nullptr, sfd.server, false, grpc_resource_user_create_unlimited());
server_setup_transport(&a, transport);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
@ -237,7 +238,6 @@ void grpc_run_bad_client_test(
/* Shutdown. */
shutdown_client(&sfd.client);
server_validator_thd.Join();
shutdown_cq = grpc_completion_queue_create_for_pluck(nullptr);
grpc_server_shutdown_and_notify(a.server, shutdown_cq, nullptr);
GPR_ASSERT(grpc_completion_queue_pluck(shutdown_cq, nullptr,

@ -43,6 +43,7 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
#include "src/core/lib/surface/server.h"
#include "test/core/util/resource_user_util.h"
static void* tag(intptr_t t) { return reinterpret_cast<void*>(t); }
@ -88,15 +89,17 @@ static void client_setup_transport(grpc_transport* transport) {
grpc_channel_args_copy_and_add(nullptr, &authority_arg, 1);
/* TODO (pjaikumar): use GRPC_CLIENT_CHANNEL instead of
* GRPC_CLIENT_DIRECT_CHANNEL */
g_ctx.client = grpc_channel_create("socketpair-target", args,
GRPC_CLIENT_DIRECT_CHANNEL, transport);
g_ctx.client =
grpc_channel_create("socketpair-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, 0, nullptr);
grpc_channel_args_destroy(args);
}
static void init_client() {
grpc_core::ExecCtx exec_ctx;
grpc_transport* transport;
transport = grpc_create_chttp2_transport(nullptr, g_ctx.ep->client, true);
transport = grpc_create_chttp2_transport(
nullptr, g_ctx.ep->client, true, grpc_resource_user_create_unlimited());
client_setup_transport(transport);
GPR_ASSERT(g_ctx.client);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
@ -109,7 +112,8 @@ static void init_server() {
g_ctx.server = grpc_server_create(nullptr, nullptr);
grpc_server_register_completion_queue(g_ctx.server, g_ctx.cq, nullptr);
grpc_server_start(g_ctx.server);
transport = grpc_create_chttp2_transport(nullptr, g_ctx.ep->server, false);
transport = grpc_create_chttp2_transport(
nullptr, g_ctx.ep->server, false, grpc_resource_user_create_unlimited());
server_setup_transport(transport);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
}

@ -47,11 +47,17 @@
/* chttp2 transport that is immediately available (used for testing
connected_channel without a client_channel) */
struct custom_fixture_data {
grpc_endpoint_pair ep;
grpc_resource_quota* resource_quota;
};
static void server_setup_transport(void* ts, grpc_transport* transport) {
grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
custom_fixture_data* fixture_data =
static_cast<custom_fixture_data*>(f->fixture_data);
grpc_endpoint_add_to_pollset(fixture_data->ep.server, grpc_cq_pollset(f->cq));
grpc_error_handle error = f->server->core_server->SetupTransport(
transport, nullptr, f->server->core_server->channel_args(), nullptr);
if (error == GRPC_ERROR_NONE) {
@ -77,7 +83,7 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
grpc_error_handle error = GRPC_ERROR_NONE;
cs->f->client =
grpc_channel_create("socketpair-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, &error);
transport, nullptr, 0, &error);
grpc_channel_args_destroy(args);
if (cs->f->client != nullptr) {
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
@ -95,30 +101,32 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
}
static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
grpc_channel_args* /*client_args*/, grpc_channel_args* /*server_args*/) {
grpc_endpoint_pair* sfd =
static_cast<grpc_endpoint_pair*>(gpr_malloc(sizeof(grpc_endpoint_pair)));
grpc_channel_args* client_args, grpc_channel_args* /*server_args*/) {
custom_fixture_data* fixture_data = static_cast<custom_fixture_data*>(
gpr_malloc(sizeof(custom_fixture_data)));
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
f.fixture_data = sfd;
f.fixture_data = fixture_data;
f.cq = grpc_completion_queue_create_for_next(nullptr);
f.shutdown_cq = grpc_completion_queue_create_for_pluck(nullptr);
*sfd = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
fixture_data->resource_quota =
grpc_resource_quota_from_channel_args(client_args, true);
fixture_data->ep = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
return f;
}
static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* client_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
sp_client_setup cs;
cs.client_args = client_args;
cs.f = f;
transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
transport = grpc_create_chttp2_transport(
client_args, fixture_data->ep.client, true,
grpc_resource_user_create(fixture_data->resource_quota,
"client_transport"));
client_setup_transport(&cs, transport);
GPR_ASSERT(f->client);
}
@ -126,17 +134,23 @@ static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* server_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
GPR_ASSERT(!f->server);
f->server = grpc_server_create(server_args, nullptr);
grpc_server_register_completion_queue(f->server, f->cq, nullptr);
grpc_server_start(f->server);
transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
transport = grpc_create_chttp2_transport(
server_args, fixture_data->ep.server, false,
grpc_resource_user_create(fixture_data->resource_quota,
"server_transport"));
server_setup_transport(f, transport);
}
static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
grpc_core::ExecCtx exec_ctx;
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_resource_quota_unref(fixture_data->resource_quota);
gpr_free(f->fixture_data);
}
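
The next two fixtures repeat the same pattern; distilled as a sketch (with `endpoint` standing in for `fixture_data->ep.client`):

// Lifecycle in these fixtures: the quota is shared fixture state, each
// transport owns its own resource_user, and teardown drops only the
// fixture's quota ref.
grpc_resource_quota* quota =
    grpc_resource_quota_from_channel_args(client_args, /*create=*/true);
grpc_transport* t = grpc_create_chttp2_transport(
    client_args, endpoint, /*is_client=*/true,
    grpc_resource_user_create(quota, "client_transport"));
// ... exercise the transport ...
grpc_transport_destroy(t);         // releases the transport's resource_user
grpc_resource_quota_unref(quota);  // the fixture's own ref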

@ -41,11 +41,17 @@
/* chttp2 transport that is immediately available (used for testing
connected_channel without a client_channel) */
struct custom_fixture_data {
grpc_endpoint_pair ep;
grpc_resource_quota* resource_quota;
};
static void server_setup_transport(void* ts, grpc_transport* transport) {
grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
custom_fixture_data* fixture_data =
static_cast<custom_fixture_data*>(f->fixture_data);
grpc_endpoint_add_to_pollset(fixture_data->ep.server, grpc_cq_pollset(f->cq));
grpc_error_handle error = f->server->core_server->SetupTransport(
transport, nullptr, f->server->core_server->channel_args(), nullptr);
if (error == GRPC_ERROR_NONE) {
@ -72,7 +78,7 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
grpc_error_handle error = GRPC_ERROR_NONE;
cs->f->client =
grpc_channel_create("socketpair-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, &error);
transport, nullptr, 0, &error);
grpc_channel_args_destroy(args);
if (cs->f->client != nullptr) {
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
@ -90,30 +96,32 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
}
static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
grpc_channel_args* /*client_args*/, grpc_channel_args* /*server_args*/) {
grpc_endpoint_pair* sfd =
static_cast<grpc_endpoint_pair*>(gpr_malloc(sizeof(grpc_endpoint_pair)));
grpc_channel_args* client_args, grpc_channel_args* /*server_args*/) {
custom_fixture_data* fixture_data = static_cast<custom_fixture_data*>(
gpr_malloc(sizeof(custom_fixture_data)));
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
f.fixture_data = sfd;
f.fixture_data = fixture_data;
f.cq = grpc_completion_queue_create_for_next(nullptr);
f.shutdown_cq = grpc_completion_queue_create_for_pluck(nullptr);
*sfd = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
fixture_data->resource_quota =
grpc_resource_quota_from_channel_args(client_args, true);
fixture_data->ep = grpc_iomgr_create_endpoint_pair("fixture", nullptr);
return f;
}
static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* client_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
sp_client_setup cs;
cs.client_args = client_args;
cs.f = f;
transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
transport = grpc_create_chttp2_transport(
client_args, fixture_data->ep.client, true,
grpc_resource_user_create(fixture_data->resource_quota,
"client_transport"));
client_setup_transport(&cs, transport);
GPR_ASSERT(f->client);
}
@ -121,17 +129,23 @@ static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* server_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
GPR_ASSERT(!f->server);
f->server = grpc_server_create(server_args, nullptr);
grpc_server_register_completion_queue(f->server, f->cq, nullptr);
grpc_server_start(f->server);
transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
transport = grpc_create_chttp2_transport(
server_args, fixture_data->ep.server, false,
grpc_resource_user_create(fixture_data->resource_quota,
"server_transport"));
server_setup_transport(f, transport);
}
static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
grpc_core::ExecCtx exec_ctx;
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_resource_quota_unref(fixture_data->resource_quota);
gpr_free(f->fixture_data);
}

@ -41,11 +41,17 @@
/* chttp2 transport that is immediately available (used for testing
connected_channel without a client_channel) */
struct custom_fixture_data {
grpc_endpoint_pair ep;
grpc_resource_quota* resource_quota;
};
static void server_setup_transport(void* ts, grpc_transport* transport) {
grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
custom_fixture_data* fixture_data =
static_cast<custom_fixture_data*>(f->fixture_data);
grpc_endpoint_add_to_pollset(fixture_data->ep.server, grpc_cq_pollset(f->cq));
grpc_error_handle error = f->server->core_server->SetupTransport(
transport, nullptr, f->server->core_server->channel_args(), nullptr);
if (error == GRPC_ERROR_NONE) {
@ -72,7 +78,7 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
grpc_error_handle error = GRPC_ERROR_NONE;
cs->f->client =
grpc_channel_create("socketpair-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, &error);
transport, nullptr, 0, &error);
grpc_channel_args_destroy(args);
if (cs->f->client != nullptr) {
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
@ -91,15 +97,13 @@ static void client_setup_transport(void* ts, grpc_transport* transport) {
static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
grpc_channel_args* /*client_args*/, grpc_channel_args* /*server_args*/) {
grpc_endpoint_pair* sfd =
static_cast<grpc_endpoint_pair*>(gpr_malloc(sizeof(grpc_endpoint_pair)));
custom_fixture_data* fixture_data = static_cast<custom_fixture_data*>(
gpr_malloc(sizeof(custom_fixture_data)));
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
f.fixture_data = sfd;
f.fixture_data = fixture_data;
f.cq = grpc_completion_queue_create_for_next(nullptr);
f.shutdown_cq = grpc_completion_queue_create_for_pluck(nullptr);
grpc_arg a[3];
a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
a[0].type = GRPC_ARG_INTEGER;
@ -111,20 +115,24 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
a[2].type = GRPC_ARG_INTEGER;
a[2].value.integer = 1;
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
*sfd = grpc_iomgr_create_endpoint_pair("fixture", &args);
fixture_data->resource_quota =
grpc_resource_quota_from_channel_args(&args, true);
fixture_data->ep = grpc_iomgr_create_endpoint_pair("fixture", &args);
return f;
}
static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* client_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
sp_client_setup cs;
cs.client_args = client_args;
cs.f = f;
transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
transport = grpc_create_chttp2_transport(
client_args, fixture_data->ep.client, true,
grpc_resource_user_create(fixture_data->resource_quota,
"client_transport"));
client_setup_transport(&cs, transport);
GPR_ASSERT(f->client);
}
@ -132,17 +140,23 @@ static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
grpc_channel_args* server_args) {
grpc_core::ExecCtx exec_ctx;
grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_transport* transport;
GPR_ASSERT(!f->server);
f->server = grpc_server_create(server_args, nullptr);
grpc_server_register_completion_queue(f->server, f->cq, nullptr);
grpc_server_start(f->server);
transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
transport = grpc_create_chttp2_transport(
server_args, fixture_data->ep.server, false,
grpc_resource_user_create(fixture_data->resource_quota,
"server_transport"));
server_setup_transport(f, transport);
}
static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
grpc_core::ExecCtx exec_ctx;
auto* fixture_data = static_cast<custom_fixture_data*>(f->fixture_data);
grpc_resource_quota_unref(fixture_data->resource_quota);
gpr_free(f->fixture_data);
}

@ -51,6 +51,7 @@
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/util/port.h"
#include "test/core/util/resource_user_util.h"
struct grpc_end2end_http_proxy {
grpc_end2end_http_proxy()
@ -538,6 +539,7 @@ static void on_read_request_done_locked(void* arg, grpc_error_handle error) {
GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn,
grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&conn->on_server_connect_done, &conn->server_endpoint,
grpc_slice_allocator_create_unlimited(),
conn->pollset_set, nullptr,
&resolved_addresses->addrs[0], deadline);
grpc_resolved_addresses_destroy(resolved_addresses);
@ -612,8 +614,11 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name.c_str());
// Create TCP server.
proxy->channel_args = grpc_channel_args_copy(args);
grpc_error_handle error =
grpc_tcp_server_create(nullptr, proxy->channel_args, &proxy->server);
grpc_error_handle error = grpc_tcp_server_create(
nullptr, proxy->channel_args,
grpc_slice_allocator_factory_create(
grpc_resource_quota_from_channel_args(args, true)),
&proxy->server);
GPR_ASSERT(error == GRPC_ERROR_NONE);
// Bind to port.
grpc_resolved_address resolved_addr;

@ -46,23 +46,24 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
grpc_core::Executor::SetThreadingAll(false);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("client_fuzzer");
grpc_endpoint* mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_resource_quota_unref_internal(resource_quota);
grpc_resource_quota_create("context_list_test");
grpc_endpoint* mock_endpoint = grpc_mock_endpoint_create(
discard_write,
grpc_slice_allocator_create(resource_quota, "mock_endpoint"));
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
grpc_transport* transport =
grpc_create_chttp2_transport(nullptr, mock_endpoint, true);
grpc_transport* transport = grpc_create_chttp2_transport(
nullptr, mock_endpoint, true,
grpc_resource_user_create(resource_quota, "mock_transport"));
grpc_resource_quota_unref(resource_quota);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);
grpc_arg authority_arg = grpc_channel_arg_string_create(
const_cast<char*>(GRPC_ARG_DEFAULT_AUTHORITY),
const_cast<char*>("test-authority"));
grpc_channel_args* args =
grpc_channel_args_copy_and_add(nullptr, &authority_arg, 1);
grpc_channel* channel = grpc_channel_create(
"test-target", args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
grpc_channel* channel =
grpc_channel_create("test-target", args, GRPC_CLIENT_DIRECT_CHANNEL,
transport, nullptr, 0, nullptr);
grpc_channel_args_destroy(args);
grpc_slice host = grpc_slice_from_static_string("localhost");
grpc_call* call = grpc_channel_create_call(

@ -40,23 +40,23 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
{
grpc_core::ExecCtx exec_ctx;
grpc_core::Executor::SetThreadingAll(false);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("server_fuzzer");
grpc_endpoint* mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_resource_quota_unref_internal(resource_quota);
grpc_resource_quota_create("context_list_test");
grpc_endpoint* mock_endpoint = grpc_mock_endpoint_create(
discard_write,
grpc_slice_allocator_create(resource_quota, "mock_endpoint"));
grpc_mock_endpoint_put_read(
mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
grpc_server* server = grpc_server_create(nullptr, nullptr);
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
grpc_server_register_completion_queue(server, cq, nullptr);
// TODO(ctiller): add more registered methods (one for POST, one for PUT)
grpc_server_register_method(server, "/reg", nullptr, {}, 0);
grpc_server_start(server);
grpc_transport* transport =
grpc_create_chttp2_transport(nullptr, mock_endpoint, false);
grpc_transport* transport = grpc_create_chttp2_transport(
nullptr, mock_endpoint, false,
grpc_resource_user_create(resource_quota, "mock_transport"));
grpc_resource_quota_unref(resource_quota);
server->core_server->SetupTransport(transport, nullptr, nullptr, nullptr);
grpc_chttp2_transport_start_reading(transport, nullptr, nullptr, nullptr);

@ -83,7 +83,6 @@ static void test_get(int port) {
&g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
grpc_pollset_worker* worker = nullptr;
@ -122,7 +121,6 @@ static void test_post(int port) {
&g_context, &g_pops, resource_quota, &req, "hello", 5, n_seconds_time(15),
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
grpc_pollset_worker* worker = nullptr;

@ -87,7 +87,6 @@ static void test_get(int port) {
&g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
grpc_pollset_worker* worker = nullptr;
@ -127,7 +126,6 @@ static void test_post(int port) {
&g_context, &g_pops, resource_quota, &req, "hello", 5, n_seconds_time(15),
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
&response);
grpc_resource_quota_unref_internal(resource_quota);
gpr_mu_lock(g_mu);
while (!g_done) {
grpc_pollset_worker* worker = nullptr;

@ -41,7 +41,6 @@ static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
grpc_endpoint_pair p = grpc_iomgr_create_endpoint_pair("test", &args);
f.client_ep = p.client;
f.server_ep = p.server;
grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);

@ -39,17 +39,12 @@ int main(int argc, char** argv) {
of descriptors */
rlim.rlim_cur = rlim.rlim_max = 10;
GPR_ASSERT(0 == setrlimit(RLIMIT_NOFILE, &rlim));
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("fd_conservation_posix_test");
for (i = 0; i < 100; i++) {
p = grpc_iomgr_create_endpoint_pair("test", nullptr);
grpc_endpoint_destroy(p.client);
grpc_endpoint_destroy(p.server);
grpc_core::ExecCtx::Get()->Flush();
}
grpc_resource_quota_unref(resource_quota);
}
grpc_shutdown();

@ -30,6 +30,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
// static int g_connections_complete = 0;
@ -102,8 +103,8 @@ static void must_fail(void* arg, grpc_error_handle error) {
/* connect to it */
GPR_ASSERT(getsockname(svr_fd, (struct sockaddr*)addr, (socklen_t*)&resolved_addr.len) == 0);
GRPC_CLOSURE_INIT(&done, must_succeed, nullptr, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, nullptr, nullptr, &resolved_addr,
GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(&done, &g_connecting, grpc_slice_allocator_create_unlimited(), nullptr,
nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
/* await the connection */
do {
@ -157,8 +158,8 @@ static void must_fail(void* arg, grpc_error_handle error) {
/* connect to a broken address */
GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, nullptr, nullptr, &resolved_addr,
GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(&done, &g_connecting, grpc_slice_allocator_create_unlimited(), nullptr,
nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
grpc_core::ExecCtx::Get()->Flush();

@ -30,6 +30,7 @@
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
static const int kConnectTimeout = 5;
@ -125,7 +126,8 @@ static bool compare_slice_buffer_with_buffer(grpc_slice_buffer *slices, const ch
/* connect to it */
XCTAssertEqual(getsockname(svr_fd, (struct sockaddr *)addr, (socklen_t *)&resolved_addr.len), 0);
init_event_closure(&done, &connected);
grpc_tcp_client_connect(&done, &ep_, nullptr, nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(&done, &ep_, grpc_slice_allocator_create_unlimited(), nullptr, nullptr,
&resolved_addr, GRPC_MILLIS_INF_FUTURE);
/* await the connection */
do {

@ -9,6 +9,7 @@ GRPC_LOCAL_SRC = '../../../../..'
# Install the dependencies in the main target plus all test targets.
target 'CFStreamTests' do
pod 'gRPC-Core', :path => GRPC_LOCAL_SRC
pod 'gRPC-Core/Tests', :path => GRPC_LOCAL_SRC
pod 'BoringSSL-GRPC', :podspec => "#{GRPC_LOCAL_SRC}/src/objective-c", :inhibit_warnings => true
end

@ -21,6 +21,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/util/test_config.h"
@ -686,67 +687,167 @@ static void test_reclaimers_can_be_posted_repeatedly(void) {
static void test_one_slice(void) {
gpr_log(GPR_INFO, "** test_one_slice **");
grpc_resource_quota* q = grpc_resource_quota_create("test_one_slice");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
grpc_resource_user_slice_allocator alloc;
grpc_slice_allocator* alloc = grpc_slice_allocator_create(q, "usr");
int num_allocs = 0;
grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_slice_allocator_allocate(
alloc, 1024, 1, grpc_slice_allocator_intent::kDefault, &buffer,
inc_int_cb, &num_allocs));
grpc_core::ExecCtx::Get()->Flush();
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_buffer_destroy_internal(&buffer);
grpc_slice_allocator_destroy(alloc);
}
grpc_resource_quota_unref(q);
}
static void test_one_slice_through_slice_allocator_factory(void) {
gpr_log(GPR_INFO, "** test_one_slice_through_slice_allocator_factory **");
grpc_resource_quota* resource_quota = grpc_resource_quota_create(
"test_one_slice_through_slice_allocator_factory");
int num_allocs = 0;
grpc_resource_quota_resize(resource_quota, 1024);
grpc_slice_allocator_factory* slice_allocator_factory =
grpc_slice_allocator_factory_create(resource_quota);
grpc_slice_allocator* slice_allocator =
grpc_slice_allocator_factory_create_slice_allocator(
slice_allocator_factory, "usr");
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer));
GPR_ASSERT(!grpc_slice_allocator_allocate(
slice_allocator, 1024, 1, grpc_slice_allocator_intent::kDefault,
&buffer, inc_int_cb, &num_allocs));
grpc_core::ExecCtx::Get()->Flush();
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_buffer_destroy_internal(&buffer);
grpc_slice_allocator_destroy(slice_allocator);
grpc_slice_allocator_factory_destroy(slice_allocator_factory);
}
}
static void test_slice_allocator_pressure_adjusted_allocation() {
gpr_log(GPR_INFO, "** test_slice_allocator_pressure_adjusted_allocation **");
// Quota large enough to avoid the 1/16 maximum allocation limit.
grpc_resource_quota* resource_quota = grpc_resource_quota_create(
"test_one_slice_through_slice_allocator_factory");
grpc_resource_quota_resize(resource_quota, 32 * 1024);
grpc_resource_user* black_hole_resource_user =
grpc_resource_user_create(resource_quota, "black hole");
{
// Consume 31 of 32 KiB (~97%) so memory pressure exceeds 0.95
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_safe_alloc(black_hole_resource_user, 31 * 1024);
}
GPR_ASSERT(grpc_resource_quota_get_memory_pressure(resource_quota) > 0.95);
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
grpc_slice_allocator* constrained_allocator =
grpc_slice_allocator_create(resource_quota, "constrained user");
{
// Request 2 KiB; under this memory pressure the allocation should be scaled down
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_slice_allocator_allocate(
constrained_allocator, 2 * 1024, 1,
grpc_slice_allocator_intent::kReadBuffer, &buffer,
[](void*, grpc_error_handle) {}, nullptr));
}
grpc_slice slice = grpc_slice_buffer_take_first(&buffer);
GPR_ASSERT(grpc_refcounted_slice_length(slice) < 2 * 1024);
GPR_ASSERT(grpc_refcounted_slice_length(slice) >= 256);
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_unref(slice);
grpc_resource_user_free(black_hole_resource_user, 31 * 1024);
grpc_resource_user_unref(black_hole_resource_user);
grpc_slice_allocator_destroy(constrained_allocator);
grpc_resource_quota_unref(resource_quota);
grpc_slice_buffer_destroy_internal(&buffer);
}
destroy_user(usr);
grpc_resource_quota_unref(q);
}
static void test_slice_allocator_capped_allocation() {
gpr_log(GPR_INFO, "** test_slice_allocator_pressure_adjusted_allocation **");
grpc_resource_quota* resource_quota = grpc_resource_quota_create(
"test_one_slice_through_slice_allocator_factory");
grpc_resource_quota_resize(resource_quota, 32 * 1024);
grpc_arg to_add[2];
grpc_channel_args* ch_args;
to_add[0] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_TCP_MIN_READ_CHUNK_SIZE), 1024);
to_add[1] = grpc_channel_arg_integer_create(
const_cast<char*>(GRPC_ARG_TCP_MAX_READ_CHUNK_SIZE), 2048);
ch_args = grpc_channel_args_copy_and_add(nullptr, to_add, 2);
grpc_slice_allocator* slice_allocator =
grpc_slice_allocator_create(resource_quota, "capped user", ch_args);
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
{
// Attempt to get more than the maximum
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_slice_allocator_allocate(
slice_allocator, 4 * 1024, 1, grpc_slice_allocator_intent::kReadBuffer,
&buffer, [](void*, grpc_error_handle) {}, nullptr));
}
grpc_slice max_slice = grpc_slice_buffer_take_first(&buffer);
GPR_ASSERT(grpc_refcounted_slice_length(max_slice) == 2048);
{
// Attempt to get less than the minimum
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_slice_allocator_allocate(
slice_allocator, 512, 1, grpc_slice_allocator_intent::kReadBuffer,
&buffer, [](void*, grpc_error_handle) {}, nullptr));
}
grpc_slice min_slice = grpc_slice_buffer_take_first(&buffer);
GPR_ASSERT(grpc_refcounted_slice_length(min_slice) == 1024);
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_unref(max_slice);
grpc_slice_unref(min_slice);
grpc_slice_allocator_destroy(slice_allocator);
grpc_resource_quota_unref(resource_quota);
grpc_slice_buffer_destroy_internal(&buffer);
grpc_channel_args_destroy(ch_args);
}
}
static void test_one_slice_deleted_late(void) {
gpr_log(GPR_INFO, "** test_one_slice_deleted_late **");
grpc_resource_quota* q =
grpc_resource_quota_create("test_one_slice_deleted_late");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
grpc_resource_user_slice_allocator alloc;
grpc_slice_allocator* alloc = grpc_slice_allocator_create(q, "usr");
int num_allocs = 0;
grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer));
GPR_ASSERT(!grpc_slice_allocator_allocate(
alloc, 1024, 1, grpc_slice_allocator_intent::kDefault, &buffer,
inc_int_cb, &num_allocs));
grpc_core::ExecCtx::Get()->Flush();
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_unref(usr);
}
grpc_resource_quota_unref(q);
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_allocator_destroy(alloc);
grpc_resource_quota_unref(q);
grpc_slice_buffer_destroy_internal(&buffer);
}
}
@ -763,20 +864,17 @@ static void test_negative_rq_free_pool(void) {
grpc_resource_quota* q =
grpc_resource_quota_create("test_negative_rq_free_pool");
grpc_resource_quota_resize(q, 1024);
grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
grpc_resource_user_slice_allocator alloc;
grpc_slice_allocator* alloc = grpc_slice_allocator_create(q, "usr");
int num_allocs = 0;
grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);
grpc_slice_buffer buffer;
grpc_slice_buffer_init(&buffer);
{
const int start_allocs = num_allocs;
grpc_core::ExecCtx exec_ctx;
GPR_ASSERT(!grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer));
GPR_ASSERT(!grpc_slice_allocator_allocate(
alloc, 1024, 1, grpc_slice_allocator_intent::kDefault, &buffer,
inc_int_cb, &num_allocs));
grpc_core::ExecCtx::Get()->Flush();
assert_counter_becomes(&num_allocs, start_allocs + 1);
}
@ -789,12 +887,8 @@ static void test_negative_rq_free_pool(void) {
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_user_unref(usr);
}
grpc_resource_quota_unref(q);
{
grpc_core::ExecCtx exec_ctx;
grpc_slice_allocator_destroy(alloc);
grpc_resource_quota_unref(q);
grpc_slice_buffer_destroy_internal(&buffer);
}
}
@ -867,8 +961,8 @@ static void test_thread_maxquota_change() {
grpc_resource_quota_set_max_threads(rq, 150);
GPR_ASSERT(grpc_resource_user_allocate_threads(ru2, 20)); // ru2=70, ru1=50
// Decrease maxquota (Note: Quota already given to ru1 and ru2 is unaffected)
// Max threads = 10;
// Decrease maxquota (Note: quota already given to ru1 and ru2 is
// unaffected). Max threads = 10.
grpc_resource_quota_set_max_threads(rq, 10);
// New requests will fail until quota is available
@ -918,6 +1012,9 @@ int main(int argc, char** argv) {
test_one_slice_deleted_late();
test_resize_to_zero();
test_negative_rq_free_pool();
test_one_slice_through_slice_allocator_factory();
test_slice_allocator_pressure_adjusted_allocation();
test_slice_allocator_capped_allocation();
gpr_mu_destroy(&g_mu);
gpr_cv_destroy(&g_cv);
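
Judging from the assertions above (an inference from the tests, not a documented contract), grpc_slice_allocator_allocate returns false when the slices will instead be delivered asynchronously through the callback; a usage sketch with made-up helper names:

// If allocate() returns false, the callback appears to be scheduled on the
// ExecCtx rather than the slices being ready synchronously.
static void on_slices_ready(void* /*arg*/, grpc_error_handle /*error*/) {
  // The slice_buffer passed to grpc_slice_allocator_allocate is now filled.
}

static void fill_read_buffer(grpc_slice_allocator* alloc,
                             grpc_slice_buffer* buffer) {
  grpc_core::ExecCtx exec_ctx;
  if (!grpc_slice_allocator_allocate(
          alloc, /*length=*/1024, /*count=*/1,
          grpc_slice_allocator_intent::kReadBuffer, buffer, on_slices_ready,
          /*user_data=*/nullptr)) {
    grpc_core::ExecCtx::Get()->Flush();  // run the pending callback
  }
}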

@ -38,6 +38,7 @@
#include "src/core/lib/iomgr/pollset_set.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/timer.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
static grpc_pollset_set* g_pollset_set;
@ -77,6 +78,7 @@ static void must_fail(void* /*arg*/, grpc_error_handle error) {
}
void test_succeeds(void) {
gpr_log(GPR_ERROR, "---- starting test_succeeds() ----");
grpc_resolved_address resolved_addr;
struct sockaddr_in* addr =
reinterpret_cast<struct sockaddr_in*>(resolved_addr.addr);
@ -86,8 +88,6 @@ void test_succeeds(void) {
grpc_closure done;
grpc_core::ExecCtx exec_ctx;
gpr_log(GPR_DEBUG, "test_succeeds");
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
addr->sin_family = AF_INET;
@ -107,9 +107,9 @@ void test_succeeds(void) {
GPR_ASSERT(getsockname(svr_fd, (struct sockaddr*)addr,
(socklen_t*)&resolved_addr.len) == 0);
GRPC_CLOSURE_INIT(&done, must_succeed, nullptr, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr,
&resolved_addr, GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(
&done, &g_connecting, grpc_slice_allocator_create_unlimited(),
g_pollset_set, nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
/* await the connection */
do {
resolved_addr.len = static_cast<socklen_t>(sizeof(addr));
@ -134,9 +134,11 @@ void test_succeeds(void) {
}
gpr_mu_unlock(g_mu);
gpr_log(GPR_ERROR, "---- finished test_succeeds() ----");
}
void test_fails(void) {
gpr_log(GPR_ERROR, "---- starting test_fails() ----");
grpc_resolved_address resolved_addr;
struct sockaddr_in* addr =
reinterpret_cast<struct sockaddr_in*>(resolved_addr.addr);
@ -144,8 +146,6 @@ void test_fails(void) {
grpc_closure done;
grpc_core::ExecCtx exec_ctx;
gpr_log(GPR_DEBUG, "test_fails");
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
addr->sin_family = AF_INET;
@ -156,9 +156,9 @@ void test_fails(void) {
/* connect to a broken address */
GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr,
&resolved_addr, GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(
&done, &g_connecting, grpc_slice_allocator_create_unlimited(),
g_pollset_set, nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
gpr_mu_lock(g_mu);
/* wait for the connection callback to finish */
@ -183,6 +183,51 @@ void test_fails(void) {
}
gpr_mu_unlock(g_mu);
gpr_log(GPR_ERROR, "---- finished test_fails() ----");
}
void test_fails_bad_addr_no_leak(void) {
gpr_log(GPR_ERROR, "---- starting test_fails_bad_addr_no_leak() ----");
grpc_resolved_address resolved_addr;
struct sockaddr_in* addr =
reinterpret_cast<struct sockaddr_in*>(resolved_addr.addr);
int connections_complete_before;
grpc_closure done;
grpc_core::ExecCtx exec_ctx;
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_in));
// Force `grpc_tcp_client_prepare_fd` to fail. Contrived, but effective.
addr->sin_family = AF_IPX;
gpr_mu_lock(g_mu);
connections_complete_before = g_connections_complete;
gpr_mu_unlock(g_mu);
// connect to an invalid address.
GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(
&done, &g_connecting, grpc_slice_allocator_create_unlimited(),
g_pollset_set, nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
gpr_mu_lock(g_mu);
while (g_connections_complete == connections_complete_before) {
grpc_pollset_worker* worker = nullptr;
grpc_millis polling_deadline = test_deadline();
switch (grpc_timer_check(&polling_deadline)) {
case GRPC_TIMERS_FIRED:
break;
case GRPC_TIMERS_NOT_CHECKED:
polling_deadline = 0;
ABSL_FALLTHROUGH_INTENDED;
case GRPC_TIMERS_CHECKED_AND_EMPTY:
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work",
grpc_pollset_work(g_pollset, &worker, polling_deadline)));
break;
}
gpr_mu_unlock(g_mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(g_mu);
}
gpr_mu_unlock(g_mu);
gpr_log(GPR_ERROR, "---- finished test_fails_bad_addr_no_leak() ----");
}
static void destroy_pollset(void* p, grpc_error_handle /*error*/) {
@ -202,8 +247,8 @@ int main(int argc, char** argv) {
grpc_pollset_set_add_pollset(g_pollset_set, g_pollset);
test_succeeds();
gpr_log(GPR_ERROR, "End of first test");
test_fails();
test_fails_bad_addr_no_leak();
grpc_pollset_set_destroy(g_pollset_set);
GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
grpc_schedule_on_exec_ctx);
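
For reference, the updated connect signature with the argument roles annotated (a sketch; the parameter labels are assumptions, and the connect path taking ownership of the allocator is inferred from test_fails_bad_addr_no_leak above):

grpc_tcp_client_connect(&done, &g_connecting,
                        grpc_slice_allocator_create_unlimited(),
                        /*interested_parties=*/g_pollset_set,
                        /*channel_args=*/nullptr, &resolved_addr,
                        GRPC_MILLIS_INF_FUTURE);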

@ -35,6 +35,7 @@
#include "src/core/lib/iomgr/iomgr.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/timer.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
static gpr_mu* g_mu;
@ -108,9 +109,9 @@ void test_succeeds(void) {
GPR_ASSERT(uv_tcp_getsockname(svr_handle, (struct sockaddr*)addr,
(int*)&resolved_addr.len) == 0);
GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, NULL, NULL, &resolved_addr,
GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(&done, &g_connecting,
grpc_slice_allocator_create_unlimited(), NULL, NULL,
&resolved_addr, GRPC_MILLIS_INF_FUTURE);
gpr_mu_lock(g_mu);
while (g_connections_complete == connections_complete_before) {
@ -151,9 +152,9 @@ void test_fails(void) {
/* connect to a broken address */
GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
grpc_tcp_client_connect(&done, &g_connecting, NULL, NULL, &resolved_addr,
GRPC_MILLIS_INF_FUTURE);
grpc_tcp_client_connect(&done, &g_connecting,
grpc_slice_allocator_create_unlimited(), NULL, NULL,
&resolved_addr, GRPC_MILLIS_INF_FUTURE);
gpr_mu_lock(g_mu);
/* wait for the connection callback to finish */

@ -41,6 +41,7 @@
#include "src/core/lib/iomgr/sockaddr_posix.h"
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
static gpr_mu* g_mu;
@ -218,8 +219,8 @@ static void read_test(size_t num_bytes, size_t slice_size) {
a[0].type = GRPC_ARG_INTEGER,
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
ep =
grpc_tcp_create(grpc_fd_create(sv[1], "read_test", false), &args, "test");
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test", false), &args, "test",
grpc_slice_allocator_create_unlimited());
grpc_endpoint_add_to_pollset(ep, g_pollset);
written_bytes = fill_socket_partial(sv[0], num_bytes);
@ -270,7 +271,7 @@ static void large_read_test(size_t slice_size) {
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test", false), &args,
"test");
"test", grpc_slice_allocator_create_unlimited());
grpc_endpoint_add_to_pollset(ep, g_pollset);
written_bytes = fill_socket(sv[0]);
@ -431,7 +432,7 @@ static void write_test(size_t num_bytes, size_t slice_size,
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test", collect_timestamps),
&args, "test");
&args, "test", grpc_slice_allocator_create_unlimited());
grpc_endpoint_add_to_pollset(ep, g_pollset);
state.ep = ep;
@ -507,8 +508,8 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
a[0].type = GRPC_ARG_INTEGER;
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
ep =
grpc_tcp_create(grpc_fd_create(sv[1], "read_test", false), &args, "test");
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test", false), &args, "test",
grpc_slice_allocator_create_unlimited());
GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
grpc_endpoint_add_to_pollset(ep, g_pollset);
@ -597,18 +598,17 @@ static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
grpc_core::ExecCtx exec_ctx;
create_sockets(sv);
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("tcp_posix_test_socketpair");
grpc_arg a[1];
a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
a[0].type = GRPC_ARG_INTEGER;
a[0].value.integer = static_cast<int>(slice_size);
grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client", false),
&args, "test");
f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server", false),
&args, "test");
grpc_resource_quota_unref_internal(resource_quota);
f.client_ep =
grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client", false), &args,
"test", grpc_slice_allocator_create_unlimited());
f.server_ep =
grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server", false), &args,
"test", grpc_slice_allocator_create_unlimited());
grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);
grpc_endpoint_add_to_pollset(f.server_ep, g_pollset);

@ -163,14 +163,22 @@ static void on_connect(void* /*arg*/, grpc_endpoint* tcp,
static void test_no_op(void) {
grpc_core::ExecCtx exec_ctx;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(nullptr, nullptr,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
grpc_tcp_server_unref(s);
}
static void test_no_op_with_start(void) {
grpc_core::ExecCtx exec_ctx;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(nullptr, nullptr,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_start");
std::vector<grpc_pollset*> empty_pollset;
grpc_tcp_server_start(s, &empty_pollset, on_connect, nullptr);
@ -183,7 +191,11 @@ static void test_no_op_with_port(void) {
struct sockaddr_in* addr =
reinterpret_cast<struct sockaddr_in*>(resolved_addr.addr);
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(nullptr, nullptr,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_port");
memset(&resolved_addr, 0, sizeof(resolved_addr));
@ -203,7 +215,11 @@ static void test_no_op_with_port_and_start(void) {
struct sockaddr_in* addr =
reinterpret_cast<struct sockaddr_in*>(resolved_addr.addr);
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(nullptr, nullptr,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_port_and_start");
int port = -1;
@ -300,7 +316,11 @@ static void test_connect(size_t num_connects,
grpc_tcp_server* s;
const unsigned num_ports = 2;
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(nullptr, channel_args, &s));
grpc_tcp_server_create(
nullptr, channel_args,
grpc_slice_allocator_factory_create(
grpc_resource_quota_from_channel_args(channel_args, true)),
&s));
unsigned port_num;
server_weak_ref weak_ref;
server_weak_ref_init(&weak_ref);
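
The server-side counterpart, sketched (the shutdown_complete label is an assumption): grpc_tcp_server_create now takes a slice allocator factory, from which per-connection allocators can later be minted, as grpc_slice_allocator_factory_create_slice_allocator in the resource_quota test above suggests.

grpc_tcp_server* s = nullptr;
grpc_error_handle error = grpc_tcp_server_create(
    /*shutdown_complete=*/nullptr, channel_args,
    grpc_slice_allocator_factory_create(
        grpc_resource_quota_from_channel_args(channel_args, /*create=*/true)),
    &s);
GPR_ASSERT(error == GRPC_ERROR_NONE);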

@ -117,7 +117,11 @@ static void on_connect(void* arg, grpc_endpoint* tcp, grpc_pollset* pollset,
static void test_no_op(void) {
grpc_core::ExecCtx exec_ctx;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(NULL, NULL,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
grpc_tcp_server_unref(s);
grpc_core::ExecCtx::Get()->Flush();
}
@ -125,7 +129,11 @@ static void test_no_op(void) {
static void test_no_op_with_start(void) {
grpc_core::ExecCtx exec_ctx;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(NULL, NULL,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_start");
grpc_tcp_server_start(s, NULL, 0, on_connect, NULL);
grpc_tcp_server_unref(s);
@ -137,7 +145,11 @@ static void test_no_op_with_port(void) {
grpc_resolved_address resolved_addr;
struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(NULL, NULL,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_port");
memset(&resolved_addr, 0, sizeof(resolved_addr));
@ -157,7 +169,11 @@ static void test_no_op_with_port_and_start(void) {
grpc_resolved_address resolved_addr;
struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(NULL, NULL,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
LOG_TEST("test_no_op_with_port_and_start");
int port;
@ -227,7 +243,11 @@ static void test_connect(unsigned n) {
int svr_port;
int svr1_port;
grpc_tcp_server* s;
GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
GPR_ASSERT(GRPC_ERROR_NONE ==
grpc_tcp_server_create(NULL, NULL,
grpc_slice_allocator_factory_create(
grpc_resource_quota_create(nullptr)),
&s));
unsigned i;
server_weak_ref weak_ref;
server_weak_ref_init(&weak_ref);

@ -30,6 +30,7 @@
#include "src/core/lib/security/transport/secure_endpoint.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/tsi/fake_transport_security.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
static gpr_mu* g_mu;

@ -24,6 +24,7 @@
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/security_connector.h"
#include "test/core/util/mock_endpoint.h"
#include "test/core/util/resource_user_util.h"
#define CA_CERT_PATH "src/core/tsi/test_creds/ca.pem"
#define SERVER_CERT_PATH "src/core/tsi/test_creds/server1.pem"
@ -59,11 +60,10 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
{
grpc_core::ExecCtx exec_ctx;
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("ssl_server_fuzzer");
grpc_slice_allocator* slice_allocator =
grpc_slice_allocator_create_unlimited();
grpc_endpoint* mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_resource_quota_unref_internal(resource_quota);
grpc_mock_endpoint_create(discard_write, slice_allocator);
grpc_mock_endpoint_put_read(
mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
@ -114,7 +114,6 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Explicit close"));
grpc_core::ExecCtx::Get()->Flush();
}
GPR_ASSERT(state.done_callback_called);
sc.reset(DEBUG_LOCATION, "test");

@ -137,7 +137,10 @@ void bad_server_thread(void* vargs) {
grpc_sockaddr* addr = reinterpret_cast<grpc_sockaddr*>(resolved_addr.addr);
int port;
grpc_tcp_server* s;
grpc_error_handle error = grpc_tcp_server_create(nullptr, nullptr, &s);
grpc_error_handle error = grpc_tcp_server_create(
nullptr, nullptr,
grpc_slice_allocator_factory_create(grpc_resource_quota_create(nullptr)),
&s);
GPR_ASSERT(error == GRPC_ERROR_NONE);
memset(&resolved_addr, 0, sizeof(resolved_addr));
addr->sa_family = GRPC_AF_INET;

@ -71,10 +71,13 @@ TEST_F(ContextListTest, ExecuteFlushesList) {
GRPC_STREAM_REF_INIT(&ref, 1, nullptr, nullptr, "phony ref");
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("context_list_test");
grpc_endpoint* mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_transport* t =
grpc_create_chttp2_transport(nullptr, mock_endpoint, true);
grpc_endpoint* mock_endpoint = grpc_mock_endpoint_create(
discard_write,
grpc_slice_allocator_create(resource_quota, "mock_endpoint"));
grpc_transport* t = grpc_create_chttp2_transport(
nullptr, mock_endpoint, true,
grpc_resource_user_create(resource_quota, "mock_transport"));
grpc_resource_quota_unref(resource_quota);
std::vector<grpc_chttp2_stream*> s;
s.reserve(kNumElems);
gpr_atm verifier_called[kNumElems];
@ -100,7 +103,6 @@ TEST_F(ContextListTest, ExecuteFlushesList) {
gpr_free(s[i]);
}
grpc_transport_destroy(t);
grpc_resource_quota_unref(resource_quota);
exec_ctx.Flush();
}
@ -127,10 +129,13 @@ TEST_F(ContextListTest, NonEmptyListEmptyTimestamp) {
GRPC_STREAM_REF_INIT(&ref, 1, nullptr, nullptr, "phony ref");
grpc_resource_quota* resource_quota =
grpc_resource_quota_create("context_list_test");
grpc_endpoint* mock_endpoint =
grpc_mock_endpoint_create(discard_write, resource_quota);
grpc_transport* t =
grpc_create_chttp2_transport(nullptr, mock_endpoint, true);
grpc_endpoint* mock_endpoint = grpc_mock_endpoint_create(
discard_write,
grpc_slice_allocator_create(resource_quota, "mock_endpoint"));
grpc_transport* t = grpc_create_chttp2_transport(
nullptr, mock_endpoint, true,
grpc_resource_user_create(resource_quota, "mock_transport"));
grpc_resource_quota_unref(resource_quota);
std::vector<grpc_chttp2_stream*> s;
s.reserve(kNumElems);
gpr_atm verifier_called[kNumElems];
@ -155,7 +160,6 @@ TEST_F(ContextListTest, NonEmptyListEmptyTimestamp) {
gpr_free(s[i]);
}
grpc_transport_destroy(t);
grpc_resource_quota_unref(resource_quota);
exec_ctx.Flush();
}
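Note: both ContextListTest hunks follow one pattern: the endpoint gets its own grpc_slice_allocator and the transport its own grpc_resource_user, both carved from the same quota, after which the test's quota ref can be dropped immediately (the trailing grpc_resource_quota_unref() after grpc_transport_destroy() is deleted). A condensed sketch, with the ownership comments inferred from the PR description rather than stated in these hunks:

grpc_resource_quota* resource_quota =
    grpc_resource_quota_create("context_list_test");
grpc_endpoint* mock_endpoint = grpc_mock_endpoint_create(
    discard_write,
    grpc_slice_allocator_create(resource_quota, "mock_endpoint"));
grpc_transport* t = grpc_create_chttp2_transport(
    nullptr, mock_endpoint, true,
    grpc_resource_user_create(resource_quota, "mock_transport"));
// The endpoint and transport each own what they were handed, so this ref
// can go now rather than at teardown.
grpc_resource_quota_unref(resource_quota);
// ... exercise the transport ...
grpc_transport_destroy(t);  // presumably also releases the resource user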

@ -39,6 +39,7 @@
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/util/port.h"
#include "test/core/util/resource_user_util.h"
#include "test/core/util/test_config.h"
namespace grpc_core {
@ -113,9 +114,10 @@ class Client {
grpc_pollset_set* pollset_set = grpc_pollset_set_create();
grpc_pollset_set_add_pollset(pollset_set, pollset_);
EventState state;
grpc_tcp_client_connect(state.closure(), &endpoint_, pollset_set,
nullptr /* channel_args */, server_addresses->addrs,
grpc_core::ExecCtx::Get()->Now() + 1000);
grpc_tcp_client_connect(
state.closure(), &endpoint_, grpc_slice_allocator_create_unlimited(),
pollset_set, nullptr /* channel_args */, server_addresses->addrs,
grpc_core::ExecCtx::Get()->Now() + 1000);
ASSERT_TRUE(PollUntilDone(
&state,
grpc_timespec_to_millis_round_up(gpr_inf_future(GPR_CLOCK_MONOTONIC))));
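Note: grpc_tcp_client_connect() now takes a grpc_slice_allocator* between the closure/endpoint pair and the pollset_set. A sketch assuming the argument order shown above; the ownership comment is an inference from the PR's theme, not something this hunk states:

EventState state;
// An unlimited allocator suits a test client with no quota pressure; the
// connect path presumably assumes ownership of it, matching the other
// ownership moves in this PR.
grpc_tcp_client_connect(
    state.closure(), &endpoint_, grpc_slice_allocator_create_unlimited(),
    pollset_set, nullptr /* channel_args */, server_addresses->addrs,
    grpc_core::ExecCtx::Get()->Now() + 1000);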

@ -46,6 +46,7 @@ grpc_cc_library(
"port_server_client.cc",
"reconnect_server.cc",
"resolve_localhost_ip46.cc",
"resource_user_util.cc",
"slice_splitter.cc",
"subprocess_posix.cc",
"subprocess_windows.cc",
@ -69,6 +70,7 @@ grpc_cc_library(
"port_server_client.h",
"reconnect_server.h",
"resolve_localhost_ip46.h",
"resource_user_util.h",
"slice_splitter.h",
"subprocess.h",
"test_config.h",
@ -79,6 +81,7 @@ grpc_cc_library(
external_deps = [
"absl/debugging:failure_signal_handler",
"absl/debugging:symbolize",
"absl/strings:str_format",
],
language = "C++",
deps = [

@ -27,8 +27,8 @@ class MockAuthorizationEndpoint : public grpc_endpoint {
absl::string_view peer_uri)
: local_address_(local_uri), peer_address_(peer_uri) {
static constexpr grpc_endpoint_vtable vtable = {
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
nullptr, nullptr, GetPeer, GetLocalAddress, nullptr, nullptr};
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
nullptr, GetPeer, GetLocalAddress, nullptr, nullptr};
grpc_endpoint::vtable = &vtable;
}
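Note: this hunk shrinks the stub vtable from twelve entries to eleven because grpc_endpoint_vtable loses its get_resource_user slot (see the endpoint.h entry in the file list). A hedged reconstruction of the new layout; the field names are assumptions pieced together from the initializers elsewhere in this diff:

static constexpr grpc_endpoint_vtable vtable = {
    /*read=*/nullptr,
    /*write=*/nullptr,
    /*add_to_pollset=*/nullptr,
    /*add_to_pollset_set=*/nullptr,
    /*delete_from_pollset_set=*/nullptr,
    /*shutdown=*/nullptr,
    /*destroy=*/nullptr,
    // get_resource_user previously sat here; this PR removes it.
    /*get_peer=*/GetPeer,
    /*get_local_address=*/GetLocalAddress,
    /*get_fd=*/nullptr,
    /*can_track_err=*/nullptr};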

@ -41,7 +41,7 @@ typedef struct mock_endpoint {
grpc_slice_buffer read_buffer;
grpc_slice_buffer* on_read_out;
grpc_closure* on_read;
grpc_resource_user* resource_user;
grpc_slice_allocator* slice_allocator;
} mock_endpoint;
static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
@ -86,14 +86,13 @@ static void me_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
m->on_read = nullptr;
}
gpr_mu_unlock(&m->mu);
grpc_resource_user_shutdown(m->resource_user);
GRPC_ERROR_UNREF(why);
}
static void me_destroy(grpc_endpoint* ep) {
mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
grpc_slice_buffer_destroy(&m->read_buffer);
grpc_resource_user_unref(m->resource_user);
grpc_slice_allocator_destroy(m->slice_allocator);
gpr_mu_destroy(&m->mu);
gpr_free(m);
}
@ -106,11 +105,6 @@ static absl::string_view me_get_local_address(grpc_endpoint* /*ep*/) {
return "fake:mock_endpoint";
}
static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
mock_endpoint* m = reinterpret_cast<mock_endpoint*>(ep);
return m->resource_user;
}
static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
@ -122,19 +116,16 @@ static const grpc_endpoint_vtable vtable = {me_read,
me_delete_from_pollset_set,
me_shutdown,
me_destroy,
me_get_resource_user,
me_get_peer,
me_get_local_address,
me_get_fd,
me_can_track_err};
grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
grpc_resource_quota* resource_quota) {
grpc_endpoint* grpc_mock_endpoint_create(
void (*on_write)(grpc_slice slice), grpc_slice_allocator* slice_allocator) {
mock_endpoint* m = static_cast<mock_endpoint*>(gpr_malloc(sizeof(*m)));
m->base.vtable = &vtable;
std::string name =
absl::StrFormat("mock_endpoint_%" PRIxPTR, reinterpret_cast<intptr_t>(m));
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
m->slice_allocator = slice_allocator;
grpc_slice_buffer_init(&m->read_buffer);
gpr_mu_init(&m->mu);
m->on_write = on_write;

@ -22,7 +22,7 @@
#include "src/core/lib/iomgr/endpoint.h"
grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
grpc_resource_quota* resource_quota);
grpc_slice_allocator* slice_allocator);
void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice);
#endif
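Note: taken together, the mock_endpoint hunks change its contract: the caller supplies a grpc_slice_allocator that the endpoint owns (destroyed in me_destroy), and endpoints no longer expose a resource user. A hedged usage sketch; the driver function is hypothetical:

#include "test/core/util/mock_endpoint.h"
#include "test/core/util/resource_user_util.h"

static void discard_write(grpc_slice slice) { grpc_slice_unref(slice); }

static void exercise_mock_endpoint() {
  grpc_core::ExecCtx exec_ctx;
  grpc_endpoint* ep = grpc_mock_endpoint_create(
      discard_write, grpc_slice_allocator_create_unlimited());
  grpc_mock_endpoint_put_read(ep, grpc_slice_from_static_string("ping"));
  // ... drive reads with grpc_endpoint_read(), then:
  grpc_endpoint_destroy(ep);  // also frees the slice allocator
}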

@ -36,6 +36,7 @@
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/slice/slice_internal.h"
#include "test/core/util/resource_user_util.h"
typedef struct passthru_endpoint passthru_endpoint;
@ -45,7 +46,7 @@ typedef struct {
grpc_slice_buffer read_buffer;
grpc_slice_buffer* on_read_out;
grpc_closure* on_read;
grpc_resource_user* resource_user;
grpc_slice_allocator* slice_allocator;
} half;
struct passthru_endpoint {
@ -131,7 +132,6 @@ static void me_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
m->on_read = nullptr;
}
gpr_mu_unlock(&m->parent->mu);
grpc_resource_user_shutdown(m->resource_user);
GRPC_ERROR_UNREF(why);
}
@ -144,8 +144,8 @@ static void me_destroy(grpc_endpoint* ep) {
grpc_passthru_endpoint_stats_destroy(p->stats);
grpc_slice_buffer_destroy_internal(&p->client.read_buffer);
grpc_slice_buffer_destroy_internal(&p->server.read_buffer);
grpc_resource_user_unref(p->client.resource_user);
grpc_resource_user_unref(p->server.resource_user);
grpc_slice_allocator_destroy(p->client.slice_allocator);
grpc_slice_allocator_destroy(p->server.slice_allocator);
gpr_free(p);
} else {
gpr_mu_unlock(&p->mu);
@ -170,11 +170,6 @@ static int me_get_fd(grpc_endpoint* /*ep*/) { return -1; }
static bool me_can_track_err(grpc_endpoint* /*ep*/) { return false; }
static grpc_resource_user* me_get_resource_user(grpc_endpoint* ep) {
half* m = reinterpret_cast<half*>(ep);
return m->resource_user;
}
static const grpc_endpoint_vtable vtable = {
me_read,
me_write,
@ -183,7 +178,6 @@ static const grpc_endpoint_vtable vtable = {
me_delete_from_pollset_set,
me_shutdown,
me_destroy,
me_get_resource_user,
me_get_peer,
me_get_local_address,
me_get_fd,
@ -191,7 +185,7 @@ static const grpc_endpoint_vtable vtable = {
};
static void half_init(half* m, passthru_endpoint* parent,
grpc_resource_quota* resource_quota,
grpc_slice_allocator* slice_allocator,
const char* half_name) {
m->base.vtable = &vtable;
m->parent = parent;
@ -199,12 +193,11 @@ static void half_init(half* m, passthru_endpoint* parent,
m->on_read = nullptr;
std::string name =
absl::StrFormat("passthru_endpoint_%s_%p", half_name, parent);
m->resource_user = grpc_resource_user_create(resource_quota, name.c_str());
m->slice_allocator = slice_allocator;
}
void grpc_passthru_endpoint_create(grpc_endpoint** client,
grpc_endpoint** server,
grpc_resource_quota* resource_quota,
grpc_passthru_endpoint_stats* stats) {
passthru_endpoint* m =
static_cast<passthru_endpoint*>(gpr_malloc(sizeof(*m)));
@ -216,8 +209,8 @@ void grpc_passthru_endpoint_create(grpc_endpoint** client,
gpr_ref(&stats->refs);
m->stats = stats;
}
half_init(&m->client, m, resource_quota, "client");
half_init(&m->server, m, resource_quota, "server");
half_init(&m->client, m, grpc_slice_allocator_create_unlimited(), "client");
half_init(&m->server, m, grpc_slice_allocator_create_unlimited(), "server");
gpr_mu_init(&m->mu);
*client = &m->client.base;
*server = &m->server.base;

@ -16,8 +16,8 @@
*
*/
#ifndef MOCK_ENDPOINT_H
#define MOCK_ENDPOINT_H
#ifndef PASSTHRU_ENDPOINT_H
#define PASSTHRU_ENDPOINT_H
#include <grpc/support/atm.h>
@ -33,11 +33,10 @@ typedef struct {
void grpc_passthru_endpoint_create(grpc_endpoint** client,
grpc_endpoint** server,
grpc_resource_quota* resource_quota,
grpc_passthru_endpoint_stats* stats);
grpc_passthru_endpoint_stats* grpc_passthru_endpoint_stats_create();
void grpc_passthru_endpoint_stats_destroy(grpc_passthru_endpoint_stats* stats);
#endif
#endif // PASSTHRU_ENDPOINT_H
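Note: the passthru endpoint loses its resource_quota parameter altogether; half_init() now receives a slice allocator per half, and grpc_passthru_endpoint_create() builds an unlimited one for each side. A sketch of the new call, assuming nullptr stats is allowed (the gpr_ref branch in the create hunk suggests stats is optional and ref-counted):

grpc_endpoint* client;
grpc_endpoint* server;
grpc_passthru_endpoint_create(&client, &server, /*stats=*/nullptr);
// Each half owns an unlimited allocator, freed via
// grpc_slice_allocator_destroy() in me_destroy().
grpc_endpoint_destroy(client);
grpc_endpoint_destroy(server);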

@ -92,7 +92,6 @@ void grpc_free_port_using_server(int port) {
GRPC_CLOSURE_CREATE(freed_port_from_server, &pr,
grpc_schedule_on_exec_ctx),
&rsp);
grpc_resource_quota_unref_internal(resource_quota);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(pr.mu);
while (!pr.done) {
@ -175,7 +174,6 @@ static void got_port_from_server(void* arg, grpc_error_handle error) {
GRPC_CLOSURE_CREATE(got_port_from_server, pr,
grpc_schedule_on_exec_ctx),
&pr->response);
grpc_resource_quota_unref_internal(resource_quota);
return;
}
GPR_ASSERT(response);
@ -225,7 +223,6 @@ int grpc_pick_port_using_server(void) {
GRPC_CLOSURE_CREATE(got_port_from_server, &pr,
grpc_schedule_on_exec_ctx),
&pr.response);
grpc_resource_quota_unref_internal(resource_quota);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(pr.mu);
while (pr.port == -1) {
