Fix a transport leak in subchannel (#32071)

* Use a unique_ptr with custom deleter for grpc_transport in
SubchannelConnector::Result

* comment

* fix

* iwyu

* std::ignore

* review

* explicit release ownership
pull/32034/head^2
Yijie Ma 2 years ago committed by GitHub
parent 1e32218bb8
commit fc54381aec
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      src/core/ext/filters/client_channel/connector.h
  2. 6
      src/core/ext/filters/client_channel/subchannel.cc
  3. 8
      src/core/ext/transport/chttp2/client/chttp2_connector.cc
  4. 86
      test/core/end2end/fuzzers/api_fuzzer_corpus/testcase-6256752625319936

@@ -28,6 +28,7 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/iomgr_fwd.h"
 #include "src/core/lib/iomgr/resolved_address.h"
+#include "src/core/lib/transport/transport.h"
 #include "src/core/lib/transport/transport_fwd.h"

 namespace grpc_core {
@@ -57,7 +58,10 @@ class SubchannelConnector : public InternallyRefCounted<SubchannelConnector> {
     RefCountedPtr<channelz::SocketNode> socket_node;
     void Reset() {
-      transport = nullptr;
+      if (transport != nullptr) {
+        grpc_transport_destroy(transport);
+        transport = nullptr;
+      }
       channel_args = ChannelArgs();
       socket_node.reset();
     }

@@ -879,6 +879,7 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error_handle error) {
 void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
   if (shutdown_) {
+    connecting_result_.Reset();
     return;
   }
   // If we didn't get a transport or we fail to publish it, report
@@ -926,12 +927,15 @@ bool Subchannel::PublishTransportLocked() {
   absl::StatusOr<RefCountedPtr<grpc_channel_stack>> stk = builder.Build();
   if (!stk.ok()) {
     auto error = absl_status_to_grpc_error(stk.status());
-    grpc_transport_destroy(connecting_result_.transport);
+    connecting_result_.Reset();
     gpr_log(GPR_ERROR,
             "subchannel %p %s: error initializing subchannel stack: %s", this,
             key_.ToString().c_str(), StatusToString(error).c_str());
     return false;
   }
+  // Release the ownership since it is now owned by the connected filter in the
+  // channel stack (published).
+  connecting_result_.transport = nullptr;
   RefCountedPtr<channelz::SocketNode> socket =
       std::move(connecting_result_.socket_node);
   connecting_result_.Reset();

@@ -167,10 +167,10 @@ void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
     } else if (args->endpoint != nullptr) {
       self->result_->transport =
           grpc_create_chttp2_transport(args->args, args->endpoint, true);
+      GPR_ASSERT(self->result_->transport != nullptr);
       self->result_->socket_node =
           grpc_chttp2_transport_get_socket_node(self->result_->transport);
       self->result_->channel_args = args->args;
-      GPR_ASSERT(self->result_->transport != nullptr);
       self->endpoint_ = args->endpoint;
       self->Ref().release();  // Ref held by OnReceiveSettings()
       GRPC_CLOSURE_INIT(&self->on_receive_settings_, OnReceiveSettings, self,
@@ -206,9 +206,6 @@ void Chttp2Connector::OnReceiveSettings(void* arg, grpc_error_handle error) {
         self->args_.interested_parties);
     if (!error.ok()) {
       // Transport got an error while waiting on SETTINGS frame.
-      // TODO(yashykt): The following two lines should be moved to
-      // SubchannelConnector::Result::Reset()
-      grpc_transport_destroy(self->result_->transport);
       self->result_->Reset();
     }
     self->MaybeNotify(error);
@@ -236,9 +233,6 @@ void Chttp2Connector::OnTimeout() {
   // The transport did not receive the settings frame in time. Destroy the
   // transport.
   grpc_endpoint_delete_from_pollset_set(endpoint_, args_.interested_parties);
-  // TODO(yashykt): The following two lines should be moved to
-  // SubchannelConnector::Result::Reset()
-  grpc_transport_destroy(result_->transport);
   result_->Reset();
   MaybeNotify(GRPC_ERROR_CREATE(
       "connection attempt timed out before receiving SETTINGS frame"));

@ -0,0 +1,86 @@
actions {
create_server {
channel_args {
key: "grpc.resource_quota"
str: "unix::67440737046,,Sac,,t49,Sa\n\000c,,t"
}
}
}
actions {
create_channel {
target: "unix::67440737046,,Sac,,t49,Sa\n\000c,,t"
channel_args {
key: "M"
}
channel_args {
key: "grpc.resource_quota"
resource_quota {
}
}
channel_actions {
wait_ms: 4096
}
channel_actions {
add_n_bytes_writable: 1024
add_n_bytes_readable: 1
}
channel_actions {
wait_ms: 1
}
}
}
actions {
create_call {
method {
value: "2"
}
host {
value: "1"
}
timeout: 1000000127
}
}
actions {
resize_resource_quota: 3881880
}
actions {
queue_batch {
operations {
send_initial_metadata {
metadata {
key {
value: "2"
}
}
}
}
}
}
actions {
resize_resource_quota: 4294967294
}
actions {
}
actions {
}
actions {
resize_resource_quota: 3881880
}
actions {
request_call {
}
}
actions {
}
actions {
resize_resource_quota: 3881880
}
actions {
}
event_engine_actions {
tick_lengths {
}
tick_lengths {
delay_us: 5692549928996306944
}
}
Loading…
Cancel
Save