[mpsc] Reads should fail on read closed (#38138)

If we close reads on an MPSC, then pending readers should also fail; not doing so can open the way for some hard-to-diagnose stuck bugs.

Closes #38138
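As an illustration of the intended behavior, here is a self-contained toy model (not the grpc promise machinery; all names below are invented for this sketch): once the read side is closed, a read that would otherwise stay pending resolves to failure instead of hanging, while items that were already queued are still delivered first, matching PollReceiveBatch in the diff below.

// Toy model only: demonstrates "reads fail once reads are closed" without
// grpc's promise types. ToyMpsc and its members are hypothetical names.
#include <cassert>
#include <queue>
#include <utility>

template <typename T>
class ToyMpsc {
 public:
  enum class Read { kPending, kReady, kFailed };

  void Send(T item) { queue_.push(std::move(item)); }
  void CloseReads() { closed_ = true; }

  // Old behavior: an empty queue always reported kPending, even after close,
  // so a reader could get stuck forever. New behavior: empty + closed fails.
  Read PollNext(T& out) {
    if (!queue_.empty()) {
      out = std::move(queue_.front());
      queue_.pop();
      return Read::kReady;
    }
    return closed_ ? Read::kFailed : Read::kPending;
  }

 private:
  std::queue<T> queue_;
  bool closed_ = false;
};

int main() {
  ToyMpsc<int> mpsc;
  int value = 0;
  assert(mpsc.PollNext(value) == ToyMpsc<int>::Read::kPending);  // nothing queued, still open
  mpsc.Send(42);
  assert(mpsc.PollNext(value) == ToyMpsc<int>::Read::kReady && value == 42);
  mpsc.CloseReads();
  assert(mpsc.PollNext(value) == ToyMpsc<int>::Read::kFailed);  // previously would stay pending
  return 0;
}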

COPYBARA_INTEGRATE_REVIEW=https://github.com/grpc/grpc/pull/38138 from ctiller:flake-fightas-29 8bc61601be
PiperOrigin-RevId: 697023583
Branch: pull/38147/head
Author: Craig Tiller (committed by Copybara-Service)
Parent: 3f4f94979e
Commit: 83380d20a6
1. build_autogenerated.yaml (1 changed line)
2. src/core/BUILD (2 changed lines)
3. src/core/lib/promise/mpsc.h (39 changed lines)
4. test/core/promise/mpsc_test.cc (12 changed lines)

@@ -14189,6 +14189,7 @@ targets:
- src/core/lib/promise/mpsc.h
- src/core/lib/promise/poll.h
- src/core/lib/promise/promise.h
- src/core/lib/promise/status_flag.h
- src/core/lib/promise/wait_set.h
- src/core/util/atomic_utils.h
- src/core/util/down_cast.h

@@ -1264,8 +1264,10 @@ grpc_cc_library(
language = "c++",
deps = [
"activity",
"dump_args",
"poll",
"ref_counted",
"status_flag",
"wait_set",
"//:gpr",
"//:ref_counted_ptr",

@@ -28,7 +28,9 @@
#include "absl/log/check.h"
#include "src/core/lib/promise/activity.h"
#include "src/core/lib/promise/poll.h"
#include "src/core/lib/promise/status_flag.h"
#include "src/core/lib/promise/wait_set.h"
#include "src/core/util/dump_args.h"
#include "src/core/util/ref_counted.h"
#include "src/core/util/ref_counted_ptr.h"
#include "src/core/util/sync.h"
@@ -54,18 +56,23 @@ class Center : public RefCounted<Center<T>> {
// - Returns true if new items were obtained, in which case they are contained
// in dest in the order they were added. Wakes up all pending senders since
// there will now be space to send.
// - If receives have been closed, returns false.
// - If no new items are available, returns
// false and sets up a waker to be awoken when more items are available.
// Pending and sets up a waker to be awoken when more items are available.
// TODO(ctiller): consider the problem of thundering herds here. There may be
// more senders than there are queue spots, and so the strategy of waking up
// all senders is ill-advised.
// That said, some senders may have been cancelled by the time we wake them,
// and so waking a subset could cause starvation.
bool PollReceiveBatch(std::vector<T>& dest) {
Poll<bool> PollReceiveBatch(std::vector<T>& dest) {
ReleasableMutexLock lock(&mu_);
GRPC_TRACE_LOG(promise_primitives, INFO)
<< "MPSC::PollReceiveBatch: "
<< GRPC_DUMP_ARGS(this, batch_, queue_.size());
if (queue_.empty()) {
if (batch_ == kClosedBatch) return false;
receive_waker_ = GetContext<Activity>()->MakeNonOwningWaker();
return false;
return Pending{};
}
dest.swap(queue_);
queue_.clear();
@@ -97,18 +104,24 @@ class Center : public RefCounted<Center<T>> {
// Poll until a particular batch number is received.
Poll<Empty> PollReceiveBatch(uint64_t batch) {
ReleasableMutexLock lock(&mu_);
GRPC_TRACE_LOG(promise_primitives, INFO)
<< "MPSC::PollReceiveBatch: " << GRPC_DUMP_ARGS(this, batch_, batch);
if (batch_ >= batch) return Empty{};
send_wakers_.AddPending(GetContext<Activity>()->MakeNonOwningWaker());
return Pending{};
}
// Mark that the receiver is closed.
void ReceiverClosed() {
void ReceiverClosed(bool wake_receiver) {
ReleasableMutexLock lock(&mu_);
GRPC_TRACE_LOG(promise_primitives, INFO)
<< "MPSC::ReceiverClosed: " << GRPC_DUMP_ARGS(this, batch_);
if (batch_ == kClosedBatch) return;
batch_ = kClosedBatch;
auto wakeups = send_wakers_.TakeWakeupSet();
auto receive_waker = std::move(receive_waker_);
lock.Release();
if (wake_receiver) receive_waker.Wakeup();
wakeups.Wakeup();
}
@@ -188,10 +201,10 @@ class MpscReceiver {
: center_(MakeRefCounted<mpscpipe_detail::Center<T>>(
std::max(static_cast<size_t>(1), max_buffer_hint / 2))) {}
~MpscReceiver() {
if (center_ != nullptr) center_->ReceiverClosed();
if (center_ != nullptr) center_->ReceiverClosed(false);
}
void MarkClosed() {
if (center_ != nullptr) center_->ReceiverClosed();
if (center_ != nullptr) center_->ReceiverClosed(true);
}
MpscReceiver(const MpscReceiver&) = delete;
MpscReceiver& operator=(const MpscReceiver&) = delete;
@@ -210,15 +223,19 @@ class MpscReceiver {
// Construct a new sender for this receiver.
MpscSender<T> MakeSender() { return MpscSender<T>(center_); }
// Return a promise that will resolve to the next item (and remove said item).
// Return a promise that will resolve to ValueOrFailure<T>.
// If receiving is closed, it will resolve to failure.
// Otherwise, resolves to the next item (and removes said item).
auto Next() {
return [this]() -> Poll<T> {
return [this]() -> Poll<ValueOrFailure<T>> {
if (buffer_it_ != buffer_.end()) {
return Poll<T>(std::move(*buffer_it_++));
return Poll<ValueOrFailure<T>>(std::move(*buffer_it_++));
}
if (center_->PollReceiveBatch(buffer_)) {
auto p = center_->PollReceiveBatch(buffer_);
if (bool* r = p.value_if_ready()) {
if (!*r) return Failure{};
buffer_it_ = buffer_.begin();
return Poll<T>(std::move(*buffer_it_++));
return Poll<ValueOrFailure<T>>(std::move(*buffer_it_++));
}
return Pending{};
};

@@ -192,6 +192,18 @@ TEST(MpscTest, ImmediateSendWorks) {
activity.Deactivate();
}
TEST(MpscTest, CloseFailsNext) {
StrictMock<MockActivity> activity;
MpscReceiver<Payload> receiver(1);
activity.Activate();
auto next = receiver.Next();
EXPECT_THAT(next(), IsPending());
EXPECT_CALL(activity, WakeupRequested());
receiver.MarkClosed();
EXPECT_THAT(next(), IsReady(Failure{}));
activity.Deactivate();
}
} // namespace
} // namespace grpc_core
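Outside this diff, call sites of MpscReceiver<T>::Next() now see a ValueOrFailure<T> instead of a bare T, so each caller needs a failure branch for the closed-receiver case. A rough stand-in sketch of that call-site shape (std::optional plays the role of ValueOrFailure here, and ConsumeNext is an invented example, not grpc code):

#include <iostream>
#include <optional>
#include <string>

// Stand-in for grpc_core::ValueOrFailure<T>; illustrative only.
template <typename T>
using ValueOrFailureLike = std::optional<T>;

// Hypothetical consumer of MpscReceiver::Next()'s result: before this change
// it could assume a value; now the closed-receiver failure must be handled.
void ConsumeNext(ValueOrFailureLike<std::string> next) {
  if (!next.has_value()) {
    std::cout << "mpsc closed; stop reading\n";  // read fails rather than sticking
    return;
  }
  std::cout << "got item: " << *next << "\n";
}

int main() {
  ConsumeNext(std::string("payload"));  // normal delivery
  ConsumeNext(std::nullopt);            // models the post-close failure result
  return 0;
}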
