Merge branch 'master' into callback_codegen_client_unary

reviewable/pr16646/r4
Vijay Pai
commit 830e5ad5df
8 changed files:
  1. include/grpcpp/alarm.h (27)
  2. include/grpcpp/impl/codegen/async_stream.h (47)
  3. src/core/ext/filters/client_channel/subchannel_index.cc (6)
  4. src/cpp/common/alarm.cc (47)
  5. src/cpp/common/callback_common.cc (4)
  6. test/cpp/common/alarm_test.cc (106)
  7. test/cpp/end2end/client_callback_end2end_test.cc (2)
  8. test/cpp/end2end/client_lb_end2end_test.cc (33)

@@ -21,6 +21,8 @@
#ifndef GRPCPP_ALARM_H
#define GRPCPP_ALARM_H
#include <functional>
#include <grpc/grpc.h>
#include <grpcpp/impl/codegen/completion_queue.h>
#include <grpcpp/impl/codegen/completion_queue_tag.h>
@@ -76,8 +78,33 @@ class Alarm : private GrpcLibraryCodegen {
/// has already fired has no effect.
void Cancel();
/// NOTE: class experimental_type is not part of the public API of this class
/// TODO(vjpai): Move these contents to the public API of Alarm when
/// they are no longer experimental
class experimental_type {
public:
explicit experimental_type(Alarm* alarm) : alarm_(alarm) {}
/// Set an alarm to invoke callback \a f. The argument to the callback
/// states whether the alarm expired at \a deadline (true) or was cancelled
/// (false)
template <typename T>
void Set(const T& deadline, std::function<void(bool)> f) {
alarm_->SetInternal(TimePoint<T>(deadline).raw_time(), std::move(f));
}
private:
Alarm* alarm_;
};
/// NOTE: The function experimental() is not stable public API. It is a view
/// to the experimental components of this class. It may be changed or removed
/// at any time.
experimental_type experimental() { return experimental_type(this); }
private:
void SetInternal(CompletionQueue* cq, gpr_timespec deadline, void* tag);
void SetInternal(gpr_timespec deadline, std::function<void(bool)> f);
internal::CompletionQueueTag* alarm_;
};
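
As a quick illustration of the experimental callback API added above, here is a minimal usage sketch; the helper name and the one-second deadline are illustrative only, not part of this change:

#include <chrono>
#include <grpcpp/alarm.h>

// Hypothetical helper: schedule a callback-based alarm. No CompletionQueue
// is involved; the callback runs when the alarm fires or is cancelled.
void ScheduleCallbackAlarm(grpc::Alarm* alarm) {
  alarm->experimental().Set(
      std::chrono::system_clock::now() + std::chrono::seconds(1),
      [](bool triggered) {
        // triggered == true: the deadline expired.
        // triggered == false: the alarm was cancelled (Cancel() or destruction).
      });
}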

@@ -64,7 +64,7 @@ class ClientAsyncStreamingInterface {
/// earlier call to \a AsyncReaderInterface::Read that yielded a failed
/// result, e.g. cq->Next(&read_tag, &ok) filled in 'ok' with 'false').
///
/// This function will return when either:
/// The tag will be returned when either:
/// - all incoming messages have been read and the server has returned
/// a status.
/// - the server has returned a non-OK status.
@@ -114,6 +114,9 @@ class AsyncWriterInterface {
/// queue BEFORE calling Write again.
/// This is thread-safe with respect to \a AsyncReaderInterface::Read
///
/// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
/// deallocate it once Write returns.
///
/// \param[in] msg The message to be written.
/// \param[in] tag The tag identifying the operation.
virtual void Write(const W& msg, void* tag) = 0;
@@ -127,6 +130,9 @@ class AsyncWriterInterface {
/// WriteOptions \a options is used to set the write options of this message.
/// This is thread-safe with respect to \a AsyncReaderInterface::Read
///
/// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
/// deallocate it once Write returns.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] tag The tag identifying the operation.
@@ -144,6 +150,9 @@ class AsyncWriterInterface {
/// the flow control window size. If \a msg size is larger than the window
/// size, it will be sent on wire without buffering.
///
/// gRPC doesn't take ownership or a reference to \a msg, so it is safe to
/// deallocate it once Write returns.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] tag The tag identifying the operation.
@@ -651,6 +660,9 @@ class ServerAsyncReaderInterface
/// metadata (if not sent already), response message, and status, or if
/// some failure occurred when trying to do so.
///
/// gRPC doesn't take ownership or a reference to \a msg or \a status, so it
/// is safe to deallocate them once Finish returns.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
/// \param[in] msg To be sent to the client as the response for this call.
@@ -671,6 +683,9 @@ class ServerAsyncReaderInterface
/// metadata (if not sent already), and status, or if some failure occurred
/// when trying to do so.
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once FinishWithError returns.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
/// - Note: \a status must have a non-OK code.
@@ -718,6 +733,9 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
/// initial and trailing metadata.
///
/// Note: \a msg is not sent if \a status has a non-OK code.
///
/// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
/// is safe to deallocate them once Finish returns.
void Finish(const W& msg, const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
@@ -744,6 +762,9 @@ class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
/// - also sends initial metadata if not already sent.
/// - uses the \a ServerContext associated with this call to send possible
/// initial and trailing metadata.
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once FinishWithError returns.
void FinishWithError(const Status& status, void* tag) override {
GPR_CODEGEN_ASSERT(!status.ok());
finish_ops_.set_output_tag(tag);
@@ -794,6 +815,9 @@ class ServerAsyncWriterInterface
/// metadata (if not sent already), response message, and status, or if
/// some failure occurred when trying to do so.
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once Finish returns.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
@@ -805,6 +829,9 @@ class ServerAsyncWriterInterface
/// WriteAndFinish is equivalent to performing WriteLast and Finish
/// in a single step.
///
/// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
/// is safe to deallocate them once WriteAndFinish returns.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
@@ -868,6 +895,9 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
/// for sending trailing (and initial) metadata to the client.
///
/// Note: \a status must have an OK code.
///
/// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
/// is safe to deallocate them once WriteAndFinish returns.
void WriteAndFinish(const W& msg, WriteOptions options, const Status& status,
void* tag) override {
write_ops_.set_output_tag(tag);
@@ -886,6 +916,9 @@ class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
///
/// Note: there are no restrictions on the code of
/// \a status; it may be non-OK
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once Finish returns.
void Finish(const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&finish_ops_);
@@ -945,6 +978,9 @@ class ServerAsyncReaderWriterInterface
/// metadata (if not sent already), response message, and status, or if some
/// failure occurred when trying to do so.
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once Finish returns.
///
/// \param[in] tag Tag identifying this request.
/// \param[in] status To be sent to the client as the result of this call.
virtual void Finish(const Status& status, void* tag) = 0;
@@ -956,6 +992,9 @@ class ServerAsyncReaderWriterInterface
/// WriteAndFinish is equivalent to performing WriteLast and Finish in a
/// single step.
///
/// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
/// is safe to deallocate them once WriteAndFinish returns.
///
/// \param[in] msg The message to be written.
/// \param[in] options The WriteOptions to be used to write this message.
/// \param[in] status The Status that server returns to client.
@@ -1027,6 +1066,9 @@ class ServerAsyncReaderWriter final
/// for sending trailing (and initial) metadata to the client.
///
/// Note: \a status must have an OK code.
///
/// gRPC doesn't take ownership or a reference to \a msg and \a status, so it
/// is safe to deallocate them once WriteAndFinish returns.
void WriteAndFinish(const W& msg, WriteOptions options, const Status& status,
void* tag) override {
write_ops_.set_output_tag(tag);
@@ -1045,6 +1087,9 @@ class ServerAsyncReaderWriter final
///
/// Note: there are no restrictions on the code of \a status,
/// it may be non-OK
///
/// gRPC doesn't take ownership or a reference to \a status, so it is safe to
/// deallocate it once Finish returns.
void Finish(const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
EnsureInitialMetadataSent(&finish_ops_);
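
To make the ownership notes added in this file concrete, here is a hedged caller-side sketch; the helper name, template parameter, and tag value are hypothetical, while the ownership semantics come from the notes above:

// Per the notes above, gRPC keeps neither ownership nor a reference to msg,
// so the request object may be destroyed as soon as Write() returns, even
// though the write's tag only completes later on the completion queue.
template <class W>
void WriteThenDiscard(grpc::ClientAsyncWriterInterface<W>* writer) {
  {
    W msg;  // hypothetical message; populate fields as needed
    writer->Write(msg, reinterpret_cast<void*>(1));
  }  // msg destroyed here; safe per the ownership note
  // The caller still waits for the tag on the CompletionQueue (e.g. via
  // cq->Next(&tag, &ok)) to learn whether the write succeeded.
}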

@@ -42,7 +42,7 @@ struct grpc_subchannel_key {
grpc_subchannel_args args;
};
static gpr_atm g_force_creation = false;
static bool g_force_creation = false;
static grpc_subchannel_key* create_key(
const grpc_subchannel_args* args,
@@ -74,7 +74,7 @@ static grpc_subchannel_key* subchannel_key_copy(grpc_subchannel_key* k) {
int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
const grpc_subchannel_key* b) {
// To pretend the keys are different, return a non-zero value.
if (GPR_UNLIKELY(gpr_atm_no_barrier_load(&g_force_creation))) return 1;
if (GPR_UNLIKELY(g_force_creation)) return 1;
int c = GPR_ICMP(a->args.filter_count, b->args.filter_count);
if (c != 0) return c;
if (a->args.filter_count > 0) {
@@ -251,5 +251,5 @@ void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
}
void grpc_subchannel_index_test_only_set_force_creation(bool force_creation) {
gpr_atm_no_barrier_store(&g_force_creation, force_creation);
g_force_creation = force_creation;
}

@@ -39,17 +39,6 @@ class AlarmImpl : public CompletionQueueTag {
AlarmImpl() : cq_(nullptr), tag_(nullptr) {
gpr_ref_init(&refs_, 1);
grpc_timer_init_unset(&timer_);
GRPC_CLOSURE_INIT(&on_alarm_,
[](void* arg, grpc_error* error) {
// queue the op on the completion queue
AlarmImpl* alarm = static_cast<AlarmImpl*>(arg);
alarm->Ref();
grpc_cq_end_op(
alarm->cq_, alarm, error,
[](void* arg, grpc_cq_completion* completion) {},
arg, &alarm->completion_);
},
this, grpc_schedule_on_exec_ctx);
}
~AlarmImpl() {
grpc_core::ExecCtx exec_ctx;
@@ -68,6 +57,32 @@ class AlarmImpl : public CompletionQueueTag {
cq_ = cq->cq();
tag_ = tag;
GPR_ASSERT(grpc_cq_begin_op(cq_, this));
GRPC_CLOSURE_INIT(&on_alarm_,
[](void* arg, grpc_error* error) {
// queue the op on the completion queue
AlarmImpl* alarm = static_cast<AlarmImpl*>(arg);
alarm->Ref();
grpc_cq_end_op(
alarm->cq_, alarm, error,
[](void* arg, grpc_cq_completion* completion) {},
arg, &alarm->completion_);
},
this, grpc_schedule_on_exec_ctx);
grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline),
&on_alarm_);
}
void Set(gpr_timespec deadline, std::function<void(bool)> f) {
grpc_core::ExecCtx exec_ctx;
// Don't use any CQ at all. Instead just use the timer to fire the function
callback_ = std::move(f);
Ref();
GRPC_CLOSURE_INIT(&on_alarm_,
[](void* arg, grpc_error* error) {
AlarmImpl* alarm = static_cast<AlarmImpl*>(arg);
alarm->callback_(error == GRPC_ERROR_NONE);
alarm->Unref();
},
this, grpc_schedule_on_exec_ctx);
grpc_timer_init(&timer_, grpc_timespec_to_millis_round_up(deadline),
&on_alarm_);
}
@@ -95,6 +110,7 @@ class AlarmImpl : public CompletionQueueTag {
// completion queue where events about this alarm will be posted
grpc_completion_queue* cq_;
void* tag_;
std::function<void(bool)> callback_;
};
} // namespace internal
@@ -113,6 +129,15 @@ void Alarm::SetInternal(CompletionQueue* cq, gpr_timespec deadline, void* tag) {
static_cast<internal::AlarmImpl*>(alarm_)->Set(cq, deadline, tag);
}
void Alarm::SetInternal(gpr_timespec deadline, std::function<void(bool)> f) {
// Note that we know that alarm_ is actually an internal::AlarmImpl
// but we declared it as the base pointer to avoid a forward declaration
// or exposing core data structures in the C++ public headers.
// Thus it is safe to use a static_cast to the subclass here, and the
// C++ style guide allows us to do so in this case
static_cast<internal::AlarmImpl*>(alarm_)->Set(deadline, std::move(f));
}
Alarm::~Alarm() {
if (alarm_ != nullptr) {
static_cast<internal::AlarmImpl*>(alarm_)->Destroy();

@@ -66,8 +66,8 @@ class CallbackWithSuccessImpl : public grpc_core::CQCallbackInterface {
GPR_ASSERT(parent_->ops()->FinalizeResult(&ignored, &new_ok));
GPR_ASSERT(ignored == parent_->ops());
// Last use of func_ or ok, so ok to move them out for rvalue call above
CatchingCallback(std::move(func_), std::move(ok));
// Last use of func_, so ok to move it out for rvalue call above
CatchingCallback(std::move(func_), ok);
func_ = nullptr; // reset to clear this out for sure
grpc_call_unref(call_);

@@ -16,9 +16,13 @@
*
*/
#include <condition_variable>
#include <memory>
#include <mutex>
#include <thread>
#include <grpcpp/alarm.h>
#include <grpcpp/completion_queue.h>
#include <thread>
#include <gtest/gtest.h>
@@ -43,6 +47,66 @@ TEST(AlarmTest, RegularExpiry) {
EXPECT_EQ(junk, output_tag);
}
struct Completion {
bool completed = false;
std::mutex mu;
std::condition_variable cv;
};
TEST(AlarmTest, CallbackRegularExpiry) {
Alarm alarm;
auto c = std::make_shared<Completion>();
alarm.experimental().Set(
std::chrono::system_clock::now() + std::chrono::seconds(1), [c](bool ok) {
EXPECT_TRUE(ok);
std::lock_guard<std::mutex> l(c->mu);
c->completed = true;
c->cv.notify_one();
});
std::unique_lock<std::mutex> l(c->mu);
EXPECT_TRUE(c->cv.wait_until(
l, std::chrono::system_clock::now() + std::chrono::seconds(10),
[c] { return c->completed; }));
}
TEST(AlarmTest, CallbackZeroExpiry) {
Alarm alarm;
auto c = std::make_shared<Completion>();
alarm.experimental().Set(grpc_timeout_seconds_to_deadline(0), [c](bool ok) {
EXPECT_TRUE(ok);
std::lock_guard<std::mutex> l(c->mu);
c->completed = true;
c->cv.notify_one();
});
std::unique_lock<std::mutex> l(c->mu);
EXPECT_TRUE(c->cv.wait_until(
l, std::chrono::system_clock::now() + std::chrono::seconds(10),
[c] { return c->completed; }));
}
TEST(AlarmTest, CallbackNegativeExpiry) {
Alarm alarm;
auto c = std::make_shared<Completion>();
alarm.experimental().Set(
std::chrono::system_clock::now() + std::chrono::seconds(-1),
[c](bool ok) {
EXPECT_TRUE(ok);
std::lock_guard<std::mutex> l(c->mu);
c->completed = true;
c->cv.notify_one();
});
std::unique_lock<std::mutex> l(c->mu);
EXPECT_TRUE(c->cv.wait_until(
l, std::chrono::system_clock::now() + std::chrono::seconds(10),
[c] { return c->completed; }));
}
TEST(AlarmTest, MultithreadedRegularExpiry) {
CompletionQueue cq;
void* junk = reinterpret_cast<void*>(1618033);
@@ -182,6 +246,26 @@ TEST(AlarmTest, Cancellation) {
EXPECT_EQ(junk, output_tag);
}
TEST(AlarmTest, CallbackCancellation) {
Alarm alarm;
auto c = std::make_shared<Completion>();
alarm.experimental().Set(
std::chrono::system_clock::now() + std::chrono::seconds(10),
[c](bool ok) {
EXPECT_FALSE(ok);
std::lock_guard<std::mutex> l(c->mu);
c->completed = true;
c->cv.notify_one();
});
alarm.Cancel();
std::unique_lock<std::mutex> l(c->mu);
EXPECT_TRUE(c->cv.wait_until(
l, std::chrono::system_clock::now() + std::chrono::seconds(1),
[c] { return c->completed; }));
}
TEST(AlarmTest, SetDestruction) {
CompletionQueue cq;
void* junk = reinterpret_cast<void*>(1618033);
@@ -200,6 +284,26 @@ TEST(AlarmTest, SetDestruction) {
EXPECT_EQ(junk, output_tag);
}
TEST(AlarmTest, CallbackSetDestruction) {
auto c = std::make_shared<Completion>();
{
Alarm alarm;
alarm.experimental().Set(
std::chrono::system_clock::now() + std::chrono::seconds(10),
[c](bool ok) {
EXPECT_FALSE(ok);
std::lock_guard<std::mutex> l(c->mu);
c->completed = true;
c->cv.notify_one();
});
}
std::unique_lock<std::mutex> l(c->mu);
EXPECT_TRUE(c->cv.wait_until(
l, std::chrono::system_clock::now() + std::chrono::seconds(1),
[c] { return c->completed; }));
}
TEST(AlarmTest, UnsetDestruction) {
CompletionQueue cq;
Alarm alarm;

@@ -135,6 +135,8 @@ class ClientCallbackEnd2endTest : public ::testing::Test {
if (maybe_except) {
throw - 1;
}
#else
GPR_ASSERT(!maybe_except);
#endif
});
std::unique_lock<std::mutex> l(mu);

@@ -119,6 +119,7 @@ class ClientLbEnd2endTest : public ::testing::Test {
}
void SetUp() override {
grpc_init();
response_generator_ =
grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
}
@@ -127,6 +128,7 @@
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown();
}
grpc_shutdown();
}
void CreateServers(size_t num_servers,
@@ -560,7 +562,23 @@ TEST_F(ClientLbEnd2endTest, PickFirstUpdateSuperset) {
EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
}
TEST_F(ClientLbEnd2endTest, PickFirstManyUpdates) {
class ClientLbEnd2endWithParamTest
: public ClientLbEnd2endTest,
public ::testing::WithParamInterface<bool> {
protected:
void SetUp() override {
grpc_subchannel_index_test_only_set_force_creation(GetParam());
ClientLbEnd2endTest::SetUp();
}
void TearDown() override {
ClientLbEnd2endTest::TearDown();
grpc_subchannel_index_test_only_set_force_creation(false);
}
};
TEST_P(ClientLbEnd2endWithParamTest, PickFirstManyUpdates) {
gpr_log(GPR_INFO, "subchannel force creation: %d", GetParam());
// Start servers and send one RPC per server.
const int kNumServers = 3;
StartServers(kNumServers);
@@ -570,20 +588,21 @@ TEST_F(ClientLbEnd2endTest, PickFirstManyUpdates) {
for (size_t i = 0; i < servers_.size(); ++i) {
ports.emplace_back(servers_[i]->port_);
}
for (const bool force_creation : {true, false}) {
grpc_subchannel_index_test_only_set_force_creation(force_creation);
gpr_log(GPR_INFO, "Force subchannel creation: %d", force_creation);
for (size_t i = 0; i < 1000; ++i) {
std::shuffle(ports.begin(), ports.end(),
std::mt19937(std::random_device()()));
SetNextResolution(ports);
if (i % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
}
// We should re-enter core at the end of the loop to give the resolution
// setting closure a chance to run.
if ((i + 1) % 10 == 0) CheckRpcSendOk(stub, DEBUG_LOCATION);
}
// Check LB policy name for the channel.
EXPECT_EQ("pick_first", channel->GetLoadBalancingPolicyName());
}
INSTANTIATE_TEST_CASE_P(SubchannelForceCreation, ClientLbEnd2endWithParamTest,
::testing::Bool());
TEST_F(ClientLbEnd2endTest, PickFirstReresolutionNoSelected) {
// Prepare the ports for up servers and down servers.
const int kNumServers = 3;
@@ -984,8 +1003,6 @@ TEST_F(ClientLbEnd2endTest, RoundRobinSingleReconnect) {
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
grpc_test_init(argc, argv);
grpc_init();
const auto result = RUN_ALL_TESTS();
grpc_shutdown();
return result;
}
