Working towards a non-blocking API test

pull/2818/head
Craig Tiller 9 years ago
parent 5148694b22
commit 69f90e6382
  1. src/core/iomgr/pollset.h (4 changed lines)
  2. src/core/iomgr/pollset_multipoller_with_epoll.c (2 changed lines)
  3. src/core/iomgr/pollset_multipoller_with_poll_posix.c (2 changed lines)
  4. src/core/iomgr/pollset_posix.c (11 changed lines)
  5. src/core/iomgr/pollset_posix.h (6 changed lines)
  6. src/core/surface/completion_queue.c (14 changed lines)
  7. test/cpp/end2end/async_end2end_test.cc (215 changed lines)

@@ -76,8 +76,8 @@ void grpc_pollset_destroy(grpc_pollset *pollset);
Returns true if some work has been done, and false if the deadline
expired. */
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline);
void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec now, gpr_timespec deadline);
/* Break one polling thread out of polling work for this pollset.
If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
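
With this change the pollset no longer decides when to give up: grpc_pollset_work takes the caller's notion of `now` alongside the deadline and returns nothing, so the caller owns the timeout check and keeps calling until it is satisfied or out of time. A minimal sketch of the intended caller-side loop, assuming a hypothetical `done` condition and a caller-supplied `worker` and `deadline` (only grpc_pollset_work, GRPC_POLLSET_MU and the gpr_* time helpers come from this patch):

    /* Sketch only: the caller, not the pollset, now checks the deadline. */
    gpr_mu_lock(GRPC_POLLSET_MU(pollset));
    while (!done) {                                   /* `done` is a placeholder condition */
      gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
      if (gpr_time_cmp(now, deadline) >= 0) break;    /* timeout handled by the caller */
      grpc_pollset_work(pollset, &worker, now, deadline); /* may drop and re-take the mutex */
    }
    gpr_mu_unlock(GRPC_POLLSET_MU(pollset));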

@@ -181,7 +181,7 @@ static void multipoll_with_epoll_pollset_maybe_work(
pfds[1].events = POLLIN;
pfds[1].revents = 0;
poll_rv = poll(pfds, 2, timeout_ms);
poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
if (poll_rv < 0) {
if (errno != EINTR) {

@@ -144,7 +144,7 @@ static void multipoll_with_poll_pollset_maybe_work(
POLLOUT, &watchers[i]);
}
r = poll(pfds, pfd_count, timeout);
r = grpc_poll_function(pfds, pfd_count, timeout);
for (i = 1; i < pfd_count; i++) {
grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,

@@ -38,7 +38,6 @@
#include "src/core/iomgr/pollset_posix.h"
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
@@ -57,6 +56,8 @@
GPR_TLS_DECL(g_current_thread_poller);
GPR_TLS_DECL(g_current_thread_worker);
grpc_poll_function_type grpc_poll_function = poll;
static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next;
worker->next->prev = worker->prev;
@@ -168,14 +169,11 @@ static void finish_shutdown(grpc_pollset *pollset) {
pollset->shutdown_done_cb(pollset->shutdown_done_arg);
}
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline) {
gpr_timespec now, gpr_timespec deadline) {
/* pollset->mu already held */
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
int added_worker = 0;
if (gpr_time_cmp(now, deadline) > 0) {
return 0;
}
/* this must happen before we (potentially) drop pollset->mu */
worker->next = worker->prev = NULL;
/* TODO(ctiller): pool these */
@@ -217,7 +215,6 @@ done:
gpr_mu_lock(&pollset->mu);
}
}
return 1;
}
void grpc_pollset_shutdown(grpc_pollset *pollset,
@@ -456,7 +453,7 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
/* poll fd count (argument 2) is shortened by one if we have no events
to poll on - such that it only includes the kicker */
r = poll(pfd, nfds, timeout);
r = grpc_poll_function(pfd, nfds, timeout);
GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);
if (fd) {

@@ -34,6 +34,8 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H
#include <poll.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/wakeup_fd_posix.h"
@@ -117,4 +119,8 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
* be locked) */
int grpc_pollset_has_workers(grpc_pollset *pollset);
/* override to allow tests to hook poll() usage */
typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
extern grpc_poll_function_type grpc_poll_function;
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
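
This hook is the seam the new test relies on: every poll() call inside the process can be routed through a replacement for the duration of a test. A minimal sketch of using it, assuming a hypothetical assert_zero_timeout replacement (the real test below wraps the same idea in the PollOverride / PollingCheckRegion helpers):

    /* Sketch: fail fast if any poll() call would block. */
    static int assert_zero_timeout(struct pollfd *pfds, nfds_t nfds, int timeout) {
      GPR_ASSERT(timeout == 0);            /* a blocking call is a test failure */
      return poll(pfds, nfds, timeout);    /* otherwise defer to the real poll() */
    }

    grpc_poll_function_type prev = grpc_poll_function;
    grpc_poll_function = assert_zero_timeout;
    /* ... exercise the code under test ... */
    grpc_poll_function = prev;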

@@ -164,6 +164,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec deadline) {
grpc_event ret;
grpc_pollset_worker worker;
int first_loop = 1;
gpr_timespec now;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -189,12 +191,15 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
ret.type = GRPC_QUEUE_SHUTDOWN;
break;
}
if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
}
first_loop = 0;
grpc_pollset_work(&cc->pollset, &worker, now, deadline);
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
GRPC_CQ_INTERNAL_UNREF(cc, "next");
@@ -232,6 +237,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
grpc_cq_completion *c;
grpc_cq_completion *prev;
grpc_pollset_worker worker;
int first_loop = 1;
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -272,14 +278,16 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
ret.type = GRPC_QUEUE_TIMEOUT;
break;
}
if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
}
del_plucker(cc, tag, &worker);
first_loop = 0;
grpc_pollset_work(&cc->pollset, &worker, now, deadline);
}
done:
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
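
Both completion-queue loops now share the same shape once grpc_pollset_work stops reporting timeouts: check for a result, time out only if this is not the first iteration, then poll. The first_loop flag guarantees at least one pass through grpc_pollset_work even when the caller's deadline has already expired, which is exactly what a zero-timeout AsyncNext needs to make progress without blocking. A condensed sketch of the pattern (queue_has_result / return_result / timeout_result are placeholders for the surrounding completion-queue logic, including the lock handling elided here):

    int first_loop = 1;
    for (;;) {
      if (queue_has_result()) return return_result();   /* placeholder: dequeue a completion */
      gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
      if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
        return timeout_result();                         /* placeholder: GRPC_QUEUE_TIMEOUT */
      }
      first_loop = 0;
      grpc_pollset_work(&cc->pollset, &worker, now, deadline);
    }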

@@ -56,6 +56,10 @@
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_posix.h"
#endif
using grpc::cpp::test::util::EchoRequest;
using grpc::cpp::test::util::EchoResponse;
using std::chrono::system_clock;
@@ -67,18 +71,64 @@ namespace {
void* tag(int i) { return (void*)(gpr_intptr) i; }
class Verifier {
#ifdef GPR_POSIX_SOCKET
static int assert_non_blocking_poll(
struct pollfd *pfds, nfds_t nfds, int timeout) {
GPR_ASSERT(timeout == 0);
return poll(pfds, nfds, timeout);
}
class PollOverride {
public:
PollOverride(grpc_poll_function_type f) {
prev_ = grpc_poll_function;
grpc_poll_function = f;
}
~PollOverride() {
grpc_poll_function = prev_;
}
private:
grpc_poll_function_type prev_;
};
class PollingCheckRegion : public PollOverride {
public:
explicit PollingCheckRegion(bool allow_blocking)
: PollOverride(allow_blocking ? poll : assert_non_blocking_poll) {}
};
#else
class PollingCheckRegion {
public:
explicit PollingCheckRegion(bool allow_blocking) {}
};
#endif
class Verifier : public PollingCheckRegion {
public:
explicit Verifier(bool spin) : PollingCheckRegion(!spin), spin_(spin) {}
Verifier& Expect(int i, bool expect_ok) {
expectations_[tag(i)] = expect_ok;
return *this;
}
void Verify(CompletionQueue *cq) {
if (spin_) gpr_log(GPR_DEBUG, "spin");
GPR_ASSERT(!expectations_.empty());
while (!expectations_.empty()) {
bool ok;
void* got_tag;
if (spin_) {
for (;;) {
auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
if (r == CompletionQueue::TIMEOUT) continue;
if (r == CompletionQueue::GOT_EVENT) break;
gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
abort();
}
} else {
EXPECT_TRUE(cq->Next(&got_tag, &ok));
}
auto it = expectations_.find(got_tag);
EXPECT_TRUE(it != expectations_.end());
EXPECT_EQ(it->second, ok);
@@ -86,15 +136,33 @@ class Verifier {
}
}
void Verify(CompletionQueue *cq, std::chrono::system_clock::time_point deadline) {
if (spin_) gpr_log(GPR_DEBUG, "spin");
if (expectations_.empty()) {
bool ok;
void *got_tag;
if (spin_) {
while (std::chrono::system_clock::now() < deadline) {
EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME)), CompletionQueue::TIMEOUT);
}
} else {
EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::TIMEOUT);
}
} else {
while (!expectations_.empty()) {
bool ok;
void *got_tag;
if (spin_) {
for (;;) {
GPR_ASSERT(std::chrono::system_clock::now() < deadline);
auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
if (r == CompletionQueue::TIMEOUT) continue;
if (r == CompletionQueue::GOT_EVENT) break;
gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
abort();
}
} else {
EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::GOT_EVENT);
}
auto it = expectations_.find(got_tag);
EXPECT_TRUE(it != expectations_.end());
EXPECT_EQ(it->second, ok);
@@ -105,9 +173,10 @@ class Verifier {
private:
std::map<void*, bool> expectations_;
bool spin_;
};
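
With these helpers each test body runs in one of two modes chosen by the fixture's bool parameter: blocking mode drains the queue with cq->Next, while spin mode calls AsyncNext with a zero deadline in a tight loop and, on POSIX, installs assert_non_blocking_poll so any poll() with a non-zero timeout aborts the test. A typical call site inside a TEST_P body, mirroring the pattern used throughout the tests below:

    Verifier(GetParam())          // true => spin, allowing only non-blocking polls
        .Expect(2, true)          // tag(2) must complete with ok == true
        .Verify(cq_.get());       // block or spin until every expectation is met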
class AsyncEnd2endTest : public ::testing::Test {
class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
protected:
AsyncEnd2endTest() {}
@@ -156,15 +225,15 @@ class AsyncEnd2endTest : public ::testing::Test {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer,
cq_.get(), cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
response_reader->Finish(&recv_response, &recv_status, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
@@ -178,18 +247,18 @@ class AsyncEnd2endTest : public ::testing::Test {
std::ostringstream server_address_;
};
TEST_F(AsyncEnd2endTest, SimpleRpc) {
TEST_P(AsyncEnd2endTest, SimpleRpc) {
ResetStub();
SendRpc(1);
}
TEST_F(AsyncEnd2endTest, SequentialRpcs) {
TEST_P(AsyncEnd2endTest, SequentialRpcs) {
ResetStub();
SendRpc(10);
}
// Test a simple RPC using the async version of Next
TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
ResetStub();
EchoRequest send_request;
@@ -210,28 +279,28 @@ TEST_F(AsyncEnd2endTest, AsyncNextRpc) {
std::chrono::system_clock::now());
std::chrono::system_clock::time_point time_limit(
std::chrono::system_clock::now() + std::chrono::seconds(10));
Verifier().Verify(cq_.get(), time_now);
Verifier().Verify(cq_.get(), time_now);
Verifier(GetParam()).Verify(cq_.get(), time_now);
Verifier(GetParam()).Verify(cq_.get(), time_now);
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get(), time_limit);
Verifier(GetParam()).Expect(2, true).Verify(cq_.get(), time_limit);
EXPECT_EQ(send_request.message(), recv_request.message());
send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(3));
Verifier().Expect(3, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
response_reader->Finish(&recv_response, &recv_status, tag(4));
Verifier().Expect(4, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
}
// Two pings and a final pong.
TEST_F(AsyncEnd2endTest, SimpleClientStreaming) {
TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
ResetStub();
EchoRequest send_request;
@@ -250,41 +319,41 @@ TEST_F(AsyncEnd2endTest, SimpleClientStreaming) {
service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Expect(1, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Expect(1, true).Verify(cq_.get());
cli_stream->Write(send_request, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
srv_stream.Read(&recv_request, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
cli_stream->Write(send_request, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
srv_stream.Read(&recv_request, tag(6));
Verifier().Expect(6, true).Verify(cq_.get());
Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
cli_stream->WritesDone(tag(7));
Verifier().Expect(7, true).Verify(cq_.get());
Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
srv_stream.Read(&recv_request, tag(8));
Verifier().Expect(8, false).Verify(cq_.get());
Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
send_response.set_message(recv_request.message());
srv_stream.Finish(send_response, Status::OK, tag(9));
Verifier().Expect(9, true).Verify(cq_.get());
Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
cli_stream->Finish(&recv_status, tag(10));
Verifier().Expect(10, true).Verify(cq_.get());
Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
}
// One ping, two pongs.
TEST_F(AsyncEnd2endTest, SimpleServerStreaming) {
TEST_P(AsyncEnd2endTest, SimpleServerStreaming) {
ResetStub();
EchoRequest send_request;
@@ -303,38 +372,38 @@ TEST_F(AsyncEnd2endTest, SimpleServerStreaming) {
service_.RequestResponseStream(&srv_ctx, &recv_request, &srv_stream,
cq_.get(), cq_.get(), tag(2));
Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
send_response.set_message(recv_request.message());
srv_stream.Write(send_response, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
cli_stream->Read(&recv_response, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
srv_stream.Write(send_response, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
cli_stream->Read(&recv_response, tag(6));
Verifier().Expect(6, true).Verify(cq_.get());
Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
srv_stream.Finish(Status::OK, tag(7));
Verifier().Expect(7, true).Verify(cq_.get());
Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
cli_stream->Read(&recv_response, tag(8));
Verifier().Expect(8, false).Verify(cq_.get());
Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
cli_stream->Finish(&recv_status, tag(9));
Verifier().Expect(9, true).Verify(cq_.get());
Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
EXPECT_TRUE(recv_status.ok());
}
// One ping, one pong.
TEST_F(AsyncEnd2endTest, SimpleBidiStreaming) {
TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
ResetStub();
EchoRequest send_request;
@@ -353,40 +422,40 @@ TEST_F(AsyncEnd2endTest, SimpleBidiStreaming) {
service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(1, true).Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
cli_stream->Write(send_request, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
srv_stream.Read(&recv_request, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
send_response.set_message(recv_request.message());
srv_stream.Write(send_response, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
cli_stream->Read(&recv_response, tag(6));
Verifier().Expect(6, true).Verify(cq_.get());
Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
cli_stream->WritesDone(tag(7));
Verifier().Expect(7, true).Verify(cq_.get());
Verifier(GetParam()).Expect(7, true).Verify(cq_.get());
srv_stream.Read(&recv_request, tag(8));
Verifier().Expect(8, false).Verify(cq_.get());
Verifier(GetParam()).Expect(8, false).Verify(cq_.get());
srv_stream.Finish(Status::OK, tag(9));
Verifier().Expect(9, true).Verify(cq_.get());
Verifier(GetParam()).Expect(9, true).Verify(cq_.get());
cli_stream->Finish(&recv_status, tag(10));
Verifier().Expect(10, true).Verify(cq_.get());
Verifier(GetParam()).Expect(10, true).Verify(cq_.get());
EXPECT_TRUE(recv_status.ok());
}
// Metadata tests
TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) {
TEST_P(AsyncEnd2endTest, ClientInitialMetadataRpc) {
ResetStub();
EchoRequest send_request;
@@ -410,7 +479,7 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
auto client_initial_metadata = srv_ctx.client_metadata();
EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second);
@@ -420,16 +489,16 @@ TEST_F(AsyncEnd2endTest, ClientInitialMetadataRpc) {
send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
response_reader->Finish(&recv_response, &recv_status, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
}
TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
TEST_P(AsyncEnd2endTest, ServerInitialMetadataRpc) {
ResetStub();
EchoRequest send_request;
@@ -451,15 +520,15 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
srv_ctx.AddInitialMetadata(meta1.first, meta1.second);
srv_ctx.AddInitialMetadata(meta2.first, meta2.second);
response_writer.SendInitialMetadata(tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
response_reader->ReadInitialMetadata(tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
EXPECT_EQ(meta1.second, server_initial_metadata.find(meta1.first)->second);
EXPECT_EQ(meta2.second, server_initial_metadata.find(meta2.first)->second);
@@ -467,16 +536,16 @@ TEST_F(AsyncEnd2endTest, ServerInitialMetadataRpc) {
send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
response_reader->Finish(&recv_response, &recv_status, tag(6));
Verifier().Expect(6, true).Verify(cq_.get());
Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
}
TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
TEST_P(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
ResetStub();
EchoRequest send_request;
@@ -498,20 +567,20 @@ TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
response_writer.SendInitialMetadata(tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
send_response.set_message(recv_request.message());
srv_ctx.AddTrailingMetadata(meta1.first, meta1.second);
srv_ctx.AddTrailingMetadata(meta2.first, meta2.second);
response_writer.Finish(send_response, Status::OK, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
response_reader->Finish(&recv_response, &recv_status, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -520,7 +589,7 @@ TEST_F(AsyncEnd2endTest, ServerTrailingMetadataRpc) {
EXPECT_EQ(static_cast<size_t>(2), server_trailing_metadata.size());
}
TEST_F(AsyncEnd2endTest, MetadataRpc) {
TEST_P(AsyncEnd2endTest, MetadataRpc) {
ResetStub();
EchoRequest send_request;
@@ -558,7 +627,7 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
auto client_initial_metadata = srv_ctx.client_metadata();
EXPECT_EQ(meta1.second, client_initial_metadata.find(meta1.first)->second);
@@ -568,9 +637,9 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) {
srv_ctx.AddInitialMetadata(meta3.first, meta3.second);
srv_ctx.AddInitialMetadata(meta4.first, meta4.second);
response_writer.SendInitialMetadata(tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
response_reader->ReadInitialMetadata(tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
auto server_initial_metadata = cli_ctx.GetServerInitialMetadata();
EXPECT_EQ(meta3.second, server_initial_metadata.find(meta3.first)->second);
EXPECT_EQ(meta4.second, server_initial_metadata.find(meta4.first)->second);
@@ -581,10 +650,10 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) {
srv_ctx.AddTrailingMetadata(meta6.first, meta6.second);
response_writer.Finish(send_response, Status::OK, tag(5));
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
response_reader->Finish(&recv_response, &recv_status, tag(6));
Verifier().Expect(6, true).Verify(cq_.get());
Verifier(GetParam()).Expect(6, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
auto server_trailing_metadata = cli_ctx.GetServerTrailingMetadata();
@@ -594,7 +663,7 @@ TEST_F(AsyncEnd2endTest, MetadataRpc) {
}
// Server uses AsyncNotifyWhenDone API to check for cancellation
TEST_F(AsyncEnd2endTest, ServerCheckCancellation) {
TEST_P(AsyncEnd2endTest, ServerCheckCancellation) {
ResetStub();
EchoRequest send_request;
@@ -615,21 +684,21 @@ TEST_F(AsyncEnd2endTest, ServerCheckCancellation) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
cli_ctx.TryCancel();
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
EXPECT_TRUE(srv_ctx.IsCancelled());
response_reader->Finish(&recv_response, &recv_status, tag(4));
Verifier().Expect(4, false).Verify(cq_.get());
Verifier(GetParam()).Expect(4, false).Verify(cq_.get());
EXPECT_EQ(StatusCode::CANCELLED, recv_status.error_code());
}
// Server uses AsyncNotifyWhenDone API to check for normal finish
TEST_F(AsyncEnd2endTest, ServerCheckDone) {
TEST_P(AsyncEnd2endTest, ServerCheckDone) {
ResetStub();
EchoRequest send_request;
@@ -650,22 +719,24 @@ TEST_F(AsyncEnd2endTest, ServerCheckDone) {
service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
cq_.get(), tag(2));
Verifier().Expect(2, true).Verify(cq_.get());
Verifier(GetParam()).Expect(2, true).Verify(cq_.get());
EXPECT_EQ(send_request.message(), recv_request.message());
send_response.set_message(recv_request.message());
response_writer.Finish(send_response, Status::OK, tag(3));
Verifier().Expect(3, true).Verify(cq_.get());
Verifier().Expect(5, true).Verify(cq_.get());
Verifier(GetParam()).Expect(3, true).Verify(cq_.get());
Verifier(GetParam()).Expect(5, true).Verify(cq_.get());
EXPECT_FALSE(srv_ctx.IsCancelled());
response_reader->Finish(&recv_response, &recv_status, tag(4));
Verifier().Expect(4, true).Verify(cq_.get());
Verifier(GetParam()).Expect(4, true).Verify(cq_.get());
EXPECT_EQ(send_response.message(), recv_response.message());
EXPECT_TRUE(recv_status.ok());
}
INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest, ::testing::Values(false, true));
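
Because the fixture is parameterized, every test above now runs twice, once per value of the bool. Under googletest's naming convention the instances appear with /0 and /1 suffixes (for example AsyncEnd2end/AsyncEnd2endTest.SimpleRpc/0 and .../1), so a single variant can still be selected with --gtest_filter; the test binary name is whatever the build produces for async_end2end_test.cc.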
} // namespace
} // namespace testing
} // namespace grpc
