Add a test of non-blocking API behavior

... also fix things that were broken :)
pull/2818/head
Craig Tiller 10 years ago
parent 69f90e6382
commit 4c06b820e0
13 changed files (changed lines per file):

1. src/core/iomgr/pollset_posix.c (20)
2. src/core/iomgr/pollset_posix.h (5)
3. src/core/security/google_default_credentials.c (2)
4. src/core/surface/completion_queue.c (1)
5. test/core/httpcli/httpcli_test.c (6)
6. test/core/iomgr/endpoint_tests.c (9)
7. test/core/iomgr/fd_posix_test.c (12)
8. test/core/iomgr/tcp_client_posix_test.c (9)
9. test/core/iomgr/tcp_posix_test.c (15)
10. test/core/iomgr/tcp_server_posix_test.c (3)
11. test/core/security/oauth2_utils.c (2)
12. test/core/util/reconnect_server.c (3)
13. test/cpp/end2end/async_end2end_test.cc (73)
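
The change threaded through every file below is a new parameter on grpc_pollset_work: callers now sample the clock and pass the current time in alongside the deadline, instead of the pollset calling gpr_now() internally. A minimal before/after sketch of the calling convention, using the names that appear in the test loops in this diff:

    /* before: the pollset read the clock itself */
    grpc_pollset_work(&g_pollset, &worker, deadline);

    /* after: the caller supplies `now`; a now at or past the deadline
       yields a zero-timeout, non-blocking poll */
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      deadline);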

src/core/iomgr/pollset_posix.c
@@ -90,6 +90,7 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
 }

 void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+  /* pollset->mu already held */
   if (specific_worker != NULL) {
     if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
       for (specific_worker = p->root_worker.next;
@@ -141,10 +142,10 @@ void grpc_pollset_init(grpc_pollset *pollset) {
 void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
   gpr_mu_lock(&pollset->mu);
   pollset->vtable->add_fd(pollset, fd, 1);
-  /* the following (enabled only in debug) will reacquire and then release
-     our lock - meaning that if the unlocking flag passed to del_fd above is
-     not respected, the code will deadlock (in a way that we have a chance of
-     debugging) */
+/* the following (enabled only in debug) will reacquire and then release
+   our lock - meaning that if the unlocking flag passed to del_fd above is
+   not respected, the code will deadlock (in a way that we have a chance of
+   debugging) */
 #ifndef NDEBUG
   gpr_mu_lock(&pollset->mu);
   gpr_mu_unlock(&pollset->mu);
@@ -154,10 +155,10 @@ void grpc_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
 void grpc_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd) {
   gpr_mu_lock(&pollset->mu);
   pollset->vtable->del_fd(pollset, fd, 1);
-  /* the following (enabled only in debug) will reacquire and then release
-     our lock - meaning that if the unlocking flag passed to del_fd above is
-     not respected, the code will deadlock (in a way that we have a chance of
-     debugging) */
+/* the following (enabled only in debug) will reacquire and then release
+   our lock - meaning that if the unlocking flag passed to del_fd above is
+   not respected, the code will deadlock (in a way that we have a chance of
+   debugging) */
 #ifndef NDEBUG
   gpr_mu_lock(&pollset->mu);
   gpr_mu_unlock(&pollset->mu);
@@ -170,9 +171,8 @@ static void finish_shutdown(grpc_pollset *pollset) {
 }

 void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
-                       gpr_timespec deadline) {
+                       gpr_timespec now, gpr_timespec deadline) {
   /* pollset->mu already held */
-  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   int added_worker = 0;
   /* this must happen before we (potentially) drop pollset->mu */
   worker->next = worker->prev = NULL;

src/core/iomgr/pollset_posix.h
@@ -104,7 +104,8 @@ void grpc_kick_drain(grpc_pollset *p);
    - longer than a millisecond polls are rounded up to the next nearest
      millisecond to avoid spinning
    - infinite timeouts are converted to -1 */
-int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline, gpr_timespec now);
+int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
+                                         gpr_timespec now);

 /* turn a pollset into a multipoller: platform specific */
 typedef void (*grpc_platform_become_multipoller_type)(grpc_pollset *pollset,
@@ -120,7 +121,7 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
 int grpc_pollset_has_workers(grpc_pollset *pollset);

 /* override to allow tests to hook poll() usage */
-typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
+typedef int (*grpc_poll_function_type)(struct pollfd *, nfds_t, int);
 extern grpc_poll_function_type grpc_poll_function;

 #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */
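
The doc comment above pins down the conversion rules grpc_poll_deadline_to_millis_timeout must follow. As a rough illustration only (this is not the body in pollset_posix.c, and the rounding arithmetic is simplified), the rules translate to something like:

    /* Illustrative sketch: map an already-expired deadline to a
       non-blocking poll, round positive timeouts up to whole milliseconds,
       and map an infinite deadline to poll()'s "wait forever" value. */
    static int sketch_deadline_to_millis(gpr_timespec deadline,
                                         gpr_timespec now) {
      gpr_timespec delta;
      if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
        return -1; /* infinite timeout */
      }
      if (gpr_time_cmp(deadline, now) <= 0) {
        return 0; /* expired: poll without blocking */
      }
      delta = gpr_time_sub(deadline, now);
      /* round up to the next millisecond to avoid spinning */
      return (int)(delta.tv_sec * 1000 + (delta.tv_nsec + 999999) / 1000000);
    }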

src/core/security/google_default_credentials.c
@@ -113,7 +113,7 @@ static int is_stack_running_on_compute_engine(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
   while (!detector.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&detector.pollset, &worker,
+    grpc_pollset_work(&detector.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_inf_future(GPR_CLOCK_REALTIME));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));

src/core/surface/completion_queue.c
@@ -237,6 +237,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
   grpc_pollset_worker worker;
+  gpr_timespec now;
   int first_loop = 1;
   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
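
With that local in place, the pluck loop can sample the clock once per iteration and feed the same value to both its own timeout check and the pollset. An abridged sketch of the pattern (not the full function body; the completion-list scan is elided):

    for (;;) {
      /* ... scan the completion list for a matching tag ... */
      now = gpr_now(GPR_CLOCK_MONOTONIC);
      if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
        break; /* deadline reached: report GRPC_QUEUE_TIMEOUT */
      }
      first_loop = 0;
      grpc_pollset_work(&cc->pollset, &worker, now, deadline);
    }

The first_loop flag guarantees at least one pass through grpc_pollset_work even when the caller hands in an already-expired deadline, which is what the non-blocking AsyncNext checks below depend on.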

test/core/httpcli/httpcli_test.c
@@ -88,7 +88,8 @@ static void test_get(int use_ssl, int port) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);
@@ -114,7 +115,8 @@ static void test_post(int use_ssl, int port) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);

test/core/iomgr/endpoint_tests.c
@@ -256,7 +256,8 @@ static void read_and_write_test(grpc_endpoint_test_config config,
   while (!state.read_done || !state.write_done) {
     grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
-    grpc_pollset_work(g_pollset, &worker, deadline);
+    grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
@@ -353,7 +354,8 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config,
   while (!write_st.done) {
     grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-    grpc_pollset_work(g_pollset, &worker, deadline);
+    grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   grpc_endpoint_destroy(write_st.ep);
@@ -361,7 +363,8 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config,
   while (!read_st.done) {
     grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-    grpc_pollset_work(g_pollset, &worker, deadline);
+    grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   gpr_free(slices);

test/core/iomgr/fd_posix_test.c
@@ -250,7 +250,8 @@ static void server_wait_and_shutdown(server *sv) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!sv->done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -358,7 +359,8 @@ static void client_wait_and_shutdown(client *cl) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!cl->done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -448,7 +450,8 @@ static void test_grpc_fd_change(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (a.cb_that_ran == NULL) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   GPR_ASSERT(a.cb_that_ran == first_read_callback);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -467,7 +470,8 @@ static void test_grpc_fd_change(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (b.cb_that_ran == NULL) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   /* Except now we verify that second_read_callback ran instead */
   GPR_ASSERT(b.cb_that_ran == second_read_callback);

test/core/iomgr/tcp_client_posix_test.c
@@ -112,7 +112,8 @@ void test_succeeds(void) {
   while (g_connections_complete == connections_complete_before) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
   }

   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -142,7 +143,8 @@ void test_fails(void) {
   /* wait for the connection callback to finish */
   while (g_connections_complete == connections_complete_before) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, test_deadline());
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      test_deadline());
   }

   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -211,7 +213,8 @@ void test_times_out(void) {
       GPR_ASSERT(g_connections_complete ==
                  connections_complete_before + is_after_deadline);
     }
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

test/core/iomgr/tcp_posix_test.c
@@ -187,7 +187,8 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -224,7 +225,8 @@ static void large_read_test(ssize_t slice_size) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -285,7 +287,8 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
   for (;;) {
     grpc_pollset_worker worker;
     gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
-    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    do {
      bytes_read =
@@ -365,7 +368,8 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) {
     if (state.write_done) {
       break;
     }
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -422,7 +426,8 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) {
       if (state.write_done) {
         break;
       }
-      grpc_pollset_work(&g_pollset, &worker, deadline);
+      grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                        deadline);
     }
     gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
     break;

test/core/iomgr/tcp_server_posix_test.c
@@ -137,7 +137,8 @@ static void test_connect(int n) {
   while (g_nconnects == nconnects_before &&
          gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&g_pollset, &worker, deadline);
+    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                      deadline);
   }
   gpr_log(GPR_DEBUG, "wait done");

test/core/security/oauth2_utils.c
@@ -85,7 +85,7 @@ char *grpc_test_fetch_oauth2_token_with_credentials(grpc_credentials *creds) {
   gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
   while (!request.is_done) {
     grpc_pollset_worker worker;
-    grpc_pollset_work(&request.pollset, &worker,
+    grpc_pollset_work(&request.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                       gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));

test/core/util/reconnect_server.c
@@ -134,7 +134,8 @@ void reconnect_server_poll(reconnect_server *server, int seconds) {
       gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                    gpr_time_from_seconds(seconds, GPR_TIMESPAN));
   gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
-  grpc_pollset_work(&server->pollset, &worker, deadline);
+  grpc_pollset_work(&server->pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
+                    deadline);
   gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
 }

test/cpp/end2end/async_end2end_test.cc
@@ -69,11 +69,11 @@ namespace testing {

 namespace {

-void* tag(int i) { return (void*)(gpr_intptr) i; }
+void* tag(int i) { return (void*)(gpr_intptr)i; }

 #ifdef GPR_POSIX_SOCKET
-static int assert_non_blocking_poll(
-    struct pollfd *pfds, nfds_t nfds, int timeout) {
+static int assert_non_blocking_poll(struct pollfd* pfds, nfds_t nfds,
+                                    int timeout) {
   GPR_ASSERT(timeout == 0);
   return poll(pfds, nfds, timeout);
 }
@@ -85,9 +85,7 @@ class PollOverride {
     grpc_poll_function = f;
   }

-  ~PollOverride() {
-    grpc_poll_function = prev_;
-  }
+  ~PollOverride() { grpc_poll_function = prev_; }

  private:
   grpc_poll_function_type prev_;
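
The same hook can be driven without the RAII wrapper; a hypothetical C helper (check_no_blocking is not part of this diff) that fails the test if anything blocks in poll():

    /* Install a poll() wrapper that asserts a zero timeout, run the code
       under test, then restore the previous hook. */
    static void check_no_blocking(void (*code_under_test)(void)) {
      grpc_poll_function_type prev = grpc_poll_function;
      grpc_poll_function = assert_non_blocking_poll;
      code_under_test();
      grpc_poll_function = prev;
    }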
@@ -95,7 +93,7 @@ class PollOverride {

 class PollingCheckRegion : public PollOverride {
  public:
-  explicit PollingCheckRegion(bool allow_blocking)
+  explicit PollingCheckRegion(bool allow_blocking)
       : PollOverride(allow_blocking ? poll : assert_non_blocking_poll) {}
 };
 #else
@@ -112,8 +110,7 @@ class Verifier : public PollingCheckRegion {
     expectations_[tag(i)] = expect_ok;
     return *this;
   }
-  void Verify(CompletionQueue *cq) {
-    if (spin_) gpr_log(GPR_DEBUG, "spin");
+  void Verify(CompletionQueue* cq) {
     GPR_ASSERT(!expectations_.empty());
     while (!expectations_.empty()) {
       bool ok;
@@ -135,33 +132,38 @@ class Verifier : public PollingCheckRegion {
       expectations_.erase(it);
     }
   }
-  void Verify(CompletionQueue *cq, std::chrono::system_clock::time_point deadline) {
-    if (spin_) gpr_log(GPR_DEBUG, "spin");
+  void Verify(CompletionQueue* cq,
+              std::chrono::system_clock::time_point deadline) {
     if (expectations_.empty()) {
       bool ok;
-      void *got_tag;
+      void* got_tag;
       if (spin_) {
         while (std::chrono::system_clock::now() < deadline) {
-          EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME)), CompletionQueue::TIMEOUT);
+          EXPECT_EQ(
+              cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME)),
+              CompletionQueue::TIMEOUT);
         }
       } else {
-        EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::TIMEOUT);
+        EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+                  CompletionQueue::TIMEOUT);
       }
     } else {
       while (!expectations_.empty()) {
         bool ok;
-        void *got_tag;
+        void* got_tag;
         if (spin_) {
           for (;;) {
             GPR_ASSERT(std::chrono::system_clock::now() < deadline);
-            auto r = cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
+            auto r =
+                cq->AsyncNext(&got_tag, &ok, gpr_time_0(GPR_CLOCK_REALTIME));
             if (r == CompletionQueue::TIMEOUT) continue;
             if (r == CompletionQueue::GOT_EVENT) break;
             gpr_log(GPR_ERROR, "unexpected result from AsyncNext");
             abort();
           }
         } else {
-          EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline), CompletionQueue::GOT_EVENT);
+          EXPECT_EQ(cq->AsyncNext(&got_tag, &ok, deadline),
+                    CompletionQueue::GOT_EVENT);
         }
         auto it = expectations_.find(got_tag);
         EXPECT_TRUE(it != expectations_.end());
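
At the C-core level, the spin branch above amounts to asserting that a completion-queue poll with an already-expired deadline returns immediately instead of parking in poll(). A rough equivalent using the core API rather than the C++ wrapper (illustrative; cq here stands for a grpc_completion_queue* with nothing pending):

    grpc_event ev =
        grpc_completion_queue_next(cq, gpr_time_0(GPR_CLOCK_REALTIME), NULL);
    GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT); /* returned without blocking */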
@@ -185,7 +187,8 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
     server_address_ << "localhost:" << port;
     // Setup server
     ServerBuilder builder;
-    builder.AddListeningPort(server_address_.str(), grpc::InsecureServerCredentials());
+    builder.AddListeningPort(server_address_.str(),
+                             grpc::InsecureServerCredentials());
     builder.RegisterAsyncService(&service_);
     cq_ = builder.AddCompletionQueue();
     server_ = builder.BuildAndStart();
@@ -222,8 +225,8 @@ class AsyncEnd2endTest : public ::testing::TestWithParam<bool> {
     std::unique_ptr<ClientAsyncResponseReader<EchoResponse> > response_reader(
         stub_->AsyncEcho(&cli_ctx, send_request, cq_.get()));

-    service_.RequestEcho(&srv_ctx, &recv_request, &response_writer,
-                         cq_.get(), cq_.get(), tag(2));
+    service_.RequestEcho(&srv_ctx, &recv_request, &response_writer, cq_.get(),
+                         cq_.get(), tag(2));

     Verifier(GetParam()).Expect(2, true).Verify(cq_.get());

     EXPECT_EQ(send_request.message(), recv_request.message());
@@ -290,10 +293,14 @@ TEST_P(AsyncEnd2endTest, AsyncNextRpc) {
   send_response.set_message(recv_request.message());
   response_writer.Finish(send_response, Status::OK, tag(3));

-  Verifier(GetParam()).Expect(3, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
+  Verifier(GetParam())
+      .Expect(3, true)
+      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());

   response_reader->Finish(&recv_response, &recv_status, tag(4));
-  Verifier(GetParam()).Expect(4, true).Verify(cq_.get(), std::chrono::system_clock::time_point::max());
+  Verifier(GetParam())
+      .Expect(4, true)
+      .Verify(cq_.get(), std::chrono::system_clock::time_point::max());

   EXPECT_EQ(send_response.message(), recv_response.message());
   EXPECT_TRUE(recv_status.ok());
@@ -316,8 +323,8 @@ TEST_P(AsyncEnd2endTest, SimpleClientStreaming) {
   std::unique_ptr<ClientAsyncWriter<EchoRequest> > cli_stream(
       stub_->AsyncRequestStream(&cli_ctx, &recv_response, cq_.get(), tag(1)));

-  service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(),
-                                cq_.get(), tag(2));
+  service_.RequestRequestStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+                                tag(2));

   Verifier(GetParam()).Expect(2, true).Expect(1, true).Verify(cq_.get());
@@ -419,8 +426,8 @@ TEST_P(AsyncEnd2endTest, SimpleBidiStreaming) {
   std::unique_ptr<ClientAsyncReaderWriter<EchoRequest, EchoResponse> >
       cli_stream(stub_->AsyncBidiStream(&cli_ctx, cq_.get(), tag(1)));

-  service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(),
-                             cq_.get(), tag(2));
+  service_.RequestBidiStream(&srv_ctx, &srv_stream, cq_.get(), cq_.get(),
+                             tag(2));

   Verifier(GetParam()).Expect(1, true).Expect(2, true).Verify(cq_.get());
@@ -606,18 +613,17 @@ TEST_P(AsyncEnd2endTest, MetadataRpc) {
   std::pair<grpc::string, grpc::string> meta1("key1", "val1");
   std::pair<grpc::string, grpc::string> meta2(
       "key2-bin",
-      grpc::string("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc",
-                   13));
+      grpc::string("\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc", 13));
   std::pair<grpc::string, grpc::string> meta3("key3", "val3");
   std::pair<grpc::string, grpc::string> meta6(
       "key4-bin",
       grpc::string("\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d",
-                  14));
+                   14));
   std::pair<grpc::string, grpc::string> meta5("key5", "val5");
   std::pair<grpc::string, grpc::string> meta4(
       "key6-bin",
-      grpc::string("\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee",
-                   15));
+      grpc::string(
+          "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee", 15));

   cli_ctx.AddMetadata(meta1.first, meta1.second);
   cli_ctx.AddMetadata(meta2.first, meta2.second);
@@ -735,7 +741,8 @@ TEST_P(AsyncEnd2endTest, ServerCheckDone) {
   EXPECT_TRUE(recv_status.ok());
 }

-INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest, ::testing::Values(false, true));
+INSTANTIATE_TEST_CASE_P(AsyncEnd2end, AsyncEnd2endTest,
+                        ::testing::Values(false, true));

 }  // namespace
 }  // namespace testing
