Merge pull request #23651 from vjpai/revert_callback_alternative

Revert callback-alternative PRs
Branch: pull/23653/head
Vijay Pai committed via GitHub, 5 years ago
commit b7a7a25a0f
Changed files (10):
  1. src/core/lib/iomgr/ev_posix.cc (2 changed lines)
  2. src/core/lib/iomgr/iomgr.cc (10 changed lines)
  3. src/core/lib/iomgr/iomgr.h (10 changed lines)
  4. src/core/lib/surface/completion_queue.cc (282 changed lines)
  5. src/core/lib/surface/completion_queue.h (8 changed lines)
  6. src/core/lib/surface/init.cc (2 changed lines)
  7. test/cpp/end2end/client_callback_end2end_test.cc (62 changed lines)
  8. test/cpp/end2end/end2end_test.cc (86 changed lines)
  9. test/cpp/end2end/message_allocator_end2end_test.cc (33 changed lines)
  10. test/cpp/microbenchmarks/bm_cq.cc (62 changed lines)

@@ -37,7 +37,6 @@
 #include "src/core/lib/iomgr/ev_epollex_linux.h"
 #include "src/core/lib/iomgr/ev_poll_posix.h"
 #include "src/core/lib/iomgr/internal_errqueue.h"
-#include "src/core/lib/iomgr/iomgr.h"
 GPR_GLOBAL_CONFIG_DEFINE_STRING(
     grpc_poll_strategy, "all",
@@ -108,7 +107,6 @@ const grpc_event_engine_vtable* init_non_polling(bool explicit_request) {
   auto ret = grpc_init_poll_posix(explicit_request);
   real_poll_function = grpc_poll_function;
   grpc_poll_function = dummy_poll;
-  grpc_iomgr_mark_non_polling_internal();
   return ret;
 }
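Note (not part of the diff): init_non_polling() above backs the "none" poll strategy, and the reverted call was how that engine flagged itself to the iomgr. A minimal sketch of opting into that engine, assuming the standard GRPC_POLL_STRATEGY environment variable is consulted when grpc_init() runs:

/* Illustrative sketch only; not code from this PR. Selects the non-polling
 * event engine so that init_non_polling() above is the active poller. */
#include <stdlib.h>

#include <grpc/grpc.h>

int main(void) {
  /* "none" is the strategy served by init_non_polling(); grpc_poll_strategy
   * defaults to "all" as defined in this file. */
  setenv("GRPC_POLL_STRATEGY", "none", 1 /* overwrite */);
  grpc_init();
  /* ... drive completion queues without a background poller (tests only) ... */
  grpc_shutdown();
  return 0;
}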

@@ -31,7 +31,6 @@
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/gpr/useful.h"
-#include "src/core/lib/gprpp/atomic.h"
 #include "src/core/lib/gprpp/global_config.h"
 #include "src/core/lib/gprpp/thd.h"
 #include "src/core/lib/iomgr/buffer_list.h"
@@ -51,7 +50,6 @@ static gpr_cv g_rcv;
 static int g_shutdown;
 static grpc_iomgr_object g_root_object;
 static bool g_grpc_abort_on_leaks;
-static grpc_core::Atomic<bool> g_iomgr_non_polling{false};
 void grpc_iomgr_init() {
   grpc_core::ExecCtx exec_ctx;
@@ -194,11 +192,3 @@ void grpc_iomgr_unregister_object(grpc_iomgr_object* obj) {
 }
 bool grpc_iomgr_abort_on_leaks(void) { return g_grpc_abort_on_leaks; }
-bool grpc_iomgr_non_polling() {
-  return g_iomgr_non_polling.Load(grpc_core::MemoryOrder::SEQ_CST);
-}
-void grpc_iomgr_mark_non_polling_internal() {
-  g_iomgr_non_polling.Store(true, grpc_core::MemoryOrder::SEQ_CST);
-}

@@ -45,16 +45,6 @@ void grpc_iomgr_shutdown_background_closure();
  */
 bool grpc_iomgr_run_in_background();
-/* Returns true if polling engine is non-polling, false otherwise.
- * Currently only 'none' is non-polling.
- */
-bool grpc_iomgr_non_polling();
-/* Mark the polling engine as non-polling. For internal use only.
- * Currently only 'none' is non-polling.
- */
-void grpc_iomgr_mark_non_polling_internal();
 /** Returns true if the caller is a worker thread for any background poller. */
 bool grpc_iomgr_is_any_background_poller_thread();

@@ -39,7 +39,6 @@
 #include "src/core/lib/gpr/string.h"
 #include "src/core/lib/gpr/tls.h"
 #include "src/core/lib/gprpp/atomic.h"
-#include "src/core/lib/iomgr/closure.h"
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/pollset.h"
 #include "src/core/lib/iomgr/timer.h"
@@ -209,9 +208,6 @@ struct cq_vtable {
                       void* reserved);
   grpc_event (*pluck)(grpc_completion_queue* cq, void* tag,
                       gpr_timespec deadline, void* reserved);
-  // TODO(vjpai): Remove proxy_pollset once callback_alternative no longer
-  // needed.
-  grpc_pollset* (*proxy_pollset)(grpc_completion_queue* cq);
 };
 namespace {
@@ -313,7 +309,7 @@
 };
 struct cq_callback_data {
-  explicit cq_callback_data(
+  cq_callback_data(
       grpc_experimental_completion_queue_functor* shutdown_callback)
       : shutdown_callback(shutdown_callback) {}
@@ -338,81 +334,6 @@ struct cq_callback_data {
   grpc_experimental_completion_queue_functor* shutdown_callback;
 };
-// TODO(vjpai): Remove all callback_alternative variants when event manager is
-// the only supported poller.
-struct cq_callback_alternative_data {
-  explicit cq_callback_alternative_data(
-      grpc_experimental_completion_queue_functor* shutdown_callback)
-      : implementation(SharedNextableCQ()),
-        shutdown_callback(shutdown_callback) {}
-  /* This just points to a single shared nextable CQ */
-  grpc_completion_queue* const implementation;
-  /** Number of outstanding events (+1 if not shut down)
-      Initial count is dropped by grpc_completion_queue_shutdown */
-  grpc_core::Atomic<intptr_t> pending_events{1};
-  /** 0 initially. 1 once we initiated shutdown */
-  bool shutdown_called = false;
-  /** A callback that gets invoked when the CQ completes shutdown */
-  grpc_experimental_completion_queue_functor* shutdown_callback;
-  static grpc_completion_queue* SharedNextableCQ() {
-    grpc_core::MutexLock lock(&*shared_cq_next_mu);
-    if (shared_cq_next == nullptr) {
-      shared_cq_next = grpc_completion_queue_create_for_next(nullptr);
-      int num_nexting_threads = GPR_CLAMP(gpr_cpu_num_cores(), 1, 32);
-      threads_remaining.Store(num_nexting_threads,
-                              grpc_core::MemoryOrder::RELEASE);
-      for (int i = 0; i < num_nexting_threads; i++) {
-        grpc_core::Executor::Run(
-            GRPC_CLOSURE_CREATE(
-                [](void* arg, grpc_error* /*error*/) {
-                  grpc_completion_queue* cq =
-                      static_cast<grpc_completion_queue*>(arg);
-                  while (true) {
-                    grpc_event event = grpc_completion_queue_next(
-                        cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
-                    if (event.type == GRPC_QUEUE_SHUTDOWN) {
-                      break;
-                    }
-                    GPR_DEBUG_ASSERT(event.type == GRPC_OP_COMPLETE);
-                    // We can always execute the callback inline rather than
-                    // pushing it to another Executor thread because this
-                    // thread is definitely running on an executor, does not
-                    // hold any application locks before executing the callback,
-                    // and cannot be entered recursively.
-                    auto* functor = static_cast<
-                        grpc_experimental_completion_queue_functor*>(event.tag);
-                    functor->functor_run(functor, event.success);
-                  }
-                  if (threads_remaining.FetchSub(
-                          1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
-                    grpc_completion_queue_destroy(cq);
-                  }
-                },
-                shared_cq_next, nullptr),
-            GRPC_ERROR_NONE, grpc_core::ExecutorType::DEFAULT,
-            grpc_core::ExecutorJobType::LONG);
-      }
-    }
-    return shared_cq_next;
-  }
-  // Use manually-constructed Mutex to avoid static construction issues
-  static grpc_core::ManualConstructor<grpc_core::Mutex> shared_cq_next_mu;
-  static grpc_completion_queue*
-      shared_cq_next;  // GUARDED_BY(shared_cq_next_mu)
-  static grpc_core::Atomic<int> threads_remaining;
-};
-grpc_core::ManualConstructor<grpc_core::Mutex>
-    cq_callback_alternative_data::shared_cq_next_mu;
-grpc_completion_queue* cq_callback_alternative_data::shared_cq_next = nullptr;
-grpc_core::Atomic<int> cq_callback_alternative_data::threads_remaining{0};
 }  // namespace
 /* Completion queue structure */
@@ -425,12 +346,6 @@ struct grpc_completion_queue {
   const cq_vtable* vtable;
   const cq_poller_vtable* poller_vtable;
-  // The pollset entry is allowed to enable proxy CQs like the
-  // callback_alternative.
-  // TODO(vjpai): Consider removing pollset and reverting to previous
-  // calculation of pollset once callback_alternative is no longer needed.
-  grpc_pollset* pollset;
 #ifndef NDEBUG
   void** outstanding_tags;
   size_t outstanding_tag_count;
@@ -445,17 +360,13 @@ struct grpc_completion_queue {
 static void cq_finish_shutdown_next(grpc_completion_queue* cq);
 static void cq_finish_shutdown_pluck(grpc_completion_queue* cq);
 static void cq_finish_shutdown_callback(grpc_completion_queue* cq);
-static void cq_finish_shutdown_callback_alternative(grpc_completion_queue* cq);
 static void cq_shutdown_next(grpc_completion_queue* cq);
 static void cq_shutdown_pluck(grpc_completion_queue* cq);
 static void cq_shutdown_callback(grpc_completion_queue* cq);
-static void cq_shutdown_callback_alternative(grpc_completion_queue* cq);
 static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag);
 static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag);
 static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* tag);
-static bool cq_begin_op_for_callback_alternative(grpc_completion_queue* cq,
-                                                 void* tag);
 // A cq_end_op function is called when an operation on a given CQ with
 // a given tag has completed. The storage argument is a reference to the
@@ -478,20 +389,12 @@ static void cq_end_op_for_callback(
     void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
     grpc_cq_completion* storage, bool internal);
-static void cq_end_op_for_callback_alternative(
-    grpc_completion_queue* cq, void* tag, grpc_error* error,
-    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
-    grpc_cq_completion* storage, bool internal);
 static grpc_event cq_next(grpc_completion_queue* cq, gpr_timespec deadline,
                           void* reserved);
 static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
                            gpr_timespec deadline, void* reserved);
-static grpc_pollset* cq_proxy_pollset_for_callback_alternative(
-    grpc_completion_queue* cq);
 // Note that cq_init_next and cq_init_pluck do not use the shutdown_callback
 static void cq_init_next(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
@@ -499,39 +402,29 @@ static void cq_init_pluck(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
 static void cq_init_callback(
     void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
-// poller becomes only option.
-static void cq_init_callback_alternative(
-    void* data, grpc_experimental_completion_queue_functor* shutdown_callback);
 static void cq_destroy_next(void* data);
 static void cq_destroy_pluck(void* data);
 static void cq_destroy_callback(void* data);
-static void cq_destroy_callback_alternative(void* data);
 /* Completion queue vtables based on the completion-type */
-// TODO(vjpai): Make this const again once we stop needing callback_alternative
-static cq_vtable g_polling_cq_vtable[] = {
+static const cq_vtable g_cq_vtable[] = {
     /* GRPC_CQ_NEXT */
     {GRPC_CQ_NEXT, sizeof(cq_next_data), cq_init_next, cq_shutdown_next,
      cq_destroy_next, cq_begin_op_for_next, cq_end_op_for_next, cq_next,
-     nullptr, nullptr},
+     nullptr},
     /* GRPC_CQ_PLUCK */
     {GRPC_CQ_PLUCK, sizeof(cq_pluck_data), cq_init_pluck, cq_shutdown_pluck,
      cq_destroy_pluck, cq_begin_op_for_pluck, cq_end_op_for_pluck, nullptr,
-     cq_pluck, nullptr},
+     cq_pluck},
     /* GRPC_CQ_CALLBACK */
     {GRPC_CQ_CALLBACK, sizeof(cq_callback_data), cq_init_callback,
      cq_shutdown_callback, cq_destroy_callback, cq_begin_op_for_callback,
-     cq_end_op_for_callback, nullptr, nullptr, nullptr},
+     cq_end_op_for_callback, nullptr, nullptr},
 };
-// Separate vtable for non-polling cqs, assign at init
-static cq_vtable g_nonpolling_cq_vtable[sizeof(g_polling_cq_vtable) /
-                                        sizeof(g_polling_cq_vtable[0])];
 #define DATA_FROM_CQ(cq) ((void*)(cq + 1))
-#define INLINE_POLLSET_FROM_CQ(cq) \
+#define POLLSET_FROM_CQ(cq) \
   ((grpc_pollset*)(cq->vtable->data_size + (char*)DATA_FROM_CQ(cq)))
-#define POLLSET_FROM_CQ(cq) (cq->pollset)
 grpc_core::TraceFlag grpc_cq_pluck_trace(false, "queue_pluck");
@@ -550,46 +443,6 @@ static void on_pollset_shutdown_done(void* cq, grpc_error* error);
 void grpc_cq_global_init() {
   gpr_tls_init(&g_cached_event);
   gpr_tls_init(&g_cached_cq);
-  g_nonpolling_cq_vtable[GRPC_CQ_NEXT] = g_polling_cq_vtable[GRPC_CQ_NEXT];
-  g_nonpolling_cq_vtable[GRPC_CQ_PLUCK] = g_polling_cq_vtable[GRPC_CQ_PLUCK];
-  g_nonpolling_cq_vtable[GRPC_CQ_CALLBACK] =
-      g_polling_cq_vtable[GRPC_CQ_CALLBACK];
-}
-// TODO(vjpai): Remove when callback_alternative is no longer needed
-void grpc_cq_init() {
-  // If the iomgr runs in the background, we can use the preferred callback CQ.
-  // If the iomgr is non-polling, we cannot use the alternative callback CQ.
-  if (!grpc_iomgr_run_in_background() && !grpc_iomgr_non_polling()) {
-    cq_callback_alternative_data::shared_cq_next_mu.Init();
-    g_polling_cq_vtable[GRPC_CQ_CALLBACK] = {
-        GRPC_CQ_CALLBACK,
-        sizeof(cq_callback_alternative_data),
-        cq_init_callback_alternative,
-        cq_shutdown_callback_alternative,
-        cq_destroy_callback_alternative,
-        cq_begin_op_for_callback_alternative,
-        cq_end_op_for_callback_alternative,
-        nullptr,
-        nullptr,
-        cq_proxy_pollset_for_callback_alternative};
-  }
-}
-// TODO(vjpai): Remove when callback_alternative is no longer needed
-void grpc_cq_shutdown() {
-  if (!grpc_iomgr_run_in_background() && !grpc_iomgr_non_polling()) {
-    {
-      grpc_core::MutexLock lock(
-          &*cq_callback_alternative_data::shared_cq_next_mu);
-      if (cq_callback_alternative_data::shared_cq_next != nullptr) {
-        grpc_completion_queue_shutdown(
-            cq_callback_alternative_data::shared_cq_next);
-      }
-      cq_callback_alternative_data::shared_cq_next = nullptr;
-    }
-    cq_callback_alternative_data::shared_cq_next_mu.Destroy();
-  }
 }
 void grpc_completion_queue_thread_local_cache_init(grpc_completion_queue* cq) {
@@ -668,9 +521,7 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
       "polling_type=%d)",
       2, (completion_type, polling_type));
-  const cq_vtable* vtable = (polling_type == GRPC_CQ_NON_POLLING)
-                                ? &g_nonpolling_cq_vtable[completion_type]
-                                : &g_polling_cq_vtable[completion_type];
+  const cq_vtable* vtable = &g_cq_vtable[completion_type];
   const cq_poller_vtable* poller_vtable =
       &g_poller_vtable_by_poller_type[polling_type];
@@ -687,18 +538,9 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
   /* One for destroy(), one for pollset_shutdown */
   new (&cq->owning_refs) grpc_core::RefCount(2);
+  poller_vtable->init(POLLSET_FROM_CQ(cq), &cq->mu);
   vtable->init(DATA_FROM_CQ(cq), shutdown_callback);
-  // TODO(vjpai): When callback_alternative is no longer needed, cq->pollset can
-  // be removed and the nullptr proxy_pollset value below can be the definition
-  // of POLLSET_FROM_CQ.
-  cq->pollset = cq->vtable->proxy_pollset == nullptr
-                    ? INLINE_POLLSET_FROM_CQ(cq)
-                    : cq->vtable->proxy_pollset(cq);
-  // Init the inline pollset. If a proxy CQ is used, the proxy pollset will be
-  // init'ed in its CQ init.
-  cq->poller_vtable->init(INLINE_POLLSET_FROM_CQ(cq), &cq->mu);
   GRPC_CLOSURE_INIT(&cq->pollset_shutdown_done, on_pollset_shutdown_done, cq,
                     grpc_schedule_on_exec_ctx);
   return cq;
@@ -736,17 +578,6 @@ static void cq_destroy_callback(void* data) {
   cqd->~cq_callback_data();
 }
-static void cq_init_callback_alternative(
-    void* data, grpc_experimental_completion_queue_functor* shutdown_callback) {
-  new (data) cq_callback_alternative_data(shutdown_callback);
-}
-static void cq_destroy_callback_alternative(void* data) {
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*>(data);
-  cqd->~cq_callback_alternative_data();
-}
 grpc_cq_completion_type grpc_get_cq_completion_type(grpc_completion_queue* cq) {
   return cq->vtable->cq_completion_type;
 }
@@ -787,9 +618,7 @@ void grpc_cq_internal_unref(grpc_completion_queue* cq) {
 #endif
   if (GPR_UNLIKELY(cq->owning_refs.Unref(debug_location, reason))) {
     cq->vtable->destroy(DATA_FROM_CQ(cq));
-    // Only destroy the inlined pollset. If a proxy CQ is used, the proxy
-    // pollset will be destroyed by the proxy CQ.
-    cq->poller_vtable->destroy(INLINE_POLLSET_FROM_CQ(cq));
+    cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq));
 #ifndef NDEBUG
     gpr_free(cq->outstanding_tags);
 #endif
@@ -840,14 +669,6 @@ static bool cq_begin_op_for_callback(grpc_completion_queue* cq, void* /*tag*/) {
   return cqd->pending_events.IncrementIfNonzero();
 }
-static bool cq_begin_op_for_callback_alternative(grpc_completion_queue* cq,
-                                                 void* tag) {
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*> DATA_FROM_CQ(cq);
-  return grpc_cq_begin_op(cqd->implementation, tag) &&
-         cqd->pending_events.IncrementIfNonzero();
-}
 bool grpc_cq_begin_op(grpc_completion_queue* cq, void* tag) {
 #ifndef NDEBUG
   gpr_mu_lock(cq->mu);
@@ -1011,7 +832,7 @@ static void cq_end_op_for_pluck(
   GRPC_ERROR_UNREF(error);
 }
-void functor_callback(void* arg, grpc_error* error) {
+static void functor_callback(void* arg, grpc_error* error) {
   auto* functor = static_cast<grpc_experimental_completion_queue_functor*>(arg);
   functor->functor_run(functor, error == GRPC_ERROR_NONE);
 }
@@ -1071,40 +892,6 @@ static void cq_end_op_for_callback(
       GRPC_CLOSURE_CREATE(functor_callback, functor, nullptr), error);
 }
-static void cq_end_op_for_callback_alternative(
-    grpc_completion_queue* cq, void* tag, grpc_error* error,
-    void (*done)(void* done_arg, grpc_cq_completion* storage), void* done_arg,
-    grpc_cq_completion* storage, bool internal) {
-  GPR_TIMER_SCOPE("cq_end_op_for_callback_alternative", 0);
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*> DATA_FROM_CQ(cq);
-  if (GRPC_TRACE_FLAG_ENABLED(grpc_api_trace) ||
-      (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
-       error != GRPC_ERROR_NONE)) {
-    const char* errmsg = grpc_error_string(error);
-    GRPC_API_TRACE(
-        "cq_end_op_for_callback_alternative(cq=%p, tag=%p, error=%s, "
-        "done=%p, done_arg=%p, storage=%p)",
-        6, (cq, tag, errmsg, done, done_arg, storage));
-    if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures) &&
-        error != GRPC_ERROR_NONE) {
-      gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
-    }
-  }
-  // Pass through the actual work to the internal nextable CQ
-  grpc_cq_end_op(cqd->implementation, tag, error, done, done_arg, storage,
-                 internal);
-  cq_check_tag(cq, tag, true); /* Used in debug builds only */
-  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
-    cq_finish_shutdown_callback_alternative(cq);
-  }
-}
 void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
                     void (*done)(void* done_arg, grpc_cq_completion* storage),
                     void* done_arg, grpc_cq_completion* storage,
@@ -1112,13 +899,6 @@ void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
   cq->vtable->end_op(cq, tag, error, done, done_arg, storage, internal);
 }
-static grpc_pollset* cq_proxy_pollset_for_callback_alternative(
-    grpc_completion_queue* cq) {
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*>(DATA_FROM_CQ(cq));
-  return POLLSET_FROM_CQ(cqd->implementation);
-}
 struct cq_is_finished_arg {
   gpr_atm last_seen_things_queued_ever;
   grpc_completion_queue* cq;
@@ -1599,21 +1379,6 @@ static void cq_finish_shutdown_callback(grpc_completion_queue* cq) {
       GRPC_ERROR_NONE);
 }
-static void cq_finish_shutdown_callback_alternative(grpc_completion_queue* cq) {
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*> DATA_FROM_CQ(cq);
-  auto* callback = cqd->shutdown_callback;
-  GPR_ASSERT(cqd->shutdown_called);
-  // Shutdown the non-proxy pollset
-  cq->poller_vtable->shutdown(INLINE_POLLSET_FROM_CQ(cq),
-                              &cq->pollset_shutdown_done);
-  grpc_core::Executor::Run(
-      GRPC_CLOSURE_CREATE(functor_callback, callback, nullptr),
-      GRPC_ERROR_NONE);
-}
 static void cq_shutdown_callback(grpc_completion_queue* cq) {
   cq_callback_data* cqd = static_cast<cq_callback_data*> DATA_FROM_CQ(cq);
@@ -1640,33 +1405,6 @@ static void cq_shutdown_callback(grpc_completion_queue* cq) {
   GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
 }
-static void cq_shutdown_callback_alternative(grpc_completion_queue* cq) {
-  cq_callback_alternative_data* cqd =
-      static_cast<cq_callback_alternative_data*> DATA_FROM_CQ(cq);
-  /* Need an extra ref for cq here because:
-   * We call cq_finish_shutdown_callback() below, which calls pollset shutdown.
-   * Pollset shutdown decrements the cq ref count which can potentially destroy
-   * the cq (if that happens to be the last ref).
-   * Creating an extra ref here prevents the cq from getting destroyed while
-   * this function is still active */
-  GRPC_CQ_INTERNAL_REF(cq, "shutting_down (callback cq)");
-  gpr_mu_lock(cq->mu);
-  if (cqd->shutdown_called) {
-    gpr_mu_unlock(cq->mu);
-    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
-    return;
-  }
-  cqd->shutdown_called = true;
-  if (cqd->pending_events.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
-    gpr_mu_unlock(cq->mu);
-    cq_finish_shutdown_callback_alternative(cq);
-  } else {
-    gpr_mu_unlock(cq->mu);
-  }
-  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (callback cq)");
-}
 /* Shutdown simply drops a ref that we reserved at creation time; if we drop
    to zero here, then enter shutdown mode and wake up any waiters */
 void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {

@@ -69,14 +69,6 @@ void grpc_cq_internal_unref(grpc_completion_queue* cc);
 /* Initializes global variables used by completion queues */
 void grpc_cq_global_init();
-// Completion queue initializations that must be done after iomgr
-// TODO(vjpai): Remove when callback_alternative is no longer needed.
-void grpc_cq_init();
-// Completion queue shutdowns that must be done before iomgr shutdown.
-// TODO(vjpai): Remove when callback_alternative is no longer needed.
-void grpc_cq_shutdown();
 /* Flag that an operation is beginning: the completion channel will not finish
    shutdown until a corrensponding grpc_cq_end_* call is made.
    \a tag is currently used only in debug builds. Return true on success, and

@@ -144,7 +144,6 @@ void grpc_init(void) {
     grpc_core::ApplicationCallbackExecCtx::GlobalInit();
     grpc_core::ExecCtx::GlobalInit();
     grpc_iomgr_init();
-    grpc_cq_init();
     gpr_timers_global_init();
     grpc_core::HandshakerRegistry::Init();
     grpc_security_init();
@@ -170,7 +169,6 @@ void grpc_shutdown_internal_locked(void) {
   int i;
   {
     grpc_core::ExecCtx exec_ctx(0);
-    grpc_cq_shutdown();
    grpc_iomgr_shutdown_background_closure();
    {
      grpc_timer_manager_set_threading(false);  // shutdown timer_manager thread

@@ -45,6 +45,17 @@
 #include "test/cpp/util/string_ref_helper.h"
 #include "test/cpp/util/test_credentials_provider.h"
+// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
+// should be skipped based on a decision made at SetUp time. In particular, any
+// callback tests can only be run if the iomgr can run in the background or if
+// the transport is in-process.
+#define MAYBE_SKIP_TEST \
+  do {                  \
+    if (do_not_test_) { \
+      return;           \
+    }                   \
+  } while (0)
 namespace grpc {
 namespace testing {
 namespace {
@@ -119,6 +130,10 @@ class ClientCallbackEnd2endTest
     server_ = builder.BuildAndStart();
     is_server_started_ = true;
+    if (GetParam().protocol == Protocol::TCP &&
+        !grpc_iomgr_run_in_background()) {
+      do_not_test_ = true;
+    }
   }
   void ResetStub() {
@@ -352,6 +367,7 @@ class ClientCallbackEnd2endTest
       rpc.Await();
     }
   }
+  bool do_not_test_{false};
   bool is_server_started_{false};
   int picked_port_{0};
   std::shared_ptr<Channel> channel_;
@@ -364,11 +380,13 @@ class ClientCallbackEnd2endTest
 };
 TEST_P(ClientCallbackEnd2endTest, SimpleRpc) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcs(1, false);
 }
 TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
@@ -403,6 +421,7 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcExpectedError) {
 }
 TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::mutex mu1, mu2, mu3;
   std::condition_variable cv;
@@ -453,6 +472,7 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLockNested) {
 }
 TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::mutex mu;
   std::condition_variable cv;
@@ -480,16 +500,19 @@ TEST_P(ClientCallbackEnd2endTest, SimpleRpcUnderLock) {
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialRpcs) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcs(10, false);
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialRpcsRawReq) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcsRawReq(10);
 }
 TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SimpleRequest request;
   SimpleResponse response;
@@ -516,43 +539,51 @@ TEST_P(ClientCallbackEnd2endTest, SendClientInitialMetadata) {
 }
 TEST_P(ClientCallbackEnd2endTest, SimpleRpcWithBinaryMetadata) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcs(1, true);
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialRpcsWithVariedBinaryMetadataValue) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcs(10, true);
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcs) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcsGeneric(10, false);
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendGenericEchoAsBidi(10, 1, /*do_writes_done=*/true);
 }
 TEST_P(ClientCallbackEnd2endTest, SequentialGenericRpcsAsBidiWithReactorReuse) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendGenericEchoAsBidi(10, 10, /*do_writes_done=*/true);
 }
 TEST_P(ClientCallbackEnd2endTest, GenericRpcNoWritesDone) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendGenericEchoAsBidi(1, 1, /*do_writes_done=*/false);
 }
 #if GRPC_ALLOW_EXCEPTIONS
 TEST_P(ClientCallbackEnd2endTest, ExceptingRpc) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   SendRpcsGeneric(10, true);
 }
 #endif
 TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::vector<std::thread> threads;
   threads.reserve(10);
@@ -565,6 +596,7 @@ TEST_P(ClientCallbackEnd2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
 }
 TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::vector<std::thread> threads;
   threads.reserve(10);
@@ -577,6 +609,7 @@ TEST_P(ClientCallbackEnd2endTest, MultipleRpcs) {
 }
 TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -605,6 +638,7 @@ TEST_P(ClientCallbackEnd2endTest, CancelRpcBeforeStart) {
 }
 TEST_P(ClientCallbackEnd2endTest, RequestEchoServerCancel) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -735,6 +769,7 @@ class WriteClient : public grpc::experimental::ClientWriteReactor<EchoRequest> {
 };
 TEST_P(ClientCallbackEnd2endTest, RequestStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   WriteClient test{stub_.get(), DO_NOT_CANCEL, 3};
   test.Await();
@@ -745,6 +780,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestStream) {
 }
 TEST_P(ClientCallbackEnd2endTest, ClientCancelsRequestStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   WriteClient test{stub_.get(), DO_NOT_CANCEL, 3, ClientCancelInfo{2}};
   test.Await();
@@ -756,6 +792,7 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsRequestStream) {
 // Server to cancel before doing reading the request
 TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelBeforeReads) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   WriteClient test{stub_.get(), CANCEL_BEFORE_PROCESSING, 1};
   test.Await();
@@ -767,6 +804,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelBeforeReads) {
 // Server to cancel while reading a request from the stream in parallel
 TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelDuringRead) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   WriteClient test{stub_.get(), CANCEL_DURING_PROCESSING, 10};
   test.Await();
@@ -779,6 +817,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelDuringRead) {
 // Server to cancel after reading all the requests but before returning to the
 // client
 TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelAfterReads) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   WriteClient test{stub_.get(), CANCEL_AFTER_PROCESSING, 4};
   test.Await();
@@ -789,6 +828,7 @@ TEST_P(ClientCallbackEnd2endTest, RequestStreamServerCancelAfterReads) {
 }
 TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   class UnaryClient : public grpc::experimental::ClientUnaryReactor {
    public:
@@ -847,6 +887,7 @@ TEST_P(ClientCallbackEnd2endTest, UnaryReactor) {
 }
 TEST_P(ClientCallbackEnd2endTest, GenericUnaryReactor) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   const std::string kMethodName("/grpc.testing.EchoTestService/Echo");
   class UnaryClient : public grpc::experimental::ClientUnaryReactor {
@@ -1012,6 +1053,7 @@ class ReadClient : public grpc::experimental::ClientReadReactor<EchoResponse> {
 };
 TEST_P(ClientCallbackEnd2endTest, ResponseStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ReadClient test{stub_.get(), DO_NOT_CANCEL};
   test.Await();
@@ -1022,6 +1064,7 @@ TEST_P(ClientCallbackEnd2endTest, ResponseStream) {
 }
 TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ReadClient test{stub_.get(), DO_NOT_CANCEL, ClientCancelInfo{2}};
   test.Await();
@@ -1031,6 +1074,7 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsResponseStream) {
 // Server to cancel before sending any response messages
 TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelBefore) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ReadClient test{stub_.get(), CANCEL_BEFORE_PROCESSING};
   test.Await();
@@ -1042,6 +1086,7 @@ TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelBefore) {
 // Server to cancel while writing a response to the stream in parallel
 TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelDuring) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ReadClient test{stub_.get(), CANCEL_DURING_PROCESSING};
   test.Await();
@@ -1054,6 +1099,7 @@ TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelDuring) {
 // Server to cancel after writing all the respones to the stream but before
 // returning to the client
 TEST_P(ClientCallbackEnd2endTest, ResponseStreamServerCancelAfter) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ReadClient test{stub_.get(), CANCEL_AFTER_PROCESSING};
   test.Await();
@@ -1218,6 +1264,7 @@ class BidiClient
 };
 TEST_P(ClientCallbackEnd2endTest, BidiStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), DO_NOT_CANCEL,
                   kServerDefaultResponseStreamsToSend,
@@ -1230,6 +1277,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStream) {
 }
 TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), DO_NOT_CANCEL,
                   kServerDefaultResponseStreamsToSend,
@@ -1242,6 +1290,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamFirstWriteAsync) {
 }
 TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), DO_NOT_CANCEL,
                   kServerDefaultResponseStreamsToSend,
@@ -1254,6 +1303,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorked) {
 }
 TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), DO_NOT_CANCEL,
                   kServerDefaultResponseStreamsToSend,
@@ -1266,6 +1316,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamCorkedFirstWriteAsync) {
 }
 TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), DO_NOT_CANCEL,
                   kServerDefaultResponseStreamsToSend,
@@ -1280,6 +1331,7 @@ TEST_P(ClientCallbackEnd2endTest, ClientCancelsBidiStream) {
 // Server to cancel before reading/writing any requests/responses on the stream
 TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), CANCEL_BEFORE_PROCESSING, /*num_msgs_to_send=*/2,
                   /*cork_metadata=*/false, /*first_write_async=*/false);
@@ -1293,6 +1345,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelBefore) {
 // Server to cancel while reading/writing requests/responses on the stream in
 // parallel
 TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), CANCEL_DURING_PROCESSING,
                   /*num_msgs_to_send=*/10, /*cork_metadata=*/false,
@@ -1307,6 +1360,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelDuring) {
 // Server to cancel after reading/writing all requests/responses on the stream
 // but before returning to the client
 TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   BidiClient test(stub_.get(), CANCEL_AFTER_PROCESSING, /*num_msgs_to_send=*/5,
                   /*cork_metadata=*/false, /*first_write_async=*/false);
@@ -1318,6 +1372,7 @@ TEST_P(ClientCallbackEnd2endTest, BidiStreamServerCancelAfter) {
 }
 TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   class Client : public grpc::experimental::ClientBidiReactor<EchoRequest,
                                                               EchoResponse> {
@@ -1365,6 +1420,7 @@ TEST_P(ClientCallbackEnd2endTest, SimultaneousReadAndWritesDone) {
 }
 TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
+  MAYBE_SKIP_TEST;
   ChannelArguments args;
   const auto& channel_creds = GetCredentialsProvider()->GetChannelCredentials(
       GetParam().credentials_type, &args);
@@ -1399,6 +1455,7 @@ TEST_P(ClientCallbackEnd2endTest, UnimplementedRpc) {
 TEST_P(ClientCallbackEnd2endTest,
        ResponseStreamExtraReactionFlowReadsUntilDone) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   class ReadAllIncomingDataClient
       : public grpc::experimental::ClientReadReactor<EchoResponse> {
@@ -1527,5 +1584,8 @@ INSTANTIATE_TEST_SUITE_P(ClientCallbackEnd2endTest, ClientCallbackEnd2endTest,
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   grpc::testing::TestEnvironment env(argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }

@@ -62,6 +62,17 @@ using grpc::testing::EchoResponse;
 using grpc::testing::kTlsCredentialsType;
 using std::chrono::system_clock;
+// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
+// should be skipped based on a decision made at SetUp time. In particular,
+// tests that use the callback server can only be run if the iomgr can run in
+// the background or if the transport is in-process.
+#define MAYBE_SKIP_TEST \
+  do {                  \
+    if (do_not_test_) { \
+      return;           \
+    }                   \
+  } while (0)
 namespace grpc {
 namespace testing {
 namespace {
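Note (not part of the diff): MAYBE_SKIP_TEST, as added above, silently returns from the test body. A roughly equivalent formulation, assuming a GoogleTest version that provides GTEST_SKIP(), would report the configuration as skipped instead of passing; this PR keeps the plain early return:

// Hypothetical alternative to the macro above; not used by this PR.
#define MAYBE_SKIP_TEST                                                     \
  do {                                                                      \
    if (do_not_test_) {                                                     \
      GTEST_SKIP() << "callback tests need a background iomgr or inproc";   \
    }                                                                       \
  } while (0)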
@@ -316,6 +327,14 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
     GetParam().Log();
   }
+  void SetUp() override {
+    if (GetParam().callback_server && !GetParam().inproc &&
+        !grpc_iomgr_run_in_background()) {
+      do_not_test_ = true;
+      return;
+    }
+  }
   void TearDown() override {
     if (is_server_started_) {
       server_->Shutdown();
@@ -450,6 +469,7 @@ class End2endTest : public ::testing::TestWithParam<TestScenario> {
     DummyInterceptor::Reset();
   }
+  bool do_not_test_{false};
   bool is_server_started_;
   std::shared_ptr<Channel> channel_;
   std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
@@ -505,6 +525,7 @@ class End2endServerTryCancelTest : public End2endTest {
   // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
   void TestRequestStreamServerCancel(
       ServerTryCancelRequestPhase server_try_cancel, int num_msgs_to_send) {
+    MAYBE_SKIP_TEST;
     RestartServer(std::shared_ptr<AuthMetadataProcessor>());
     ResetStub();
     EchoRequest request;
@@ -583,6 +604,7 @@ class End2endServerTryCancelTest : public End2endTest {
   // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
   void TestResponseStreamServerCancel(
       ServerTryCancelRequestPhase server_try_cancel) {
+    MAYBE_SKIP_TEST;
     RestartServer(std::shared_ptr<AuthMetadataProcessor>());
     ResetStub();
     EchoRequest request;
@@ -664,6 +686,7 @@ class End2endServerTryCancelTest : public End2endTest {
   // NOTE: Do not call this function with server_try_cancel == DO_NOT_CANCEL.
   void TestBidiStreamServerCancel(ServerTryCancelRequestPhase server_try_cancel,
                                   int num_messages) {
+    MAYBE_SKIP_TEST;
     RestartServer(std::shared_ptr<AuthMetadataProcessor>());
     ResetStub();
     EchoRequest request;
@@ -739,6 +762,7 @@ class End2endServerTryCancelTest : public End2endTest {
 };
 TEST_P(End2endServerTryCancelTest, RequestEchoServerCancel) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -801,6 +825,7 @@ TEST_P(End2endServerTryCancelTest, BidiStreamServerCancelAfter) {
 }
 TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
+  MAYBE_SKIP_TEST;
   // User-Agent is an HTTP header for HTTP transports only
   if (GetParam().inproc) {
     return;
@@ -824,6 +849,7 @@ TEST_P(End2endTest, SimpleRpcWithCustomUserAgentPrefix) {
 }
 TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::vector<std::thread> threads;
   threads.reserve(10);
@@ -836,6 +862,7 @@ TEST_P(End2endTest, MultipleRpcsWithVariedBinaryMetadataValue) {
 }
 TEST_P(End2endTest, MultipleRpcs) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   std::vector<std::thread> threads;
   threads.reserve(10);
@@ -848,6 +875,7 @@ TEST_P(End2endTest, MultipleRpcs) {
 }
 TEST_P(End2endTest, ManyStubs) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ChannelTestPeer peer(channel_.get());
   int registered_calls_pre = peer.registered_calls();
@@ -860,6 +888,7 @@ TEST_P(End2endTest, ManyStubs) {
 }
 TEST_P(End2endTest, EmptyBinaryMetadata) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -872,6 +901,7 @@ TEST_P(End2endTest, EmptyBinaryMetadata) {
 }
 TEST_P(End2endTest, ReconnectChannel) {
+  MAYBE_SKIP_TEST;
   if (GetParam().inproc) {
     return;
   }
@@ -899,6 +929,7 @@ TEST_P(End2endTest, ReconnectChannel) {
 }
 TEST_P(End2endTest, RequestStreamOneRequest) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -915,6 +946,7 @@ TEST_P(End2endTest, RequestStreamOneRequest) {
 }
 TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -930,6 +962,7 @@ TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
 }
 TEST_P(End2endTest, RequestStreamTwoRequests) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -946,6 +979,7 @@ TEST_P(End2endTest, RequestStreamTwoRequests) {
 }
 TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -962,6 +996,7 @@ TEST_P(End2endTest, RequestStreamTwoRequestsWithWriteThrough) {
 }
 TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -978,6 +1013,7 @@ TEST_P(End2endTest, RequestStreamTwoRequestsWithCoalescingApi) {
 }
 TEST_P(End2endTest, ResponseStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -996,6 +1032,7 @@ TEST_P(End2endTest, ResponseStream) {
 }
 TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1017,6 +1054,7 @@ TEST_P(End2endTest, ResponseStreamWithCoalescingApi) {
 // This was added to prevent regression from issue:
 // https://github.com/grpc/grpc/issues/11546
 TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1038,6 +1076,7 @@ TEST_P(End2endTest, ResponseStreamWithEverythingCoalesced) {
 }
 TEST_P(End2endTest, BidiStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1062,6 +1101,7 @@ TEST_P(End2endTest, BidiStream) {
 }
 TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1097,6 +1137,7 @@ TEST_P(End2endTest, BidiStreamWithCoalescingApi) {
 // This was added to prevent regression from issue:
 // https://github.com/grpc/grpc/issues/11546
 TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1122,6 +1163,7 @@ TEST_P(End2endTest, BidiStreamWithEverythingCoalesced) {
 // Talk to the two services with the same name but different package names.
 // The two stubs are created on the same channel.
 TEST_P(End2endTest, DiffPackageServices) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1150,6 +1192,7 @@ void CancelRpc(ClientContext* context, int delay_us, ServiceType* service) {
 }
 TEST_P(End2endTest, CancelRpcBeforeStart) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1165,6 +1208,7 @@ TEST_P(End2endTest, CancelRpcBeforeStart) {
 }
 TEST_P(End2endTest, CancelRpcAfterStart) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1201,6 +1245,7 @@ TEST_P(End2endTest, CancelRpcAfterStart) {
 // Client cancels request stream after sending two messages
 TEST_P(End2endTest, ClientCancelsRequestStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1224,6 +1269,7 @@ TEST_P(End2endTest, ClientCancelsRequestStream) {
 // Client cancels server stream after sending some messages
 TEST_P(End2endTest, ClientCancelsResponseStream) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1259,6 +1305,7 @@ TEST_P(End2endTest, ClientCancelsResponseStream) {
 // Client cancels bidi stream after sending some messages
 TEST_P(End2endTest, ClientCancelsBidi) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1294,6 +1341,7 @@ TEST_P(End2endTest, ClientCancelsBidi) {
 }
 TEST_P(End2endTest, RpcMaxMessageSize) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   EchoRequest request;
   EchoResponse response;
@@ -1316,6 +1364,7 @@ void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
 // Run a Read and a WritesDone simultaneously.
 TEST_P(End2endTest, SimultaneousReadWritesDone) {
+  MAYBE_SKIP_TEST;
   ResetStub();
   ClientContext context;
   gpr_event ev;
@@ -1330,6 +1379,7 @@ TEST_P(End2endTest, SimultaneousReadWritesDone) {
 }
 TEST_P(End2endTest, ChannelState) {
+  MAYBE_SKIP_TEST;
   if (GetParam().inproc) {
     return;
   }
@ -1380,6 +1430,7 @@ TEST_P(End2endTest, ChannelStateTimeout) {
// Talking to a non-existing service. // Talking to a non-existing service.
TEST_P(End2endTest, NonExistingService) { TEST_P(End2endTest, NonExistingService) {
MAYBE_SKIP_TEST;
ResetChannel(); ResetChannel();
std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub; std::unique_ptr<grpc::testing::UnimplementedEchoService::Stub> stub;
stub = grpc::testing::UnimplementedEchoService::NewStub(channel_); stub = grpc::testing::UnimplementedEchoService::NewStub(channel_);
@ -1397,6 +1448,7 @@ TEST_P(End2endTest, NonExistingService) {
// Ask the server to send back a serialized proto in trailer. // Ask the server to send back a serialized proto in trailer.
// This is an example of setting error details. // This is an example of setting error details.
TEST_P(End2endTest, BinaryTrailerTest) { TEST_P(End2endTest, BinaryTrailerTest) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1423,6 +1475,7 @@ TEST_P(End2endTest, BinaryTrailerTest) {
} }
TEST_P(End2endTest, ExpectErrorTest) { TEST_P(End2endTest, ExpectErrorTest) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
std::vector<ErrorStatus> expected_status; std::vector<ErrorStatus> expected_status;
@ -1474,11 +1527,13 @@ class ProxyEnd2endTest : public End2endTest {
}; };
TEST_P(ProxyEnd2endTest, SimpleRpc) { TEST_P(ProxyEnd2endTest, SimpleRpc) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
SendRpc(stub_.get(), 1, false); SendRpc(stub_.get(), 1, false);
} }
TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) { TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1489,6 +1544,7 @@ TEST_P(ProxyEnd2endTest, SimpleRpcWithEmptyMessages) {
} }
TEST_P(ProxyEnd2endTest, MultipleRpcs) { TEST_P(ProxyEnd2endTest, MultipleRpcs) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
std::vector<std::thread> threads; std::vector<std::thread> threads;
threads.reserve(10); threads.reserve(10);
@ -1502,6 +1558,7 @@ TEST_P(ProxyEnd2endTest, MultipleRpcs) {
// Set a 10us deadline and make sure proper error is returned. // Set a 10us deadline and make sure proper error is returned.
TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) { TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1527,6 +1584,7 @@ TEST_P(ProxyEnd2endTest, RpcDeadlineExpires) {
// Set a long but finite deadline. // Set a long but finite deadline.
TEST_P(ProxyEnd2endTest, RpcLongDeadline) { TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1543,6 +1601,7 @@ TEST_P(ProxyEnd2endTest, RpcLongDeadline) {
// Ask server to echo back the deadline it sees. // Ask server to echo back the deadline it sees.
TEST_P(ProxyEnd2endTest, EchoDeadline) { TEST_P(ProxyEnd2endTest, EchoDeadline) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1568,6 +1627,7 @@ TEST_P(ProxyEnd2endTest, EchoDeadline) {
// Ask server to echo back the deadline it sees. The rpc has no deadline. // Ask server to echo back the deadline it sees. The rpc has no deadline.
TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) { TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1583,6 +1643,7 @@ TEST_P(ProxyEnd2endTest, EchoDeadlineForNoDeadlineRpc) {
} }
TEST_P(ProxyEnd2endTest, UnimplementedRpc) { TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1598,6 +1659,7 @@ TEST_P(ProxyEnd2endTest, UnimplementedRpc) {
// Client cancels rpc after 10ms // Client cancels rpc after 10ms
TEST_P(ProxyEnd2endTest, ClientCancelsRpc) { TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1632,6 +1694,7 @@ TEST_P(ProxyEnd2endTest, ClientCancelsRpc) {
// Server cancels rpc after 1ms // Server cancels rpc after 1ms
TEST_P(ProxyEnd2endTest, ServerCancelsRpc) { TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1646,6 +1709,7 @@ TEST_P(ProxyEnd2endTest, ServerCancelsRpc) {
// Make the response larger than the flow control window. // Make the response larger than the flow control window.
TEST_P(ProxyEnd2endTest, HugeResponse) { TEST_P(ProxyEnd2endTest, HugeResponse) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1663,6 +1727,7 @@ TEST_P(ProxyEnd2endTest, HugeResponse) {
} }
TEST_P(ProxyEnd2endTest, Peer) { TEST_P(ProxyEnd2endTest, Peer) {
MAYBE_SKIP_TEST;
// Peer is not meaningful for inproc // Peer is not meaningful for inproc
if (GetParam().inproc) { if (GetParam().inproc) {
return; return;
@ -1691,6 +1756,7 @@ class SecureEnd2endTest : public End2endTest {
}; };
TEST_P(SecureEnd2endTest, SimpleRpcWithHost) { TEST_P(SecureEnd2endTest, SimpleRpcWithHost) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
@ -1722,6 +1788,7 @@ bool MetadataContains(
} }
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) { TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(true); auto* processor = new TestAuthMetadataProcessor(true);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor)); StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub(); ResetStub();
@ -1747,6 +1814,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorSuccess) {
} }
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) { TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(true); auto* processor = new TestAuthMetadataProcessor(true);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor)); StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub(); ResetStub();
@ -1762,6 +1830,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginAndProcessorFailure) {
} }
TEST_P(SecureEnd2endTest, SetPerCallCredentials) { TEST_P(SecureEnd2endTest, SetPerCallCredentials) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1812,6 +1881,7 @@ class CredentialsInterceptorFactory
}; };
TEST_P(SecureEnd2endTest, CallCredentialsInterception) { TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
MAYBE_SKIP_TEST;
if (!GetParam().use_interceptors) { if (!GetParam().use_interceptors) {
return; return;
} }
@ -1841,6 +1911,7 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterception) {
} }
TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) { TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
MAYBE_SKIP_TEST;
if (!GetParam().use_interceptors) { if (!GetParam().use_interceptors) {
return; return;
} }
@ -1875,6 +1946,7 @@ TEST_P(SecureEnd2endTest, CallCredentialsInterceptionWithSetCredentials) {
} }
TEST_P(SecureEnd2endTest, OverridePerCallCredentials) { TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1912,6 +1984,7 @@ TEST_P(SecureEnd2endTest, OverridePerCallCredentials) {
} }
TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) { TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1932,6 +2005,7 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginKeyFailure) {
} }
TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) { TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -1951,6 +2025,7 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginValueFailure) {
} }
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) { TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
request.mutable_param()->set_skip_cancelled_check(true); request.mutable_param()->set_skip_cancelled_check(true);
@ -1976,6 +2051,7 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithDeadline) {
} }
TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) { TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
request.mutable_param()->set_skip_cancelled_check(true); request.mutable_param()->set_skip_cancelled_check(true);
@ -2004,6 +2080,7 @@ TEST_P(SecureEnd2endTest, AuthMetadataPluginWithCancel) {
} }
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) { TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -2027,6 +2104,7 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginFailure) {
} }
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) { TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(false); auto* processor = new TestAuthMetadataProcessor(false);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor)); StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub(); ResetStub();
@ -2055,6 +2133,7 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorSuccess) {
} }
TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) { TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
MAYBE_SKIP_TEST;
auto* processor = new TestAuthMetadataProcessor(false); auto* processor = new TestAuthMetadataProcessor(false);
StartServer(std::shared_ptr<AuthMetadataProcessor>(processor)); StartServer(std::shared_ptr<AuthMetadataProcessor>(processor));
ResetStub(); ResetStub();
@ -2073,6 +2152,7 @@ TEST_P(SecureEnd2endTest, NonBlockingAuthMetadataPluginAndProcessorFailure) {
} }
TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) { TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -2096,6 +2176,7 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) {
} }
TEST_P(SecureEnd2endTest, CompositeCallCreds) { TEST_P(SecureEnd2endTest, CompositeCallCreds) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -2128,6 +2209,7 @@ TEST_P(SecureEnd2endTest, CompositeCallCreds) {
} }
TEST_P(SecureEnd2endTest, ClientAuthContext) { TEST_P(SecureEnd2endTest, ClientAuthContext) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
EchoResponse response; EchoResponse response;
@ -2172,6 +2254,7 @@ class ResourceQuotaEnd2endTest : public End2endTest {
}; };
TEST_P(ResourceQuotaEnd2endTest, SimpleRequest) { TEST_P(ResourceQuotaEnd2endTest, SimpleRequest) {
MAYBE_SKIP_TEST;
ResetStub(); ResetStub();
EchoRequest request; EchoRequest request;
@ -2269,5 +2352,6 @@ INSTANTIATE_TEST_SUITE_P(
int main(int argc, char** argv) { int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv); grpc::testing::TestEnvironment env(argc, argv);
::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS(); int ret = RUN_ALL_TESTS();
return ret;
} }

@@ -45,6 +45,17 @@
#include "test/core/util/test_config.h"
#include "test/cpp/util/test_credentials_provider.h"
+// MAYBE_SKIP_TEST is a macro to determine if this particular test configuration
+// should be skipped based on a decision made at SetUp time. In particular, any
+// callback tests can only be run if the iomgr can run in the background or if
+// the transport is in-process.
+#define MAYBE_SKIP_TEST \
+  do {                  \
+    if (do_not_test_) { \
+      return;           \
+    }                   \
+  } while (0)
namespace grpc {
namespace testing {
namespace {
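The MAYBE_SKIP_TEST macro above only works together with a do_not_test_ flag that each fixture sets once, at construction/SetUp time, based on whether the polling engine can run in the background; the hunks below add exactly that to MessageAllocatorEnd2endTestBase. What follows is a minimal standalone sketch of the whole pattern, not part of this commit: the fixture name SkippableCallbackTest and the test body are illustrative, and it assumes grpc_init() has already been called (see the main() changes below) and that the core-internal iomgr header is on the include path, as it is for these tests.

#include <gtest/gtest.h>

#include "src/core/lib/iomgr/iomgr.h"  // declares grpc_iomgr_run_in_background()

#define MAYBE_SKIP_TEST \
  do {                  \
    if (do_not_test_) { \
      return;           \
    }                   \
  } while (0)

class SkippableCallbackTest : public ::testing::Test {  // illustrative name
 protected:
  SkippableCallbackTest() {
    // Callback tests over a real transport need an iomgr that can poll in the
    // background; if it cannot, mark this whole configuration as not runnable.
    if (!grpc_iomgr_run_in_background()) {
      do_not_test_ = true;
    }
  }
  bool do_not_test_{false};
};

TEST_F(SkippableCallbackTest, SimpleRpc) {
  MAYBE_SKIP_TEST;  // returns early; the test is reported as passing
  // ... exercise the RPC path here ...
}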
@@ -106,7 +117,15 @@ void TestScenario::Log() const {
class MessageAllocatorEnd2endTestBase
: public ::testing::TestWithParam<TestScenario> {
protected:
-MessageAllocatorEnd2endTestBase() { GetParam().Log(); }
+MessageAllocatorEnd2endTestBase() {
+  GetParam().Log();
+  if (GetParam().protocol == Protocol::TCP) {
+    if (!grpc_iomgr_run_in_background()) {
+      do_not_test_ = true;
+      return;
+    }
+  }
+}
~MessageAllocatorEnd2endTestBase() = default;
@@ -191,6 +210,7 @@ class MessageAllocatorEnd2endTestBase
}
}
+bool do_not_test_{false};
int picked_port_{0};
std::shared_ptr<Channel> channel_;
std::unique_ptr<EchoTestService::Stub> stub_;
@@ -202,6 +222,7 @@ class MessageAllocatorEnd2endTestBase
class NullAllocatorTest : public MessageAllocatorEnd2endTestBase {};
TEST_P(NullAllocatorTest, SimpleRpc) {
+MAYBE_SKIP_TEST;
CreateServer(nullptr);
ResetStub();
SendRpcs(1);
@@ -257,6 +278,7 @@ class SimpleAllocatorTest : public MessageAllocatorEnd2endTestBase {
};
TEST_P(SimpleAllocatorTest, SimpleRpc) {
+MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
CreateServer(allocator.get());
@@ -271,6 +293,7 @@ TEST_P(SimpleAllocatorTest, SimpleRpc) {
}
TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) {
+MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
auto mutator = [](experimental::RpcAllocatorState* allocator_state,
@@ -295,6 +318,7 @@ TEST_P(SimpleAllocatorTest, RpcWithEarlyFreeRequest) {
}
TEST_P(SimpleAllocatorTest, RpcWithReleaseRequest) {
+MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<SimpleAllocator> allocator(new SimpleAllocator);
std::vector<EchoRequest*> released_requests;
@@ -354,6 +378,7 @@ class ArenaAllocatorTest : public MessageAllocatorEnd2endTestBase {
};
TEST_P(ArenaAllocatorTest, SimpleRpc) {
+MAYBE_SKIP_TEST;
const int kRpcCount = 10;
std::unique_ptr<ArenaAllocator> allocator(new ArenaAllocator);
CreateServer(allocator.get());
@@ -404,6 +429,10 @@ INSTANTIATE_TEST_SUITE_P(ArenaAllocatorTest, ArenaAllocatorTest,
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(argc, argv);
+// The grpc_init is to cover the MAYBE_SKIP_TEST.
+grpc_init();
::testing::InitGoogleTest(&argc, argv);
-return RUN_ALL_TESTS();
+int ret = RUN_ALL_TESTS();
+grpc_shutdown();
+return ret;
}

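The comment added to main() above ("The grpc_init is to cover the MAYBE_SKIP_TEST") is terse; the point, as far as this diff shows, is ordering: the fixtures decide do_not_test_ by calling grpc_iomgr_run_in_background(), which is only meaningful once gRPC core and its iomgr have been initialized, so grpc_init() must precede RUN_ALL_TESTS() and grpc_shutdown() must follow it. The same bracketing in isolation, with the reasoning spelled out in comments (a sketch restating the hunk above, not additional code from the commit):

int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(argc, argv);
  // Initialize gRPC core before any test runs, so that fixture constructors
  // can safely query grpc_iomgr_run_in_background() for MAYBE_SKIP_TEST.
  grpc_init();
  ::testing::InitGoogleTest(&argc, argv);
  int ret = RUN_ALL_TESTS();
  // Tear down core only after every test (and its fixtures) has finished.
  grpc_shutdown();
  return ret;
}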
@@ -69,11 +69,6 @@ BENCHMARK(BM_CreateDestroyCore);
static void DoneWithCompletionOnStack(void* /*arg*/,
grpc_cq_completion* /*completion*/) {}
-static void DoneWithCompletionOnHeap(void* /*arg*/,
-                                     grpc_cq_completion* completion) {
-  delete completion;
-}
class DummyTag final : public internal::CompletionQueueTag {
public:
bool FinalizeResult(void** /*tag*/, bool* /*status*/) override {
@@ -210,15 +205,8 @@ static void BM_Callback_CQ_Pass1Core(benchmark::State& state) {
gpr_cv_init(&shutdown_cv);
bool got_shutdown = false;
ShutdownCallback shutdown_cb(&got_shutdown);
-// This test with stack-allocated completions only works for non-polling or
-// EM-polling callback core CQs. For generality, test with non-polling.
-grpc_completion_queue_attributes attr;
-attr.version = 2;
-attr.cq_completion_type = GRPC_CQ_CALLBACK;
-attr.cq_polling_type = GRPC_CQ_NON_POLLING;
-attr.cq_shutdown_cb = &shutdown_cb;
-grpc_completion_queue* cc = grpc_completion_queue_create(
-    grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);
+grpc_completion_queue* cc =
+    grpc_completion_queue_create_for_callback(&shutdown_cb, nullptr);
for (auto _ : state) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
@@ -252,53 +240,7 @@ static void BM_Callback_CQ_Pass1Core(benchmark::State& state) {
gpr_cv_destroy(&shutdown_cv);
gpr_mu_destroy(&shutdown_mu);
}
-static void BM_Callback_CQ_Pass1CoreHeapCompletion(benchmark::State& state) {
-  TrackCounters track_counters;
-  int iteration = 0, current_iterations = 0;
-  TagCallback tag_cb(&iteration);
-  gpr_mu_init(&mu);
-  gpr_cv_init(&cv);
-  gpr_mu_init(&shutdown_mu);
-  gpr_cv_init(&shutdown_cv);
-  bool got_shutdown = false;
-  ShutdownCallback shutdown_cb(&got_shutdown);
-  grpc_completion_queue* cc =
-      grpc_completion_queue_create_for_callback(&shutdown_cb, nullptr);
-  for (auto _ : state) {
-    grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
-    grpc_core::ExecCtx exec_ctx;
-    grpc_cq_completion* completion = new grpc_cq_completion;
-    GPR_ASSERT(grpc_cq_begin_op(cc, &tag_cb));
-    grpc_cq_end_op(cc, &tag_cb, GRPC_ERROR_NONE, DoneWithCompletionOnHeap,
-                   nullptr, completion);
-  }
-  shutdown_and_destroy(cc);
-  gpr_mu_lock(&mu);
-  current_iterations = static_cast<int>(state.iterations());
-  while (current_iterations != iteration) {
-    // Wait for all the callbacks to complete.
-    gpr_cv_wait(&cv, &mu, gpr_inf_future(GPR_CLOCK_REALTIME));
-  }
-  gpr_mu_unlock(&mu);
-  gpr_mu_lock(&shutdown_mu);
-  while (!got_shutdown) {
-    // Wait for the shutdown callback to complete.
-    gpr_cv_wait(&shutdown_cv, &shutdown_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
-  }
-  gpr_mu_unlock(&shutdown_mu);
-  GPR_ASSERT(got_shutdown);
-  GPR_ASSERT(iteration == static_cast<int>(state.iterations()));
-  track_counters.Finish(state);
-  gpr_cv_destroy(&cv);
-  gpr_mu_destroy(&mu);
-  gpr_cv_destroy(&shutdown_cv);
-  gpr_mu_destroy(&shutdown_mu);
-}
BENCHMARK(BM_Callback_CQ_Pass1Core);
-BENCHMARK(BM_Callback_CQ_Pass1CoreHeapCompletion);
} // namespace testing
} // namespace grpc

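The comment removed from BM_Callback_CQ_Pass1Core above explained why the benchmark previously built its callback completion queue through the attributes API: stack-allocated completions only work when the callback CQ is non-polling or uses the event-manager poller, so the benchmark pinned GRPC_CQ_NON_POLLING explicitly for generality. The revert returns to grpc_completion_queue_create_for_callback(), which presumably leaves the polling type to the factory default. For reference, a self-contained sketch of the attributes-based path the removed lines used; the no-op shutdown functor stands in for the benchmark's ShutdownCallback class and is purely illustrative:

#include <grpc/grpc.h>

// Minimal shutdown functor: the callback CQ invokes this when shutdown completes.
static void OnShutdownDone(grpc_completion_queue_functor* /*cb*/, int /*ok*/) {}

int main() {
  grpc_init();

  grpc_completion_queue_functor shutdown_cb;
  shutdown_cb.functor_run = OnShutdownDone;
  shutdown_cb.inlineable = false;

  // Ask the factory for a callback CQ with an explicitly non-polling engine,
  // as the removed benchmark code did, instead of calling
  // grpc_completion_queue_create_for_callback(&shutdown_cb, nullptr).
  grpc_completion_queue_attributes attr;
  attr.version = 2;
  attr.cq_completion_type = GRPC_CQ_CALLBACK;
  attr.cq_polling_type = GRPC_CQ_NON_POLLING;
  attr.cq_shutdown_cb = &shutdown_cb;
  grpc_completion_queue* cc = grpc_completion_queue_create(
      grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);

  grpc_completion_queue_shutdown(cc);
  grpc_completion_queue_destroy(cc);
  grpc_shutdown();
  return 0;
}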