Provide an interface firewall between pollset and its implementations

Starts to allow for more than one pollset implementation within a binary.

Does so without requiring an extra allocation per completion queue (which
we could not tolerate).
pull/5386/head
Craig Tiller 9 years ago
parent 3633ce48a9
commit a8be91b315
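
The new pollset contract (grpc_pollset_size() plus grpc_pollset_init() with a
caller-supplied gpr_mu, replacing the GRPC_POLLSET_MU macro) shifts allocation
and locking to the caller. A minimal sketch of the resulting lifecycle, based
on the updated tests in the diff below; run_until_done and g_done are
illustrative names, and g_done is assumed to be set by some I/O callback that
also calls grpc_pollset_kick():

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset.h"

static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static int g_done;

static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) {
  grpc_pollset_destroy(p);
}

static void run_until_done(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure destroyed;

  /* The pollset is opaque now: its size comes from the implementation,
     the mutex comes from the caller. */
  g_pollset = gpr_malloc(grpc_pollset_size());
  gpr_mu_init(&g_mu);
  grpc_pollset_init(g_pollset, &g_mu);

  gpr_mu_lock(&g_mu);
  while (!g_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(&g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(&g_mu);
  }
  gpr_mu_unlock(&g_mu);

  /* Shutdown destroys the pollset via the closure; the caller then
     frees the memory and the mutex it supplied. */
  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_mu_destroy(&g_mu);
  gpr_free(g_pollset);
}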
  1. src/core/iomgr/fd_posix.c (6)
  2. src/core/iomgr/pollset.h (15)
  3. src/core/iomgr/pollset_multipoller_with_epoll.c (9)
  4. src/core/iomgr/pollset_multipoller_with_poll_posix.c (12)
  5. src/core/iomgr/pollset_posix.c (45)
  6. src/core/iomgr/pollset_posix.h (14)
  7. src/core/iomgr/pollset_set_posix.c (1)
  8. src/core/iomgr/pollset_set_posix.h (1)
  9. src/core/iomgr/pollset_windows.h (2)
  10. src/core/iomgr/tcp_posix.c (5)
  11. src/core/iomgr/workqueue_posix.c (1)
  12. src/core/iomgr/workqueue_posix.h (2)
  13. src/core/security/google_default_credentials.c (23)
  14. src/core/surface/completion_queue.c (84)
  15. test/core/client_config/set_initial_connect_string_test.c (2)
  16. test/core/end2end/fixtures/h2_full+poll+pipe.c (16)
  17. test/core/end2end/fixtures/h2_full+poll.c (14)
  18. test/core/end2end/fixtures/h2_ssl+poll.c (12)
  19. test/core/end2end/fixtures/h2_uchannel.c (39)
  20. test/core/end2end/fixtures/h2_uds+poll.c (16)
  21. test/core/httpcli/httpcli_test.c (46)
  22. test/core/httpcli/httpscli_test.c (45)
  23. test/core/iomgr/endpoint_pair_test.c (21)
  24. test/core/iomgr/endpoint_tests.c (18)
  25. test/core/iomgr/endpoint_tests.h (2)
  26. test/core/iomgr/fd_posix_test.c (91)
  27. test/core/iomgr/tcp_client_posix_test.c (64)
  28. test/core/iomgr/tcp_posix_test.c (97)
  29. test/core/iomgr/tcp_server_posix_test.c (52)
  30. test/core/iomgr/workqueue_test.c (41)
  31. test/core/security/oauth2_utils.c (25)
  32. test/core/security/print_google_default_creds_token.c (32)
  33. test/core/security/secure_endpoint_test.c (28)
  34. test/core/security/verify_jwt.c (31)
  35. test/core/util/port_posix.c (57)
  36. test/core/util/test_tcp_server.c (19)
  37. test/core/util/test_tcp_server.h (4)
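
For completion queues, the extra allocation is avoided by reserving
grpc_pollset_size() bytes directly after the queue struct, as the
POLLSET_FROM_CQ macro in the completion_queue.c hunks below shows. A minimal
sketch of that layout trick, using a simplified stand-in struct (my_cq and
POLLSET_FROM_MY_CQ are hypothetical names, not part of the change):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/pollset.h"

typedef struct my_cq {
  gpr_mu mu; /* shared with the pollset that trails the struct */
  /* ...queue state... */
} my_cq;

/* Same idea as POLLSET_FROM_CQ: the pollset occupies the bytes
   immediately after the struct within a single allocation. */
#define POLLSET_FROM_MY_CQ(cq) ((grpc_pollset *)((cq) + 1))

static my_cq *my_cq_create(void) {
  my_cq *cc = gpr_malloc(sizeof(my_cq) + grpc_pollset_size());
  gpr_mu_init(&cc->mu);
  grpc_pollset_init(POLLSET_FROM_MY_CQ(cc), &cc->mu);
  return cc;
}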

@ -46,6 +46,8 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/pollset_posix.h"
#define CLOSURE_NOT_READY ((grpc_closure *)0)
#define CLOSURE_READY ((grpc_closure *)1)
@ -175,11 +177,11 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
}
static void pollset_kick_locked(grpc_fd_watcher *watcher) {
gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
gpr_mu_lock(watcher->pollset->mu);
GPR_ASSERT(watcher->worker);
grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
gpr_mu_unlock(watcher->pollset->mu);
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {

@ -35,8 +35,11 @@
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_H
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/exec_ctx.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
/* A grpc_pollset is a set of file descriptors that a higher level item is
@ -46,15 +49,11 @@
- a completion queue might keep a pollset with an entry for each transport
that is servicing a call that it's tracking */
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_posix.h"
#endif
#ifdef GPR_WIN32
#include "src/core/iomgr/pollset_windows.h"
#endif
typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker grpc_pollset_worker;
void grpc_pollset_init(grpc_pollset *pollset);
size_t grpc_pollset_size(void);
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu *mu);
/* Begin shutting down the pollset, and call closure when done.
* GRPC_POLLSET_MU(pollset) must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,

@ -45,6 +45,7 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"
@ -148,7 +149,7 @@ static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
finally_add_fd(exec_ctx, da->pollset, da->fd);
}
gpr_mu_lock(&da->pollset->mu);
gpr_mu_lock(da->pollset->mu);
da->pollset->in_flight_cbs--;
if (da->pollset->shutting_down) {
/* We don't care about this pollset anymore. */
@ -157,7 +158,7 @@ static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, true, NULL);
}
}
gpr_mu_unlock(&da->pollset->mu);
gpr_mu_unlock(da->pollset->mu);
GRPC_FD_UNREF(da->fd, "delayed_add");
@ -169,7 +170,7 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
grpc_fd *fd,
int and_unlock_pollset) {
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
finally_add_fd(exec_ctx, pollset, fd);
} else {
delayed_add *da = gpr_malloc(sizeof(*da));
@ -201,7 +202,7 @@ static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
* here.
*/
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);

@ -42,13 +42,15 @@
#include <stdlib.h>
#include <string.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/support/block_annotate.h"
typedef struct {
/* all polled fds */
size_t fd_count;
@ -78,7 +80,7 @@ static void multipoll_with_poll_pollset_add_fd(grpc_exec_ctx *exec_ctx,
GRPC_FD_REF(fd, "multipoller");
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
}
}
@ -130,7 +132,7 @@ static void multipoll_with_poll_pollset_maybe_work_and_unlock(
}
h->del_count = 0;
h->fd_count = fd_count;
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
for (i = 2; i < pfd_count; i++) {
pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,

@ -42,16 +42,16 @@
#include <string.h>
#include <unistd.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"
GPR_TLS_DECL(g_current_thread_poller);
GPR_TLS_DECL(g_current_thread_worker);
@ -97,6 +97,8 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next->prev = worker;
}
size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
@ -186,8 +188,8 @@ void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
void grpc_pollset_init(grpc_pollset *pollset) {
gpr_mu_init(&pollset->mu);
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu *mu) {
pollset->mu = mu;
pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
@ -204,7 +206,6 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
while (pollset->local_wakeup_cache) {
grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
@ -227,15 +228,15 @@ void grpc_pollset_reset(grpc_pollset *pollset) {
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_fd *fd) {
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(pollset->mu);
pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
/* the following (enabled only in debug) will reacquire and then release
our lock - meaning that if the unlocking flag passed to add_fd above is
not respected, the code will deadlock (in a way that we have a chance of
debugging) */
#ifndef NDEBUG
gpr_mu_lock(&pollset->mu);
gpr_mu_unlock(&pollset->mu);
gpr_mu_lock(pollset->mu);
gpr_mu_unlock(pollset->mu);
#endif
}
@ -284,7 +285,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
/* Give do_promote priority so we don't starve it out */
if (pollset->in_flight_cbs) {
GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
locked = 0;
goto done;
}
@ -318,7 +319,7 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
done:
if (!locked) {
queued_work |= grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(pollset->mu);
locked = 1;
}
/* If we're forced to re-evaluate polling (via grpc_pollset_kick with
@ -348,19 +349,19 @@ void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_kick(pollset, NULL);
} else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
pollset->called_shutdown = 1;
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
finish_shutdown(exec_ctx, pollset);
grpc_exec_ctx_flush(exec_ctx);
/* Continuing to access pollset here is safe -- it is the caller's
* responsibility to not destroy when it has outstanding calls to
* grpc_pollset_work.
* TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(pollset->mu);
} else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
grpc_exec_ctx_flush(exec_ctx);
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(pollset->mu);
}
}
*worker_hdl = NULL;
@ -428,7 +429,7 @@ static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
* 4. The pollset may be shutting down.
*/
gpr_mu_lock(&pollset->mu);
gpr_mu_lock(pollset->mu);
/* First we need to ensure that nobody is polling concurrently */
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
@ -469,7 +470,7 @@ static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
}
}
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
/* Matching ref in basic_pollset_add_fd */
GRPC_FD_UNREF(fd, "basicpoll_add");
@ -522,7 +523,7 @@ static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
exit:
if (and_unlock_pollset) {
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
}
}
@ -558,14 +559,14 @@ static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
pfd[2].fd = fd->fd;
pfd[2].revents = 0;
GRPC_FD_REF(fd, "basicpoll_begin");
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
POLLOUT, &fd_watcher);
if (pfd[2].events != 0) {
nfds++;
}
} else {
gpr_mu_unlock(&pollset->mu);
gpr_mu_unlock(pollset->mu);
}
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid

@ -37,8 +37,10 @@
#include <poll.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/exec_ctx.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@ -53,21 +55,21 @@ typedef struct grpc_cached_wakeup_fd {
struct grpc_cached_wakeup_fd *next;
} grpc_cached_wakeup_fd;
typedef struct grpc_pollset_worker {
struct grpc_pollset_worker {
grpc_cached_wakeup_fd *wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
struct grpc_pollset_worker *next;
struct grpc_pollset_worker *prev;
} grpc_pollset_worker;
};
typedef struct grpc_pollset {
struct grpc_pollset {
/* pollsets under posix can mutate representation as fds are added and
removed.
For example, we may choose a poll() based implementation on linux for
few fds, and an epoll() based implementation for many fds */
const grpc_pollset_vtable *vtable;
gpr_mu mu;
gpr_mu *mu;
grpc_pollset_worker root_worker;
int in_flight_cbs;
int shutting_down;
@ -81,7 +83,7 @@ typedef struct grpc_pollset {
} data;
/* Local cache of eventfds for workers */
grpc_cached_wakeup_fd *local_wakeup_cache;
} grpc_pollset;
};
struct grpc_pollset_vtable {
void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@ -93,8 +95,6 @@ struct grpc_pollset_vtable {
void (*destroy)(grpc_pollset *pollset);
};
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
/* Add an fd to a pollset */
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);

@ -41,6 +41,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/pollset_set.h"
void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {

@ -35,7 +35,6 @@
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
typedef struct grpc_pollset_set {
gpr_mu mu;

@ -74,6 +74,4 @@ struct grpc_pollset {
extern gpr_mu grpc_polling_mu;
#define GRPC_POLLSET_MU(pollset) (&grpc_polling_mu)
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */

@ -40,8 +40,8 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
@ -51,9 +51,10 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/support/string.h"
#include "src/core/debug/trace.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#ifdef GPR_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL

@ -44,6 +44,7 @@
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success);

@ -34,6 +34,8 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
#define GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
#include "src/core/iomgr/wakeup_fd_posix.h"
struct grpc_fd;
struct grpc_workqueue {

@ -58,7 +58,7 @@ static gpr_once g_once = GPR_ONCE_INIT;
static void init_default_credentials(void) { gpr_mu_init(&g_mu); }
typedef struct {
grpc_pollset pollset;
grpc_pollset *pollset;
int is_done;
int success;
} compute_engine_detector;
@ -80,10 +80,10 @@ static void on_compute_engine_detection_http_response(
}
}
}
gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset));
gpr_mu_lock(&g_mu);
detector->is_done = 1;
grpc_pollset_kick(&detector->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
grpc_pollset_kick(detector->pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool s) {
@ -101,7 +101,8 @@ static int is_stack_running_on_compute_engine(void) {
on compute engine. */
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
grpc_pollset_init(&detector.pollset);
detector.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(detector.pollset, &g_mu);
detector.is_done = 0;
detector.success = 0;
@ -112,7 +113,7 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
grpc_httpcli_get(
&exec_ctx, &context, &detector.pollset, &request,
&exec_ctx, &context, detector.pollset, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
on_compute_engine_detection_http_response, &detector);
@ -120,20 +121,22 @@ static int is_stack_running_on_compute_engine(void) {
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
gpr_mu_lock(&g_mu);
while (!detector.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &detector.pollset, &worker,
grpc_pollset_work(&exec_ctx, detector.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
gpr_mu_unlock(&g_mu);
grpc_httpcli_context_destroy(&context);
grpc_closure_init(&destroy_closure, destroy_pollset, &detector.pollset);
grpc_pollset_shutdown(&exec_ctx, &detector.pollset, &destroy_closure);
grpc_pollset_shutdown(&exec_ctx, detector.pollset, &destroy_closure);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(detector.pollset);
return detector.success;
}

@ -36,18 +36,19 @@
#include <stdio.h>
#include <string.h>
#include "src/core/iomgr/timer.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/timer.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
typedef struct {
grpc_pollset_worker **worker;
@ -56,6 +57,7 @@ typedef struct {
/* Completion queue structure */
struct grpc_completion_queue {
gpr_mu mu;
/** completed events */
grpc_cq_completion completed_head;
grpc_cq_completion *completed_tail;
@ -63,8 +65,6 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
/** the set of low level i/o things that concern this cq */
grpc_pollset pollset;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
@ -82,6 +82,8 @@ struct grpc_completion_queue {
grpc_completion_queue *next_free;
};
#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
static gpr_mu g_freelist_mu;
grpc_completion_queue *g_freelist;
@ -94,7 +96,8 @@ void grpc_cq_global_shutdown(void) {
gpr_mu_destroy(&g_freelist_mu);
while (g_freelist) {
grpc_completion_queue *next = g_freelist->next_free;
grpc_pollset_destroy(&g_freelist->pollset);
grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
gpr_mu_destroy(&g_freelist->mu);
#ifndef NDEBUG
gpr_free(g_freelist->outstanding_tags);
#endif
@ -124,8 +127,9 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
if (g_freelist == NULL) {
gpr_mu_unlock(&g_freelist_mu);
cc = gpr_malloc(sizeof(grpc_completion_queue));
grpc_pollset_init(&cc->pollset);
cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
gpr_mu_init(&cc->mu);
grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
#ifndef NDEBUG
cc->outstanding_tags = NULL;
cc->outstanding_tag_capacity = 0;
@ -184,7 +188,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
grpc_pollset_reset(&cc->pollset);
grpc_pollset_reset(POLLSET_FROM_CQ(cc));
gpr_mu_lock(&g_freelist_mu);
cc->next_free = g_freelist;
g_freelist = cc;
@ -194,7 +198,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
#ifndef NDEBUG
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
GPR_ASSERT(!cc->shutdown_called);
if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
@ -203,7 +207,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
cc->outstanding_tag_capacity);
}
cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
#endif
gpr_ref(&cc->pending_events);
}
@ -231,7 +235,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
storage->next =
((uintptr_t)&cc->completed_head) | ((uintptr_t)(success != 0));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
#ifndef NDEBUG
for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
if (cc->outstanding_tags[i] == tag) {
@ -256,8 +260,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
break;
}
}
grpc_pollset_kick(&cc->pollset, pluck_worker);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_kick(POLLSET_FROM_CQ(cc), pluck_worker);
gpr_mu_unlock(&cc->mu);
} else {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@ -265,8 +269,9 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_pollset_shutdown(exec_ctx, POLLSET_FROM_CQ(cc),
&cc->pollset_shutdown_done);
gpr_mu_unlock(&cc->mu);
}
GPR_TIMER_END("grpc_cq_end_op", 0);
@ -294,7 +299,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
for (;;) {
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
@ -302,7 +307,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
if (c == cc->completed_tail) {
cc->completed_tail = &cc->completed_head;
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
@ -310,14 +315,14 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
if (cc->shutdown) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
@ -330,12 +335,12 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec iteration_deadline = deadline;
if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
GPR_TIMER_MARK("alarm_triggered", 0);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
continue;
}
grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now,
grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
iteration_deadline);
}
GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
@ -395,7 +400,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
for (;;) {
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
@ -405,7 +410,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
if (c == cc->completed_tail) {
cc->completed_tail = prev;
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
@ -415,7 +420,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
prev = c;
}
if (cc->shutdown) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
break;
@ -425,7 +430,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
"Too many outstanding grpc_completion_queue_pluck calls: maximum "
"is %d",
GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
@ -434,7 +439,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
@ -447,12 +452,12 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec iteration_deadline = deadline;
if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
GPR_TIMER_MARK("alarm_triggered", 0);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
continue;
}
grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now,
grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
iteration_deadline);
del_plucker(cc, tag, &worker);
}
@ -472,9 +477,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_lock(&cc->mu);
if (cc->shutdown_called) {
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
@ -482,9 +487,10 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
if (gpr_unref(&cc->pending_events)) {
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
grpc_pollset_shutdown(&exec_ctx, POLLSET_FROM_CQ(cc),
&cc->pollset_shutdown_done);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
gpr_mu_unlock(&cc->mu);
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
@ -498,7 +504,7 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
return &cc->pollset;
return POLLSET_FROM_CQ(cc);
}
void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }

@ -85,7 +85,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_slice_buffer_init(&state.incoming_buffer);
gpr_slice_buffer_init(&state.temp_incoming_buffer);
state.tcp = tcp;
grpc_endpoint_add_to_pollset(exec_ctx, tcp, &server->pollset);
grpc_endpoint_add_to_pollset(exec_ctx, tcp, server->pollset);
grpc_endpoint_read(exec_ctx, tcp, &state.temp_incoming_buffer, &on_read);
}

@ -35,21 +35,23 @@
#include <string.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct fullstack_fixture_data {
char *localaddr;

@ -35,18 +35,20 @@
#include <string.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"

@ -36,17 +36,19 @@
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include "src/core/channel/channel_args.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/security/credentials.h"
#include "src/core/support/env.h"
#include "src/core/support/file.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
#include "test/core/util/port.h"
#include "test/core/end2end/data/ssl_test_data.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
typedef struct fullstack_secure_fixture_data {
char *localaddr;

@ -35,6 +35,13 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/client_uchannel.h"
@ -46,13 +53,6 @@
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@ -253,30 +253,33 @@ static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
}
static grpc_connected_subchannel *connect_subchannel(grpc_subchannel *c) {
grpc_pollset pollset;
gpr_mu mu;
gpr_mu_init(&mu);
grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_init(&pollset);
grpc_pollset_init(pollset, &mu);
grpc_pollset_set_init(&g_interested_parties);
grpc_pollset_set_add_pollset(&exec_ctx, &g_interested_parties, &pollset);
grpc_pollset_set_add_pollset(&exec_ctx, &g_interested_parties, pollset);
grpc_subchannel_notify_on_state_change(&exec_ctx, c, &g_interested_parties,
&g_state,
grpc_closure_create(state_changed, c));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
gpr_mu_lock(&mu);
while (g_state != GRPC_CHANNEL_READY) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
grpc_pollset_work(&exec_ctx, pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
gpr_mu_unlock(&mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
gpr_mu_lock(&mu);
}
grpc_pollset_shutdown(&exec_ctx, &pollset,
grpc_closure_create(destroy_pollset, &pollset));
grpc_pollset_shutdown(&exec_ctx, pollset,
grpc_closure_create(destroy_pollset, pollset));
grpc_pollset_set_destroy(&g_interested_parties);
gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
gpr_mu_unlock(&mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pollset);
gpr_mu_destroy(&mu);
return grpc_subchannel_get_connected_subchannel(c);
}

@ -37,13 +37,6 @@
#include <string.h>
#include <unistd.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
@ -51,6 +44,15 @@
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"

@ -36,18 +36,19 @@
#include <string.h>
#include <grpc/grpc.h>
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/iomgr.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
static int g_done = 0;
static grpc_httpcli_context g_context;
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static gpr_timespec n_seconds_time(int seconds) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(seconds);
@ -63,10 +64,10 @@ static void on_finish(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(response->status == 200);
GPR_ASSERT(response->body_length == strlen(expect));
GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length));
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
g_done = 1;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void test_get(int port) {
@ -85,18 +86,18 @@ static void test_get(int port) {
req.path = "/get";
req.handshaker = &grpc_httpcli_plaintext;
grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15),
grpc_httpcli_get(&exec_ctx, &g_context, g_pollset, &req, n_seconds_time(15),
on_finish, (void *)42);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_free(host);
}
@ -116,18 +117,18 @@ static void test_post(int port) {
req.path = "/post";
req.handshaker = &grpc_httpcli_plaintext;
grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5,
grpc_httpcli_post(&exec_ctx, &g_context, g_pollset, &req, "hello", 5,
n_seconds_time(15), on_finish, (void *)42);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_free(host);
}
@ -175,17 +176,22 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_httpcli_context_init(&g_context);
grpc_pollset_init(&g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&g_mu);
grpc_pollset_init(g_pollset, &g_mu);
test_get(port);
test_post(port);
grpc_httpcli_context_destroy(&g_context);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_mu_destroy(&g_mu);
gpr_free(g_pollset);
gpr_subprocess_destroy(server);
return 0;

@ -36,18 +36,19 @@
#include <string.h>
#include <grpc/grpc.h>
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/iomgr.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
static int g_done = 0;
static grpc_httpcli_context g_context;
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static gpr_timespec n_seconds_time(int seconds) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(seconds);
@ -63,10 +64,10 @@ static void on_finish(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(response->status == 200);
GPR_ASSERT(response->body_length == strlen(expect));
GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length));
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
g_done = 1;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void test_get(int port) {
@ -86,18 +87,18 @@ static void test_get(int port) {
req.path = "/get";
req.handshaker = &grpc_httpcli_ssl;
grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15),
grpc_httpcli_get(&exec_ctx, &g_context, g_pollset, &req, n_seconds_time(15),
on_finish, (void *)42);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_free(host);
}
@ -118,18 +119,18 @@ static void test_post(int port) {
req.path = "/post";
req.handshaker = &grpc_httpcli_ssl;
grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5,
grpc_httpcli_post(&exec_ctx, &g_context, g_pollset, &req, "hello", 5,
n_seconds_time(15), on_finish, (void *)42);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_free(host);
}
@ -178,17 +179,21 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_httpcli_context_init(&g_context);
grpc_pollset_init(&g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
test_get(port);
test_post(port);
grpc_httpcli_context_destroy(&g_context);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
gpr_subprocess_destroy(server);
return 0;

@ -39,10 +39,11 @@
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/endpoint_pair.h"
#include "test/core/util/test_config.h"
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/util/test_config.h"
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static void clean_up(void) {}
@ -54,8 +55,8 @@ static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
f.client_ep = p.client;
f.server_ep = p.server;
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
return f;
@ -72,14 +73,18 @@ static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) {
int main(int argc, char **argv) {
grpc_closure destroyed;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_init(&g_mu);
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_init(&g_pollset);
grpc_endpoint_tests(configs[0], &g_pollset);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, &g_mu);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_mu_destroy(&g_mu);
gpr_free(g_pollset);
return 0;
}

@ -36,8 +36,8 @@
#include <sys/types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/slice.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/util/test_config.h"
@ -58,6 +58,7 @@
*/
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
size_t count_slices(gpr_slice *slices, size_t nslices, int *current_data) {
@ -134,10 +135,10 @@ static void read_and_write_test_read_handler(grpc_exec_ctx *exec_ctx,
state->incoming.slices, state->incoming.count, &state->current_read_data);
if (state->bytes_read == state->target_bytes || !success) {
gpr_log(GPR_INFO, "Read handler done");
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_lock(g_mu);
state->read_done = 1 + success;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_unlock(g_mu);
} else if (success) {
grpc_endpoint_read(exec_ctx, state->read_ep, &state->incoming,
&state->done_read);
@ -169,10 +170,10 @@ static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
}
gpr_log(GPR_INFO, "Write handler done");
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_lock(g_mu);
state->write_done = 1 + success;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_unlock(g_mu);
}
/* Do both reading and writing using the grpc_endpoint API.
@ -232,14 +233,14 @@ static void read_and_write_test(grpc_endpoint_test_config config,
}
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_lock(g_mu);
while (!state.read_done || !state.write_done) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
}
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
gpr_mu_unlock(g_mu);
grpc_exec_ctx_finish(&exec_ctx);
end_test(config);
@ -251,9 +252,10 @@ static void read_and_write_test(grpc_endpoint_test_config config,
}
void grpc_endpoint_tests(grpc_endpoint_test_config config,
grpc_pollset *pollset) {
grpc_pollset *pollset, gpr_mu *mu) {
size_t i;
g_pollset = pollset;
g_mu = mu;
read_and_write_test(config, 10000000, 100000, 8192, 0);
read_and_write_test(config, 1000000, 100000, 1, 0);
read_and_write_test(config, 100000000, 100000, 1, 1);

@ -53,6 +53,6 @@ struct grpc_endpoint_test_config {
};
void grpc_endpoint_tests(grpc_endpoint_test_config config,
grpc_pollset *pollset);
grpc_pollset *pollset, gpr_mu *mu);
#endif /* GRPC_TEST_CORE_IOMGR_ENDPOINT_TESTS_H */

@ -49,9 +49,12 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset_posix.h"
#include "test/core/util/test_config.h"
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
/* buffer size used to send and receive data.
1024 is the minimal value to set TCP send and receive buffer. */
@ -179,10 +182,10 @@ static void listen_shutdown_cb(grpc_exec_ctx *exec_ctx, void *arg /*server */,
grpc_fd_orphan(exec_ctx, sv->em_fd, NULL, NULL, "b");
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
sv->done = 1;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
/* Called when a new TCP connection request arrives in the listening port. */
@ -209,7 +212,7 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
se = gpr_malloc(sizeof(*se));
se->sv = sv;
se->em_fd = grpc_fd_create(fd, "listener");
grpc_pollset_add_fd(exec_ctx, &g_pollset, se->em_fd);
grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
se->session_read_closure.cb = session_read_cb;
se->session_read_closure.cb_arg = se;
grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
@ -238,7 +241,7 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);
sv->em_fd = grpc_fd_create(fd, "server");
grpc_pollset_add_fd(exec_ctx, &g_pollset, sv->em_fd);
grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
/* Register to be interested in reading from listen_fd. */
sv->listen_closure.cb = listen_cb;
sv->listen_closure.cb_arg = sv;
@ -249,18 +252,18 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
/* Wait and shutdown a sever. */
static void server_wait_and_shutdown(server *sv) {
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!sv->done) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
}
/* ===An upload client to test notify_on_write=== */
@ -296,7 +299,7 @@ static void client_session_shutdown_cb(grpc_exec_ctx *exec_ctx,
client *cl = arg;
grpc_fd_orphan(exec_ctx, cl->em_fd, NULL, NULL, "c");
cl->done = 1;
grpc_pollset_kick(&g_pollset, NULL);
grpc_pollset_kick(g_pollset, NULL);
}
/* Write as much as possible, then register notify_on_write. */
@ -307,9 +310,9 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
ssize_t write_once = 0;
if (!success) {
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
client_session_shutdown_cb(exec_ctx, arg, 1);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
return;
}
@ -319,7 +322,7 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
} while (write_once > 0);
if (errno == EAGAIN) {
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
cl->write_closure.cb = client_session_write;
cl->write_closure.cb_arg = cl;
@ -328,7 +331,7 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
} else {
client_session_shutdown_cb(exec_ctx, arg, 1);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
} else {
gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno));
abort();
@ -357,25 +360,25 @@ static void client_start(grpc_exec_ctx *exec_ctx, client *cl, int port) {
}
cl->em_fd = grpc_fd_create(fd, "client");
grpc_pollset_add_fd(exec_ctx, &g_pollset, cl->em_fd);
grpc_pollset_add_fd(exec_ctx, g_pollset, cl->em_fd);
client_session_write(exec_ctx, cl, 1);
}
/* Wait for the signal to shutdown a client. */
static void client_wait_and_shutdown(client *cl) {
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!cl->done) {
grpc_pollset_worker *worker = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
}
/* Test grpc_fd. Start an upload server and client, upload a stream of
@ -410,20 +413,20 @@ static void first_read_callback(grpc_exec_ctx *exec_ctx,
void *arg /* fd_change_data */, bool success) {
fd_change_data *fdc = arg;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
fdc->cb_that_ran = first_read_callback;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void second_read_callback(grpc_exec_ctx *exec_ctx,
void *arg /* fd_change_data */, bool success) {
fd_change_data *fdc = arg;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
fdc->cb_that_ran = second_read_callback;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
/* Test that changing the callback we use for notify_on_read actually works.
@ -456,7 +459,7 @@ static void test_grpc_fd_change(void) {
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
em_fd = grpc_fd_create(sv[0], "test_grpc_fd_change");
grpc_pollset_add_fd(&exec_ctx, &g_pollset, em_fd);
grpc_pollset_add_fd(&exec_ctx, g_pollset, em_fd);
/* Register the first callback, then make its FD readable */
grpc_fd_notify_on_read(&exec_ctx, em_fd, &first_closure);
@ -465,18 +468,18 @@ static void test_grpc_fd_change(void) {
GPR_ASSERT(result == 1);
/* And now wait for it to run. */
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (a.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
GPR_ASSERT(a.cb_that_ran == first_read_callback);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
/* And drain the socket so we can generate a new read edge */
result = read(sv[0], &data, 1);
@ -489,19 +492,19 @@ static void test_grpc_fd_change(void) {
result = write(sv[1], &data, 1);
GPR_ASSERT(result == 1);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (b.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
/* Except now we verify that second_read_callback ran instead */
GPR_ASSERT(b.cb_that_ran == second_read_callback);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_fd_orphan(&exec_ctx, em_fd, NULL, NULL, "d");
grpc_exec_ctx_finish(&exec_ctx);
@ -519,12 +522,16 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_iomgr_init();
grpc_pollset_init(&g_pollset);
gpr_mu_init(&g_mu);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
test_grpc_fd();
test_grpc_fd_change();
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
grpc_iomgr_shutdown();
return 0;
}

@ -40,6 +40,7 @@
#include <unistd.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@ -49,7 +50,8 @@
#include "test/core/util/test_config.h"
static grpc_pollset_set g_pollset_set;
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static int g_connections_complete = 0;
static grpc_endpoint *g_connecting = NULL;
@ -58,10 +60,10 @@ static gpr_timespec test_deadline(void) {
}
static void finish_connection() {
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
g_connections_complete++;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void must_succeed(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
@ -99,9 +101,9 @@ void test_succeeds(void) {
GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
GPR_ASSERT(0 == listen(svr_fd, 1));
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
connections_complete_before = g_connections_complete;
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
/* connect to it */
GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
@ -118,19 +120,19 @@ void test_succeeds(void) {
GPR_ASSERT(r >= 0);
close(r);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (g_connections_complete == connections_complete_before) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
}
@ -147,9 +149,9 @@ void test_fails(void) {
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
connections_complete_before = g_connections_complete;
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
/* connect to a broken address */
grpc_closure_init(&done, must_fail, NULL);
@ -157,7 +159,7 @@ void test_fails(void) {
(struct sockaddr *)&addr, addr_len,
gpr_inf_future(GPR_CLOCK_REALTIME));
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
/* wait for the connection callback to finish */
while (g_connections_complete == connections_complete_before) {
@ -165,14 +167,14 @@ void test_fails(void) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec polling_deadline = test_deadline();
if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
grpc_pollset_work(&exec_ctx, &g_pollset, &worker, now, polling_deadline);
grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
}
@ -217,16 +219,16 @@ void test_times_out(void) {
connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
connections_complete_before = g_connections_complete;
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_closure_init(&done, must_fail, NULL);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set,
(struct sockaddr *)&addr, addr_len, connect_deadline);
/* Make sure the event doesn't trigger early */
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
for (;;) {
grpc_pollset_worker *worker = NULL;
gpr_timespec now = gpr_now(connect_deadline.clock_type);
@ -252,13 +254,13 @@ void test_times_out(void) {
}
gpr_timespec polling_deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
grpc_pollset_work(&exec_ctx, &g_pollset, &worker, now, polling_deadline);
grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
@ -278,17 +280,21 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_set_init(&g_pollset_set);
grpc_pollset_init(&g_pollset);
grpc_pollset_set_add_pollset(&exec_ctx, &g_pollset_set, &g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&g_mu);
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_set_add_pollset(&exec_ctx, &g_pollset_set, g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
test_succeeds();
gpr_log(GPR_ERROR, "End of first test");
test_fails();
test_times_out();
grpc_pollset_set_destroy(&g_pollset_set);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
return 0;
}
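The main() above, and those in the test files that follow, all repeat the same lifecycle: the pollset is no longer a value embedded in the test but an opaque object the caller allocates and pairs with its own mutex. A minimal sketch of that lifecycle, using only calls that appear in these hunks and assuming the destroy_pollset helper does nothing beyond calling grpc_pollset_destroy on its argument (as these tests do):

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

#include "src/core/iomgr/pollset.h"

static gpr_mu g_mu;
static grpc_pollset *g_pollset;

/* Assumed helper, mirroring the tests: the shutdown closure destroys the
   pollset once it is safe to do so. */
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) {
  grpc_pollset_destroy(p);
}

static void pollset_lifecycle_sketch(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure destroyed;

  /* Allocation is now the caller's job: only the active implementation
     knows the pollset's size, and the lock lives outside the pollset. */
  g_pollset = gpr_malloc(grpc_pollset_size());
  gpr_mu_init(&g_mu);
  grpc_pollset_init(g_pollset, &g_mu);

  /* ... run the test, locking g_mu around grpc_pollset_work() ... */

  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_free(g_pollset);
  gpr_mu_destroy(&g_mu);
}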

@ -36,8 +36,8 @@
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <grpc/grpc.h>
@ -45,10 +45,11 @@
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/util/test_config.h"
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/util/test_config.h"
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
/*
General test notes:
@ -145,7 +146,7 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
GPR_ASSERT(success);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
current_data = state->read_bytes % 256;
read_bytes = count_slices(state->incoming.slices, state->incoming.count,
&current_data);
@ -153,10 +154,10 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
gpr_log(GPR_INFO, "Read %d bytes of %d", read_bytes,
state->target_read_bytes);
if (state->read_bytes >= state->target_read_bytes) {
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
} else {
grpc_endpoint_read(exec_ctx, state->ep, &state->incoming, &state->read_cb);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
}
}
@ -175,7 +176,7 @@ static void read_test(size_t num_bytes, size_t slice_size) {
create_sockets(sv);
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
written_bytes = fill_socket_partial(sv[0], num_bytes);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -188,17 +189,17 @@ static void read_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_slice_buffer_destroy(&state.incoming);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -221,7 +222,7 @@ static void large_read_test(size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
"test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
written_bytes = fill_socket(sv[0]);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -234,17 +235,17 @@ static void large_read_test(size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_slice_buffer_destroy(&state.incoming);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -283,11 +284,11 @@ static void write_done(grpc_exec_ctx *exec_ctx,
void *user_data /* write_socket_state */, bool success) {
struct write_socket_state *state = (struct write_socket_state *)user_data;
gpr_log(GPR_INFO, "Write done callback called");
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
gpr_log(GPR_INFO, "Signalling write done");
state->write_done = 1;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
@ -304,11 +305,11 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
for (;;) {
grpc_pollset_worker *worker = NULL;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_mu_lock(&g_mu);
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
do {
bytes_read =
@ -350,7 +351,7 @@ static void write_test(size_t num_bytes, size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
state.ep = ep;
state.write_done = 0;
@ -363,19 +364,19 @@ static void write_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
drain_socket_blocking(sv[0], num_bytes, num_bytes);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
for (;;) {
grpc_pollset_worker *worker = NULL;
if (state.write_done) {
break;
}
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_slice_buffer_destroy(&outgoing);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -386,7 +387,7 @@ static void write_test(size_t num_bytes, size_t slice_size) {
void on_fd_released(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
int *done = arg;
*done = 1;
grpc_pollset_kick(&g_pollset, NULL);
grpc_pollset_kick(g_pollset, NULL);
}
/* Do a read_test, then release fd and try to read/write again. Verify that
@ -410,7 +411,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
written_bytes = fill_socket_partial(sv[0], num_bytes);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -423,27 +424,27 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
gpr_slice_buffer_destroy(&state.incoming);
grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
while (!fd_released_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
GPR_ASSERT(fd_released_done == 1);
GPR_ASSERT(fd == sv[1]);
grpc_exec_ctx_finish(&exec_ctx);
@ -491,8 +492,8 @@ static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
slice_size, "test");
f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
slice_size, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
@ -512,13 +513,17 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_init(&g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&g_mu);
grpc_pollset_init(g_pollset, &g_mu);
run_tests();
grpc_endpoint_tests(configs[0], &g_pollset);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_endpoint_tests(configs[0], g_pollset, &g_mu);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
return 0;
}

@ -32,24 +32,28 @@
*/
#include "src/core/iomgr/tcp_server.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include <errno.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>
#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", #x)
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static int g_nconnects = 0;
typedef struct on_connect_result {
@ -113,11 +117,11 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_endpoint_shutdown(exec_ctx, tcp);
grpc_endpoint_destroy(exec_ctx, tcp);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
on_connect_result_set(&g_result, acceptor);
g_nconnects++;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void test_no_op(void) {
@ -174,7 +178,7 @@ static void tcp_connect(grpc_exec_ctx *exec_ctx, const struct sockaddr *remote,
int clifd = socket(remote->sa_family, SOCK_STREAM, 0);
int nconnects_before;
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
nconnects_before = g_nconnects;
on_connect_result_init(&g_result);
GPR_ASSERT(clifd >= 0);
@ -184,18 +188,18 @@ static void tcp_connect(grpc_exec_ctx *exec_ctx, const struct sockaddr *remote,
while (g_nconnects == nconnects_before &&
gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(exec_ctx, &g_pollset, &worker,
grpc_pollset_work(exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
}
gpr_log(GPR_DEBUG, "wait done");
GPR_ASSERT(g_nconnects == nconnects_before + 1);
close(clifd);
*result = g_result;
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_unlock(&g_mu);
}
/* Tests a tcp server with multiple ports. TODO(daniel-j-born): Multiple fds for
@ -210,7 +214,6 @@ static void test_connect(unsigned n) {
unsigned svr1_fd_count;
int svr1_port;
grpc_tcp_server *s = grpc_tcp_server_create(NULL);
grpc_pollset *pollsets[1];
unsigned i;
server_weak_ref weak_ref;
server_weak_ref_init(&weak_ref);
@ -259,8 +262,7 @@ static void test_connect(unsigned n) {
}
}
pollsets[0] = &g_pollset;
grpc_tcp_server_start(&exec_ctx, s, pollsets, 1, on_connect, NULL);
grpc_tcp_server_start(&exec_ctx, s, &g_pollset, 1, on_connect, NULL);
for (i = 0; i < n; i++) {
on_connect_result result;
@ -312,7 +314,9 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_init(&g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&g_mu);
grpc_pollset_init(g_pollset, &g_mu);
test_no_op();
test_no_op_with_start();
@ -321,9 +325,11 @@ int main(int argc, char **argv) {
test_connect(1);
test_connect(10);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
return 0;
}
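One easy-to-miss consequence in the hunk above: because g_pollset is now a grpc_pollset *, taking its address already yields the grpc_pollset ** that grpc_tcp_server_start expects, so the one-element pollsets[] staging array is dropped. A sketch of the two equivalent forms, with the call signature taken from the lines above:

/* before: the pollset was a value, so an array of pointers had to be built */
grpc_pollset *pollsets[1];
pollsets[0] = &g_pollset; /* g_pollset was a grpc_pollset */
grpc_tcp_server_start(&exec_ctx, s, pollsets, 1, on_connect, NULL);

/* after: g_pollset is already a pointer, so its address is the array */
grpc_tcp_server_start(&exec_ctx, s, &g_pollset, 1, on_connect, NULL);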

@ -34,18 +34,20 @@
#include "src/core/iomgr/workqueue.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, bool success) {
GPR_ASSERT(success == 1);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
*(int *)p = 1;
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(&g_mu);
}
static void test_ref_unref(void) {
@ -67,13 +69,13 @@ static void test_add_closure(void) {
grpc_closure_init(&c, must_succeed, &done);
grpc_workqueue_push(wq, &c, 1);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, &g_pollset);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
GPR_ASSERT(!done);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(deadline.clock_type), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type),
deadline);
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
@ -92,13 +94,13 @@ static void test_flush(void) {
grpc_exec_ctx_enqueue(&exec_ctx, &c, true, NULL);
grpc_workqueue_flush(&exec_ctx, wq);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, &g_pollset);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_mu_lock(&g_mu);
GPR_ASSERT(!done);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(deadline.clock_type), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type),
deadline);
gpr_mu_unlock(&g_mu);
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
@ -115,15 +117,20 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_init(&g_pollset);
gpr_mu_init(&g_mu);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
test_ref_unref();
test_add_closure();
test_flush();
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
return 0;
}

@ -45,7 +45,8 @@
#include "src/core/security/credentials.h"
typedef struct {
grpc_pollset pollset;
gpr_mu mu;
grpc_pollset *pollset;
int is_done;
char *token;
} oauth2_request;
@ -66,11 +67,11 @@ static void on_oauth2_response(grpc_exec_ctx *exec_ctx, void *user_data,
GPR_SLICE_LENGTH(token_slice));
token[GPR_SLICE_LENGTH(token_slice)] = '\0';
}
gpr_mu_lock(GRPC_POLLSET_MU(&request->pollset));
gpr_mu_lock(&request->mu);
request->is_done = 1;
request->token = token;
grpc_pollset_kick(&request->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&request->pollset));
grpc_pollset_kick(request->pollset, NULL);
gpr_mu_unlock(&request->mu);
}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *unused, bool success) {}
@ -82,28 +83,30 @@ char *grpc_test_fetch_oauth2_token_with_credentials(
grpc_closure do_nothing_closure;
grpc_auth_metadata_context null_ctx = {"", "", NULL, NULL};
grpc_pollset_init(&request.pollset);
request.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(request.pollset, &request.mu);
request.is_done = 0;
grpc_closure_init(&do_nothing_closure, do_nothing, NULL);
grpc_call_credentials_get_request_metadata(&exec_ctx, creds, &request.pollset,
grpc_call_credentials_get_request_metadata(&exec_ctx, creds, request.pollset,
null_ctx, on_oauth2_response,
&request);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
gpr_mu_lock(&request.mu);
while (!request.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &request.pollset, &worker,
grpc_pollset_work(&exec_ctx, request.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));
gpr_mu_unlock(&request.mu);
grpc_pollset_shutdown(&exec_ctx, &request.pollset, &do_nothing_closure);
grpc_pollset_shutdown(&exec_ctx, request.pollset, &do_nothing_closure);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_destroy(&request.pollset);
grpc_pollset_destroy(request.pollset);
gpr_free(request.pollset);
return request.token;
}
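The helper above, and the credentials, JWT and port-picker changes that follow, all share one callback-plus-wait-loop shape around the new pollset interface; only the struct name differs (oauth2_request, synchronizer, freereq, portreq). A distilled sketch of that shape, with hypothetical names on_done and wait_until_done, following the unlock/flush/lock variant used by the credentials and JWT tools below:

typedef struct {
  gpr_mu mu;
  grpc_pollset *pollset;
  int is_done;
} synchronizer;

/* Completion callback: record the result and kick the pollset so the
   waiting thread re-evaluates its flag. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  synchronizer *s = arg;
  gpr_mu_lock(&s->mu);
  s->is_done = 1;
  grpc_pollset_kick(s->pollset, NULL);
  gpr_mu_unlock(&s->mu);
}

/* Waiting side: poll under the caller-owned mutex until the flag flips,
   dropping the lock around grpc_exec_ctx_flush so callbacks can run. */
static void wait_until_done(grpc_exec_ctx *exec_ctx, synchronizer *s) {
  gpr_mu_lock(&s->mu);
  while (!s->is_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(exec_ctx, s->pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(&s->mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&s->mu);
  }
  gpr_mu_unlock(&s->mu);
}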

@ -34,8 +34,6 @@
#include <stdio.h>
#include <string.h>
#include "src/core/security/credentials.h"
#include "src/core/support/string.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
@ -44,8 +42,12 @@
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include "src/core/security/credentials.h"
#include "src/core/support/string.h"
typedef struct {
grpc_pollset pollset;
gpr_mu mu;
grpc_pollset *pollset;
int is_done;
} synchronizer;
@ -62,10 +64,10 @@ static void on_metadata_response(grpc_exec_ctx *exec_ctx, void *user_data,
printf("\nGot token: %s\n\n", token);
gpr_free(token);
}
gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
gpr_mu_lock(&sync->mu);
sync->is_done = 1;
grpc_pollset_kick(&sync->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
grpc_pollset_kick(sync->pollset, NULL);
gpr_mu_unlock(&sync->mu);
}
int main(int argc, char **argv) {
@ -91,26 +93,30 @@ int main(int argc, char **argv) {
goto end;
}
grpc_pollset_init(&sync.pollset);
sync.pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&sync.mu);
grpc_pollset_init(sync.pollset, &sync.mu);
sync.is_done = 0;
grpc_call_credentials_get_request_metadata(
&exec_ctx, ((grpc_composite_channel_credentials *)creds)->call_creds,
&sync.pollset, context, on_metadata_response, &sync);
sync.pollset, context, on_metadata_response, &sync);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_lock(&sync.mu);
while (!sync.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &sync.pollset, &worker,
grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_unlock(&sync.mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_lock(&sync.mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_unlock(&sync.mu);
grpc_channel_credentials_release(creds);
gpr_free(sync.pollset);
gpr_mu_destroy(&sync.mu);
end:
gpr_cmdline_destroy(cl);

@ -36,16 +36,17 @@
#include <fcntl.h>
#include <sys/types.h>
#include "src/core/security/secure_endpoint.h"
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/iomgr.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/security/secure_endpoint.h"
#include "src/core/tsi/fake_transport_security.h"
#include "test/core/util/test_config.h"
static grpc_pollset g_pollset;
static gpr_mu g_mu;
static grpc_pollset *g_pollset;
static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
size_t slice_size, gpr_slice *leftover_slices, size_t leftover_nslices) {
@ -56,8 +57,8 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
grpc_endpoint_pair tcp;
tcp = grpc_iomgr_create_endpoint_pair("fixture", slice_size);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, g_pollset);
if (leftover_nslices == 0) {
f.client_ep =
@ -181,13 +182,18 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_pollset_init(&g_pollset);
grpc_endpoint_tests(configs[0], &g_pollset);
g_pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&g_mu);
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, &g_mu);
test_leftover(configs[1], 1);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_mu_destroy(&g_mu);
return 0;
}

@ -34,7 +34,6 @@
#include <stdio.h>
#include <string.h>
#include "src/core/security/jwt_verifier.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
@ -43,8 +42,11 @@
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include "src/core/security/jwt_verifier.h"
typedef struct {
grpc_pollset pollset;
grpc_pollset *pollset;
gpr_mu mu;
int is_done;
int success;
} synchronizer;
@ -77,10 +79,10 @@ static void on_jwt_verification_done(void *user_data,
grpc_jwt_verifier_status_to_string(status));
}
gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
gpr_mu_lock(&sync->mu);
sync->is_done = 1;
grpc_pollset_kick(&sync->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
grpc_pollset_kick(sync->pollset, NULL);
gpr_mu_unlock(&sync->mu);
}
int main(int argc, char **argv) {
@ -103,23 +105,28 @@ int main(int argc, char **argv) {
grpc_init();
grpc_pollset_init(&sync.pollset);
sync.pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&sync.mu);
grpc_pollset_init(sync.pollset, &sync.mu);
sync.is_done = 0;
grpc_jwt_verifier_verify(&exec_ctx, verifier, &sync.pollset, jwt, aud,
grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud,
on_jwt_verification_done, &sync);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_lock(&sync.mu);
while (!sync.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &sync.pollset, &worker,
grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_unlock(&sync.mu);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_lock(&sync.mu);
}
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
gpr_mu_unlock(&sync.mu);
gpr_mu_destroy(&sync.mu);
gpr_free(sync.pollset);
grpc_jwt_verifier_destroy(verifier);
gpr_cmdline_destroy(cl);

@ -69,7 +69,8 @@ static int has_port_been_chosen(int port) {
}
typedef struct freereq {
grpc_pollset pollset;
gpr_mu mu;
grpc_pollset *pollset;
int done;
} freereq;
@ -82,10 +83,10 @@ static void destroy_pollset_and_shutdown(grpc_exec_ctx *exec_ctx, void *p,
static void freed_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
const grpc_httpcli_response *response) {
freereq *pr = arg;
gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
gpr_mu_lock(&pr->mu);
pr->done = 1;
grpc_pollset_kick(&pr->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
grpc_pollset_kick(pr->pollset, NULL);
gpr_mu_unlock(&pr->mu);
}
static void free_port_using_server(char *server, int port) {
@ -100,31 +101,36 @@ static void free_port_using_server(char *server, int port) {
memset(&pr, 0, sizeof(pr));
memset(&req, 0, sizeof(req));
grpc_pollset_init(&pr.pollset);
pr.pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&pr.mu);
grpc_pollset_init(pr.pollset, &pr.mu);
grpc_closure_init(&shutdown_closure, destroy_pollset_and_shutdown,
&pr.pollset);
pr.pollset);
req.host = server;
gpr_asprintf(&path, "/drop/%d", port);
req.path = path;
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&exec_ctx, &context, &pr.pollset, &req,
grpc_httpcli_get(&exec_ctx, &context, pr.pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), freed_port_from_server,
&pr);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
gpr_mu_lock(&pr.mu);
while (!pr.done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &pr.pollset, &worker,
grpc_pollset_work(&exec_ctx, pr.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
gpr_mu_unlock(&pr.mu);
grpc_httpcli_context_destroy(&context);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_shutdown(&exec_ctx, &pr.pollset, &shutdown_closure);
grpc_pollset_shutdown(&exec_ctx, pr.pollset, &shutdown_closure);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pr.pollset);
gpr_mu_destroy(&pr.mu);
gpr_free(path);
}
@ -202,7 +208,8 @@ static int is_port_available(int *port, int is_tcp) {
}
typedef struct portreq {
grpc_pollset pollset;
gpr_mu mu;
grpc_pollset *pollset;
int port;
int retries;
char *server;
@ -234,7 +241,7 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
pr->retries++;
req.host = pr->server;
req.path = "/get";
grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pollset, &req,
grpc_httpcli_get(exec_ctx, pr->ctx, pr->pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
pr);
return;
@ -246,10 +253,10 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
port = port * 10 + response->body[i] - '0';
}
GPR_ASSERT(port > 1024);
gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
gpr_mu_lock(&pr->mu);
pr->port = port;
grpc_pollset_kick(&pr->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
grpc_pollset_kick(pr->pollset, NULL);
gpr_mu_unlock(&pr->mu);
}
static int pick_port_using_server(char *server) {
@ -263,9 +270,11 @@ static int pick_port_using_server(char *server) {
memset(&pr, 0, sizeof(pr));
memset(&req, 0, sizeof(req));
grpc_pollset_init(&pr.pollset);
pr.pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&pr.mu);
grpc_pollset_init(pr.pollset, &pr.mu);
grpc_closure_init(&shutdown_closure, destroy_pollset_and_shutdown,
&pr.pollset);
pr.pollset);
pr.port = -1;
pr.server = server;
pr.ctx = &context;
@ -274,22 +283,24 @@ static int pick_port_using_server(char *server) {
req.path = "/get";
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&exec_ctx, &context, &pr.pollset, &req,
grpc_httpcli_get(&exec_ctx, &context, pr.pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
&pr);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
gpr_mu_lock(&pr.mu);
while (pr.port == -1) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, &pr.pollset, &worker,
grpc_pollset_work(&exec_ctx, pr.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
gpr_mu_unlock(&pr.mu);
grpc_httpcli_context_destroy(&context);
grpc_pollset_shutdown(&exec_ctx, &pr.pollset, &shutdown_closure);
grpc_pollset_shutdown(&exec_ctx, pr.pollset, &shutdown_closure);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pr.pollset);
gpr_mu_destroy(&pr.mu);
return pr.port;
}

@ -57,8 +57,9 @@ void test_tcp_server_init(test_tcp_server *server,
server->tcp_server = NULL;
grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server);
server->shutdown = 0;
grpc_pollset_init(&server->pollset);
server->pollsets[0] = &server->pollset;
server->pollset = gpr_malloc(grpc_pollset_size());
gpr_mu_init(&server->mu);
grpc_pollset_init(server->pollset, &server->mu);
server->on_connect = on_connect;
server->cb_data = user_data;
}
@ -77,7 +78,7 @@ void test_tcp_server_start(test_tcp_server *server, int port) {
grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr));
GPR_ASSERT(port_added == port);
grpc_tcp_server_start(&exec_ctx, server->tcp_server, server->pollsets, 1,
grpc_tcp_server_start(&exec_ctx, server->tcp_server, &server->pollset, 1,
server->on_connect, server->cb_data);
gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);
@ -90,10 +91,10 @@ void test_tcp_server_poll(test_tcp_server *server, int seconds) {
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(seconds, GPR_TIMESPAN));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
grpc_pollset_work(&exec_ctx, &server->pollset, &worker,
gpr_mu_lock(&server->mu);
grpc_pollset_work(&exec_ctx, server->pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
gpr_mu_unlock(&server->mu);
grpc_exec_ctx_finish(&exec_ctx);
}
@ -111,8 +112,10 @@ void test_tcp_server_destroy(test_tcp_server *server) {
gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
test_tcp_server_poll(server, 1);
}
grpc_pollset_shutdown(&exec_ctx, &server->pollset, &do_nothing_cb);
grpc_pollset_shutdown(&exec_ctx, server->pollset, &do_nothing_cb);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_destroy(&server->pollset);
grpc_pollset_destroy(server->pollset);
gpr_free(server->pollset);
gpr_mu_destroy(&server->mu);
grpc_shutdown();
}

@ -41,8 +41,8 @@ typedef struct test_tcp_server {
grpc_tcp_server *tcp_server;
grpc_closure shutdown_complete;
int shutdown;
grpc_pollset pollset;
grpc_pollset *pollsets[1];
gpr_mu mu;
grpc_pollset *pollset;
grpc_tcp_server_cb on_connect;
void *cb_data;
} test_tcp_server;
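For completeness, a hedged sketch of how a test might drive the updated test_tcp_server helper. The connect callback body and the port value are placeholders, grpc_init() is assumed to have been called by the surrounding test, and the grpc_tcp_server_acceptor parameter is assumed from its use in the tcp_server test above:

static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
                       grpc_tcp_server_acceptor *acceptor) {
  /* placeholder: accept and immediately tear down the connection */
  grpc_endpoint_shutdown(exec_ctx, tcp);
  grpc_endpoint_destroy(exec_ctx, tcp);
}

static void run_sketch(int port /* placeholder, e.g. a freshly picked port */) {
  test_tcp_server server;
  test_tcp_server_init(&server, on_connect, &server);
  test_tcp_server_start(&server, port);
  test_tcp_server_poll(&server, 1 /* seconds */);
  test_tcp_server_destroy(&server); /* also calls grpc_shutdown(), per the .c above */
}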
