Merge github.com:grpc/grpc into propagate

commit 22de7b65c0 (pull/2800/head^2)
Author: Craig Tiller
58 changed files (lines changed in parentheses):

  1. BUILD (6)
  2. Makefile (172)
  3. build.json (79)
  4. gRPC.podspec (3)
  5. include/grpc/grpc.h (9)
  6. src/core/client_config/subchannel.c (6)
  7. src/core/iomgr/fd_posix.c (18)
  8. src/core/iomgr/fd_posix.h (9)
  9. src/core/iomgr/pollset.h (21)
  10. src/core/iomgr/pollset_kick_posix.c (168)
  11. src/core/iomgr/pollset_kick_posix.h (93)
  12. src/core/iomgr/pollset_multipoller_with_epoll.c (124)
  13. src/core/iomgr/pollset_multipoller_with_poll_posix.c (123)
  14. src/core/iomgr/pollset_posix.c (139)
  15. src/core/iomgr/pollset_posix.h (29)
  16. src/core/iomgr/pollset_windows.c (83)
  17. src/core/iomgr/pollset_windows.h (12)
  18. src/core/iomgr/wakeup_fd_eventfd.c (8)
  19. src/core/iomgr/wakeup_fd_pipe.c (12)
  20. src/core/iomgr/wakeup_fd_posix.c (10)
  21. src/core/iomgr/wakeup_fd_posix.h (20)
  22. src/core/security/google_default_credentials.c (6)
  23. src/core/surface/completion_queue.c (70)
  24. src/core/surface/completion_queue.h (2)
  25. src/core/surface/server.c (73)
  26. src/core/transport/chttp2/internal.h (6)
  27. src/core/transport/chttp2/stream_lists.c (3)
  28. src/core/transport/chttp2/writing.c (30)
  29. src/core/transport/chttp2_transport.c (8)
  30. test/core/httpcli/httpcli_test.c (8)
  31. test/core/iomgr/endpoint_tests.c (21)
  32. test/core/iomgr/fd_posix_test.c (20)
  33. test/core/iomgr/poll_kick_posix_test.c (130)
  34. test/core/iomgr/tcp_client_posix_test.c (11)
  35. test/core/iomgr/tcp_posix_test.c (17)
  36. test/core/iomgr/tcp_server_posix_test.c (5)
  37. test/core/security/oauth2_utils.c (9)
  38. test/core/security/print_google_default_creds_token.c (9)
  39. test/core/security/verify_jwt.c (9)
  40. test/core/util/reconnect_server.c (160)
  41. test/core/util/reconnect_server.h (69)
  42. test/cpp/end2end/end2end_test.cc (25)
  43. test/cpp/interop/client.cc (6)
  44. test/cpp/interop/interop_client.cc (19)
  45. test/cpp/interop/interop_client.h (1)
  46. test/cpp/interop/reconnect_interop_client.cc (103)
  47. test/cpp/interop/reconnect_interop_server.cc (190)
  48. test/cpp/interop/server.cc (7)
  49. test/proto/messages.proto (8)
  50. test/proto/test.proto (6)
  51. tools/doxygen/Doxyfile.core.internal (2)
  52. tools/run_tests/sources_and_headers.json (86)
  53. tools/run_tests/tests.json (8)
  54. vsprojects/Grpc.mak (25)
  55. vsprojects/grpc/grpc.vcxproj (3)
  56. vsprojects/grpc/grpc.vcxproj.filters (6)
  57. vsprojects/grpc_unsecure/grpc_unsecure.vcxproj (3)
  58. vsprojects/grpc_unsecure/grpc_unsecure.vcxproj.filters (6)

@@ -186,7 +186,6 @@ cc_library(
         "src/core/iomgr/iomgr_internal.h",
         "src/core/iomgr/iomgr_posix.h",
         "src/core/iomgr/pollset.h",
-        "src/core/iomgr/pollset_kick_posix.h",
         "src/core/iomgr/pollset_posix.h",
         "src/core/iomgr/pollset_set.h",
         "src/core/iomgr/pollset_set_posix.h",
@@ -308,7 +307,6 @@ cc_library(
         "src/core/iomgr/iomgr.c",
         "src/core/iomgr/iomgr_posix.c",
         "src/core/iomgr/iomgr_windows.c",
-        "src/core/iomgr/pollset_kick_posix.c",
         "src/core/iomgr/pollset_multipoller_with_epoll.c",
         "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
         "src/core/iomgr/pollset_posix.c",
@@ -448,7 +446,6 @@ cc_library(
         "src/core/iomgr/iomgr_internal.h",
         "src/core/iomgr/iomgr_posix.h",
         "src/core/iomgr/pollset.h",
-        "src/core/iomgr/pollset_kick_posix.h",
         "src/core/iomgr/pollset_posix.h",
         "src/core/iomgr/pollset_set.h",
         "src/core/iomgr/pollset_set_posix.h",
@@ -547,7 +544,6 @@ cc_library(
         "src/core/iomgr/iomgr.c",
         "src/core/iomgr/iomgr_posix.c",
         "src/core/iomgr/iomgr_windows.c",
-        "src/core/iomgr/pollset_kick_posix.c",
         "src/core/iomgr/pollset_multipoller_with_epoll.c",
         "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
         "src/core/iomgr/pollset_posix.c",
@@ -1033,7 +1029,6 @@ objc_library(
         "src/core/iomgr/iomgr.c",
         "src/core/iomgr/iomgr_posix.c",
         "src/core/iomgr/iomgr_windows.c",
-        "src/core/iomgr/pollset_kick_posix.c",
         "src/core/iomgr/pollset_multipoller_with_epoll.c",
         "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
         "src/core/iomgr/pollset_posix.c",
@@ -1175,7 +1170,6 @@ objc_library(
         "src/core/iomgr/iomgr_internal.h",
         "src/core/iomgr/iomgr_posix.h",
         "src/core/iomgr/pollset.h",
-        "src/core/iomgr/pollset_kick_posix.h",
         "src/core/iomgr/pollset_posix.h",
         "src/core/iomgr/pollset_set.h",
         "src/core/iomgr/pollset_set_posix.h",

File diff suppressed because one or more lines are too long

@@ -151,7 +151,6 @@
       "src/core/iomgr/iomgr_internal.h",
       "src/core/iomgr/iomgr_posix.h",
       "src/core/iomgr/pollset.h",
-      "src/core/iomgr/pollset_kick_posix.h",
       "src/core/iomgr/pollset_posix.h",
       "src/core/iomgr/pollset_set.h",
       "src/core/iomgr/pollset_set_posix.h",
@@ -249,7 +248,6 @@
       "src/core/iomgr/iomgr.c",
       "src/core/iomgr/iomgr_posix.c",
       "src/core/iomgr/iomgr_windows.c",
-      "src/core/iomgr/pollset_kick_posix.c",
       "src/core/iomgr/pollset_multipoller_with_epoll.c",
       "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
       "src/core/iomgr/pollset_posix.c",
@@ -575,6 +573,23 @@
       "secure": "no",
       "vs_project_guid": "{46CEDFFF-9692-456A-AA24-38B5D6BCF4C5}"
     },
+    {
+      "name": "reconnect_server",
+      "build": "private",
+      "language": "c",
+      "headers": [
+        "test/core/util/reconnect_server.h"
+      ],
+      "src": [
+        "test/core/util/reconnect_server.c"
+      ],
+      "deps": [
+        "grpc_test_util",
+        "grpc",
+        "gpr_test_util",
+        "gpr"
+      ]
+    },
     {
       "name": "grpc++",
       "build": "all",
@@ -1680,23 +1695,6 @@
         "gpr"
       ]
     },
-    {
-      "name": "poll_kick_posix_test",
-      "build": "test",
-      "language": "c",
-      "src": [
-        "test/core/iomgr/poll_kick_posix_test.c"
-      ],
-      "deps": [
-        "grpc_test_util",
-        "grpc",
-        "gpr_test_util",
-        "gpr"
-      ],
-      "platforms": [
-        "posix"
-      ]
-    },
     {
       "name": "resolve_address_test",
       "build": "test",
@@ -2441,6 +2439,49 @@
         "grpc++_test_config"
       ]
     },
+    {
+      "name": "reconnect_interop_client",
+      "build": "test",
+      "run": false,
+      "language": "c++",
+      "src": [
+        "test/proto/empty.proto",
+        "test/proto/messages.proto",
+        "test/proto/test.proto",
+        "test/cpp/interop/reconnect_interop_client.cc"
+      ],
+      "deps": [
+        "grpc++_test_util",
+        "grpc_test_util",
+        "grpc++",
+        "grpc",
+        "gpr_test_util",
+        "gpr",
+        "grpc++_test_config"
+      ]
+    },
+    {
+      "name": "reconnect_interop_server",
+      "build": "test",
+      "run": false,
+      "language": "c++",
+      "src": [
+        "test/proto/empty.proto",
+        "test/proto/messages.proto",
+        "test/proto/test.proto",
+        "test/cpp/interop/reconnect_interop_server.cc"
+      ],
+      "deps": [
+        "reconnect_server",
+        "grpc++_test_util",
+        "grpc_test_util",
+        "grpc++",
+        "grpc",
+        "gpr_test_util",
+        "gpr",
+        "grpc++_test_config"
+      ]
+    },
     {
       "name": "secure_auth_context_test",
       "build": "test",

@@ -188,7 +188,6 @@ Pod::Spec.new do |s|
                       'src/core/iomgr/iomgr_internal.h',
                       'src/core/iomgr/iomgr_posix.h',
                       'src/core/iomgr/pollset.h',
-                      'src/core/iomgr/pollset_kick_posix.h',
                       'src/core/iomgr/pollset_posix.h',
                       'src/core/iomgr/pollset_set.h',
                       'src/core/iomgr/pollset_set_posix.h',
@@ -317,7 +316,6 @@ Pod::Spec.new do |s|
                      'src/core/iomgr/iomgr.c',
                      'src/core/iomgr/iomgr_posix.c',
                      'src/core/iomgr/iomgr_windows.c',
-                     'src/core/iomgr/pollset_kick_posix.c',
                      'src/core/iomgr/pollset_multipoller_with_epoll.c',
                      'src/core/iomgr/pollset_multipoller_with_poll_posix.c',
                      'src/core/iomgr/pollset_posix.c',
@@ -458,7 +456,6 @@ Pod::Spec.new do |s|
                       'src/core/iomgr/iomgr_internal.h',
                       'src/core/iomgr/iomgr_posix.h',
                       'src/core/iomgr/pollset.h',
-                      'src/core/iomgr/pollset_kick_posix.h',
                       'src/core/iomgr/pollset_posix.h',
                       'src/core/iomgr/pollset_set.h',
                       'src/core/iomgr/pollset_set_posix.h',

@@ -412,10 +412,17 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cq,
    otherwise a grpc_event describing the event that occurred.

    Callers must not call grpc_completion_queue_next and
-   grpc_completion_queue_pluck simultaneously on the same completion queue. */
+   grpc_completion_queue_pluck simultaneously on the same completion queue.
+
+   Completion queues support a maximum of GRPC_MAX_COMPLETION_QUEUE_PLUCKERS
+   concurrently executing plucks at any time. */
 grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cq, void *tag,
                                        gpr_timespec deadline);

+/** Maximum number of outstanding grpc_completion_queue_pluck executions per
+    completion queue */
+#define GRPC_MAX_COMPLETION_QUEUE_PLUCKERS 6
+
 /** Begin destruction of a completion queue. Once all possible events are
    drained then grpc_completion_queue_next will start to produce
    GRPC_QUEUE_SHUTDOWN events only. At that point it's safe to call
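
Note: with the new GRPC_MAX_COMPLETION_QUEUE_PLUCKERS constant, at most six threads may sit in grpc_completion_queue_pluck on one queue at a time. A minimal sketch of what a caller must now respect; the helper function is hypothetical, not code from this commit:

#include <grpc/grpc.h>
#include <grpc/support/log.h>

/* Hypothetical helper: blocks until the event for `tag` is available.
   At most GRPC_MAX_COMPLETION_QUEUE_PLUCKERS threads may execute this
   against the same completion queue concurrently. */
static void pluck_tag(grpc_completion_queue *cq, void *tag) {
  grpc_event ev = grpc_completion_queue_pluck(
      cq, tag, gpr_inf_future(GPR_CLOCK_REALTIME));
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
}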

@@ -322,8 +322,8 @@ static void continue_connect(grpc_subchannel *c) {
 static void start_connect(grpc_subchannel *c) {
   c->backoff_delta = gpr_time_from_seconds(
       GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS, GPR_TIMESPAN);
-  c->next_attempt = gpr_time_add(
-      gpr_now(GPR_CLOCK_MONOTONIC), c->backoff_delta);
+  c->next_attempt =
+      gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), c->backoff_delta);
   continue_connect(c);
 }
@@ -511,8 +511,6 @@ static void publish_transport(grpc_subchannel *c) {
   connection *destroy_connection = NULL;
   grpc_channel_element *elem;

-  gpr_log(GPR_DEBUG, "publish_transport: %p", c->master);
-
   /* build final filter list */
   num_filters = c->num_filters + c->connecting_result.num_filters + 1;
   filters = gpr_malloc(sizeof(*filters) * num_filters);

@@ -168,13 +168,19 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
   return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
 }

+static void pollset_kick_locked(grpc_pollset *pollset) {
+  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
+  grpc_pollset_kick(pollset, NULL);
+  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
+}
+
 static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
   if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
-    grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
+    pollset_kick_locked(fd->inactive_watcher_root.next->pollset);
   } else if (fd->read_watcher) {
-    grpc_pollset_force_kick(fd->read_watcher->pollset);
+    pollset_kick_locked(fd->read_watcher->pollset);
   } else if (fd->write_watcher) {
-    grpc_pollset_force_kick(fd->write_watcher->pollset);
+    pollset_kick_locked(fd->write_watcher->pollset);
   }
 }
@@ -188,13 +194,13 @@ static void wake_all_watchers_locked(grpc_fd *fd) {
   grpc_fd_watcher *watcher;
   for (watcher = fd->inactive_watcher_root.next;
        watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
-    grpc_pollset_force_kick(watcher->pollset);
+    pollset_kick_locked(watcher->pollset);
   }
   if (fd->read_watcher) {
-    grpc_pollset_force_kick(fd->read_watcher->pollset);
+    pollset_kick_locked(fd->read_watcher->pollset);
   }
   if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
-    grpc_pollset_force_kick(fd->write_watcher->pollset);
+    pollset_kick_locked(fd->write_watcher->pollset);
   }
 }

@@ -109,7 +109,8 @@ grpc_fd *grpc_fd_create(int fd, const char *name);
    on_done is called when the underlying file descriptor is definitely close()d.
    If on_done is NULL, no callback will be made.
    Requires: *fd initialized; no outstanding notify_on_read or
-   notify_on_write. */
+   notify_on_write.
+   MUST NOT be called with a pollset lock taken */
 void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                     const char *reason);
@@ -122,11 +123,13 @@ void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
    i.e. a combination of read_mask and write_mask determined by the fd's current
    interest in said events.
    Polling strategies that do not need to alter their behavior depending on the
-   fd's current interest (such as epoll) do not need to call this function. */
+   fd's current interest (such as epoll) do not need to call this function.
+   MUST NOT be called with a pollset lock taken */
 gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                               gpr_uint32 read_mask, gpr_uint32 write_mask,
                               grpc_fd_watcher *rec);

-/* Complete polling previously started with grpc_fd_begin_poll */
+/* Complete polling previously started with grpc_fd_begin_poll
+   MUST NOT be called with a pollset lock taken */
 void grpc_fd_end_poll(grpc_fd_watcher *rec, int got_read, int got_write);

 /* Return 1 if this fd is orphaned, 0 otherwise */
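
Note: a sketch of the calling convention the new comments describe, with both calls made while no pollset lock is held. The helper is invented for illustration and mirrors basic_pollset_maybe_work later in this commit:

#include <poll.h>
#include "src/core/iomgr/fd_posix.h"

/* Hypothetical single-fd poll: begin_poll/end_poll bracket the blocking
   poll() call and are never invoked with a pollset lock taken. */
static void poll_one_fd(grpc_fd *fd, grpc_pollset *pollset, int timeout_ms) {
  grpc_fd_watcher watcher;
  struct pollfd pfd;
  pfd.fd = fd->fd;
  pfd.events =
      (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &watcher);
  pfd.revents = 0;
  poll(&pfd, 1, timeout_ms);
  grpc_fd_end_poll(&watcher, pfd.revents & POLLIN, pfd.revents & POLLOUT);
}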

@@ -37,6 +37,8 @@
 #include <grpc/support/port_platform.h>
 #include <grpc/support/time.h>

+#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
+
 /* A grpc_pollset is a set of file descriptors that a higher level item is
    interested in. For example:
    - a server will typically keep a pollset containing all connected channels,
@@ -64,12 +66,23 @@ void grpc_pollset_destroy(grpc_pollset *pollset);
    Requires GRPC_POLLSET_MU(pollset) locked.
    May unlock GRPC_POLLSET_MU(pollset) during its execution.

+   worker is a (platform-specific) handle that can be used to wake up
+   from grpc_pollset_work before any events are received and before the timeout
+   has expired. It is both initialized and destroyed by grpc_pollset_work.
+   Initialization of worker is guaranteed to occur BEFORE the
+   GRPC_POLLSET_MU(pollset) is released for the first time by
+   grpc_pollset_work, and it is guaranteed that GRPC_POLLSET_MU(pollset) will
+   not be released by grpc_pollset_work AFTER worker has been destroyed.
+
    Returns true if some work has been done, and false if the deadline
-   got attained. */
-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline);
+   expired. */
+int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                      gpr_timespec deadline);

 /* Break one polling thread out of polling work for this pollset.
-   Requires GRPC_POLLSET_MU(pollset) locked. */
-void grpc_pollset_kick(grpc_pollset *pollset);
+   If specific_worker is GRPC_POLLSET_KICK_BROADCAST, kick ALL the workers.
+   Otherwise, if specific_worker is non-NULL, then kick that worker. */
+void grpc_pollset_kick(grpc_pollset *pollset,
+                       grpc_pollset_worker *specific_worker);

 #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */
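
Note: under the new API each polling thread supplies a stack-allocated grpc_pollset_worker. A minimal sketch of a driving loop; the function name is invented, but the pattern matches the test updates elsewhere in this commit:

/* Drive a pollset on the calling thread until the deadline passes.
   grpc_pollset_work initializes and destroys `worker` internally on
   each iteration, per the contract documented above. */
void drive_pollset_until(grpc_pollset *pollset, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  while (gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0) {
    grpc_pollset_work(pollset, &worker, deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
}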

@@ -1,168 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <grpc/support/port_platform.h>
-
-#ifdef GPR_POSIX_SOCKET
-
-#include "src/core/iomgr/pollset_kick_posix.h"
-
-#include <errno.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "src/core/iomgr/socket_utils_posix.h"
-#include "src/core/iomgr/wakeup_fd_posix.h"
-#include <grpc/support/alloc.h>
-#include <grpc/support/log.h>
-
-/* This implementation is based on a freelist of wakeup fds, with extra logic to
- * handle kicks while there is no attached fd. */
-
-/* TODO(klempner): Autosize this, and consider providing a way to disable the
- * cap entirely on systems with large fd limits */
-#define GRPC_MAX_CACHED_WFDS 50
-
-static grpc_kick_fd_info *fd_freelist = NULL;
-static int fd_freelist_count = 0;
-static gpr_mu fd_freelist_mu;
-
-static grpc_kick_fd_info *allocate_wfd(void) {
-  grpc_kick_fd_info *info = NULL;
-  gpr_mu_lock(&fd_freelist_mu);
-  if (fd_freelist != NULL) {
-    info = fd_freelist;
-    fd_freelist = fd_freelist->next;
-    --fd_freelist_count;
-  }
-  gpr_mu_unlock(&fd_freelist_mu);
-  if (info == NULL) {
-    info = gpr_malloc(sizeof(*info));
-    grpc_wakeup_fd_create(&info->wakeup_fd);
-    info->next = NULL;
-  }
-  return info;
-}
-
-static void destroy_wfd(grpc_kick_fd_info *wfd) {
-  grpc_wakeup_fd_destroy(&wfd->wakeup_fd);
-  gpr_free(wfd);
-}
-
-static void free_wfd(grpc_kick_fd_info *fd_info) {
-  gpr_mu_lock(&fd_freelist_mu);
-  if (fd_freelist_count < GRPC_MAX_CACHED_WFDS) {
-    fd_info->next = fd_freelist;
-    fd_freelist = fd_info;
-    fd_freelist_count++;
-    fd_info = NULL;
-  }
-  gpr_mu_unlock(&fd_freelist_mu);
-
-  if (fd_info) {
-    destroy_wfd(fd_info);
-  }
-}
-
-void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state) {
-  gpr_mu_init(&kick_state->mu);
-  kick_state->kicked = 0;
-  kick_state->fd_list.next = kick_state->fd_list.prev = &kick_state->fd_list;
-}
-
-void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state) {
-  gpr_mu_destroy(&kick_state->mu);
-  GPR_ASSERT(kick_state->fd_list.next == &kick_state->fd_list);
-}
-
-grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
-    grpc_pollset_kick_state *kick_state) {
-  grpc_kick_fd_info *fd_info;
-  gpr_mu_lock(&kick_state->mu);
-  if (kick_state->kicked) {
-    kick_state->kicked = 0;
-    gpr_mu_unlock(&kick_state->mu);
-    return NULL;
-  }
-  fd_info = allocate_wfd();
-  fd_info->next = &kick_state->fd_list;
-  fd_info->prev = fd_info->next->prev;
-  fd_info->next->prev = fd_info->prev->next = fd_info;
-  gpr_mu_unlock(&kick_state->mu);
-  return fd_info;
-}
-
-void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
-                               grpc_kick_fd_info *fd_info) {
-  grpc_wakeup_fd_consume_wakeup(&fd_info->wakeup_fd);
-}
-
-void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
-                                 grpc_kick_fd_info *fd_info) {
-  gpr_mu_lock(&kick_state->mu);
-  fd_info->next->prev = fd_info->prev;
-  fd_info->prev->next = fd_info->next;
-  free_wfd(fd_info);
-  gpr_mu_unlock(&kick_state->mu);
-}
-
-void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state) {
-  gpr_mu_lock(&kick_state->mu);
-  if (kick_state->fd_list.next != &kick_state->fd_list) {
-    grpc_wakeup_fd_wakeup(&kick_state->fd_list.next->wakeup_fd);
-  } else {
-    kick_state->kicked = 1;
-  }
-  gpr_mu_unlock(&kick_state->mu);
-}
-
-void grpc_pollset_kick_global_init_fallback_fd(void) {
-  gpr_mu_init(&fd_freelist_mu);
-  grpc_wakeup_fd_global_init_force_fallback();
-}
-
-void grpc_pollset_kick_global_init(void) {
-  gpr_mu_init(&fd_freelist_mu);
-  grpc_wakeup_fd_global_init();
-}
-
-void grpc_pollset_kick_global_destroy(void) {
-  while (fd_freelist != NULL) {
-    grpc_kick_fd_info *current = fd_freelist;
-    fd_freelist = fd_freelist->next;
-    destroy_wfd(current);
-  }
-  grpc_wakeup_fd_global_destroy();
-  gpr_mu_destroy(&fd_freelist_mu);
-}
-
-#endif /* GPR_POSIX_SOCKET */

@@ -1,93 +0,0 @@
-/*
- *
- * Copyright 2015, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H
-#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H
-
-#include "src/core/iomgr/wakeup_fd_posix.h"
-#include <grpc/support/sync.h>
-
-/* pollset kicking allows breaking a thread out of polling work for
-   a given pollset.
-   writing a byte to a pipe is used as a posix-ly portable base
-   mechanism, and eventfds are utilized on Linux for better performance. */
-
-typedef struct grpc_kick_fd_info {
-  grpc_wakeup_fd_info wakeup_fd;
-  /* used for polling list and free list */
-  struct grpc_kick_fd_info *next;
-  /* only used when polling */
-  struct grpc_kick_fd_info *prev;
-} grpc_kick_fd_info;
-
-typedef struct grpc_pollset_kick_state {
-  gpr_mu mu;
-  int kicked;
-  struct grpc_kick_fd_info fd_list;
-} grpc_pollset_kick_state;
-
-#define GRPC_POLLSET_KICK_GET_FD(kick_fd_info) \
-  GRPC_WAKEUP_FD_GET_READ_FD(&(kick_fd_info)->wakeup_fd)
-
-/* This is an abstraction around the typical pipe mechanism for waking up a
-   thread sitting in a poll() style call. */
-
-void grpc_pollset_kick_global_init(void);
-void grpc_pollset_kick_global_destroy(void);
-
-void grpc_pollset_kick_init(grpc_pollset_kick_state *kick_state);
-void grpc_pollset_kick_destroy(grpc_pollset_kick_state *kick_state);
-
-/* Guarantees a pure posix implementation rather than a specialized one, if
- * applicable. Intended for testing. */
-void grpc_pollset_kick_global_init_fallback_fd(void);
-
-/* Must be called before entering poll(). If return value is NULL, this consumed
-   an existing kick. Otherwise the return value is an FD to add to the poll set.
- */
-grpc_kick_fd_info *grpc_pollset_kick_pre_poll(
-    grpc_pollset_kick_state *kick_state);
-
-/* Consume an existing kick. Must be called after poll returns that the fd was
-   readable, and before calling kick_post_poll. */
-void grpc_pollset_kick_consume(grpc_pollset_kick_state *kick_state,
-                               grpc_kick_fd_info *fd_info);
-
-/* Must be called after pre_poll, and after consume if applicable */
-void grpc_pollset_kick_post_poll(grpc_pollset_kick_state *kick_state,
-                                 grpc_kick_fd_info *fd_info);
-
-/* Actually kick */
-void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state);
-
-#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_KICK_POSIX_H */

@@ -36,6 +36,7 @@
 #ifdef GPR_LINUX_MULTIPOLL_WITH_EPOLL

 #include <errno.h>
+#include <poll.h>
 #include <string.h>
 #include <sys/epoll.h>
 #include <unistd.h>
@@ -44,23 +45,28 @@
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>

+typedef struct wakeup_fd_hdl {
+  grpc_wakeup_fd wakeup_fd;
+  struct wakeup_fd_hdl *next;
+} wakeup_fd_hdl;
+
+typedef struct {
+  grpc_pollset *pollset;
+  grpc_fd *fd;
+  grpc_iomgr_closure closure;
+} delayed_add;
+
 typedef struct {
   int epoll_fd;
-  grpc_wakeup_fd_info wakeup_fd;
+  wakeup_fd_hdl *free_wakeup_fds;
 } pollset_hdr;

-static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
-                                                grpc_fd *fd,
-                                                int and_unlock_pollset) {
+static void finally_add_fd(grpc_pollset *pollset, grpc_fd *fd) {
   pollset_hdr *h = pollset->data.ptr;
   struct epoll_event ev;
   int err;
   grpc_fd_watcher watcher;

-  if (and_unlock_pollset) {
-    gpr_mu_unlock(&pollset->mu);
-  }
-
   /* We pretend to be polling whilst adding an fd to keep the fd from being
      closed during the add. This may result in a spurious wakeup being assigned
      to this pollset whilst adding, but that should be benign. */
@@ -80,6 +86,52 @@ static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
   grpc_fd_end_poll(&watcher, 0, 0);
 }

+static void perform_delayed_add(void *arg, int iomgr_status) {
+  delayed_add *da = arg;
+  int do_shutdown_cb = 0;
+
+  if (!grpc_fd_is_orphaned(da->fd)) {
+    finally_add_fd(da->pollset, da->fd);
+  }
+
+  gpr_mu_lock(&da->pollset->mu);
+  da->pollset->in_flight_cbs--;
+  if (da->pollset->shutting_down) {
+    /* We don't care about this pollset anymore. */
+    if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
+      GPR_ASSERT(!grpc_pollset_has_workers(da->pollset));
+      da->pollset->called_shutdown = 1;
+      do_shutdown_cb = 1;
+    }
+  }
+  gpr_mu_unlock(&da->pollset->mu);
+
+  GRPC_FD_UNREF(da->fd, "delayed_add");
+
+  if (do_shutdown_cb) {
+    da->pollset->shutdown_done_cb(da->pollset->shutdown_done_arg);
+  }
+
+  gpr_free(da);
+}
+
+static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
+                                                grpc_fd *fd,
+                                                int and_unlock_pollset) {
+  if (and_unlock_pollset) {
+    gpr_mu_unlock(&pollset->mu);
+    finally_add_fd(pollset, fd);
+  } else {
+    delayed_add *da = gpr_malloc(sizeof(*da));
+    da->pollset = pollset;
+    da->fd = fd;
+    GRPC_FD_REF(fd, "delayed_add");
+    grpc_iomgr_closure_init(&da->closure, perform_delayed_add, da);
+    pollset->in_flight_cbs++;
+    grpc_iomgr_add_callback(&da->closure);
+  }
+}
+
 static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
                                                 grpc_fd *fd,
                                                 int and_unlock_pollset) {
@@ -103,12 +155,14 @@ static void multipoll_with_epoll_pollset_del_fd(grpc_pollset *pollset,
 #define GRPC_EPOLL_MAX_EVENTS 1000

 static void multipoll_with_epoll_pollset_maybe_work(
-    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
-    int allow_synchronous_callback) {
+    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
+    gpr_timespec now, int allow_synchronous_callback) {
   struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
   int ep_rv;
+  int poll_rv;
   pollset_hdr *h = pollset->data.ptr;
   int timeout_ms;
+  struct pollfd pfds[2];

   /* If you want to ignore epoll's ability to sanely handle parallel pollers,
    * for a more apples-to-apples performance comparison with poll, add a
@@ -116,13 +170,32 @@ static void multipoll_with_epoll_pollset_maybe_work(
    * here.
    */

-  pollset->counter += 1;
   gpr_mu_unlock(&pollset->mu);

   timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);

+  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
+  pfds[0].events = POLLIN;
+  pfds[0].revents = 0;
+  pfds[1].fd = h->epoll_fd;
+  pfds[1].events = POLLIN;
+  pfds[1].revents = 0;
+
+  poll_rv = poll(pfds, 2, timeout_ms);
+
+  if (poll_rv < 0) {
+    if (errno != EINTR) {
+      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
+    }
+  } else if (poll_rv == 0) {
+    /* do nothing */
+  } else {
+    if (pfds[0].revents) {
+      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
+    }
+    if (pfds[1].revents) {
       do {
-        ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
+        ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
         if (ep_rv < 0) {
           if (errno != EINTR) {
             gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
@@ -130,9 +203,6 @@ static void multipoll_with_epoll_pollset_maybe_work(
         } else {
           int i;
           for (i = 0; i < ep_rv; ++i) {
-            if (ep_ev[i].data.ptr == 0) {
-              grpc_wakeup_fd_consume_wakeup(&h->wakeup_fd);
-            } else {
             grpc_fd *fd = ep_ev[i].data.ptr;
             /* TODO(klempner): We might want to consider making err and pri
              * separate events */
@@ -147,12 +217,11 @@ static void multipoll_with_epoll_pollset_maybe_work(
             }
           }
         }
-        }
-        timeout_ms = 0;
       } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
+    }
+  }

   gpr_mu_lock(&pollset->mu);
-  pollset->counter -= 1;
 }
@@ -160,21 +229,14 @@ static void multipoll_with_epoll_pollset_finish_shutdown(

 static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
   pollset_hdr *h = pollset->data.ptr;
-  grpc_wakeup_fd_destroy(&h->wakeup_fd);
   close(h->epoll_fd);
   gpr_free(h);
 }

-static void epoll_kick(grpc_pollset *pollset) {
-  pollset_hdr *h = pollset->data.ptr;
-  grpc_wakeup_fd_wakeup(&h->wakeup_fd);
-}
-
 static const grpc_pollset_vtable multipoll_with_epoll_pollset = {
     multipoll_with_epoll_pollset_add_fd,
     multipoll_with_epoll_pollset_del_fd,
     multipoll_with_epoll_pollset_maybe_work,
-    epoll_kick,
     multipoll_with_epoll_pollset_finish_shutdown,
     multipoll_with_epoll_pollset_destroy};
@@ -182,8 +244,6 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
                                      size_t nfds) {
   size_t i;
   pollset_hdr *h = gpr_malloc(sizeof(pollset_hdr));
-  struct epoll_event ev;
-  int err;

   pollset->vtable = &multipoll_with_epoll_pollset;
   pollset->data.ptr = h;
@@ -196,16 +256,6 @@ static void epoll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
   for (i = 0; i < nfds; i++) {
     multipoll_with_epoll_pollset_add_fd(pollset, fds[i], 0);
   }
-
-  grpc_wakeup_fd_create(&h->wakeup_fd);
-  ev.events = EPOLLIN;
-  ev.data.ptr = 0;
-  err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD,
-                  GRPC_WAKEUP_FD_GET_READ_FD(&h->wakeup_fd), &ev);
-  if (err < 0) {
-    gpr_log(GPR_ERROR, "Wakeup fd epoll_ctl failed: %s", strerror(errno));
-    abort();
-  }
 }

 grpc_platform_become_multipoller_type grpc_platform_become_multipoller =
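
Note: the rewritten maybe_work blocks in poll() on two fds, the per-worker wakeup fd and the epoll fd, and only then drains epoll with a zero timeout, so any individual worker can be kicked out of the wait. A standalone sketch of that wait structure; the parameter names and the 64-event buffer are assumptions for illustration:

#include <poll.h>
#include <sys/epoll.h>

static int wait_once(int wakeup_read_fd, int epoll_fd, int timeout_ms) {
  struct pollfd pfds[2];
  struct epoll_event evs[64];
  int n;
  pfds[0].fd = wakeup_read_fd;
  pfds[1].fd = epoll_fd;
  pfds[0].events = pfds[1].events = POLLIN;
  pfds[0].revents = pfds[1].revents = 0;
  n = poll(pfds, 2, timeout_ms); /* the only blocking call */
  if (n <= 0) return n;          /* timeout or error */
  if (pfds[0].revents) {
    /* kicked: the real code consumes worker->wakeup_fd here */
  }
  if (pfds[1].revents) {
    /* events are already pending, so epoll_wait need not block */
    n = epoll_wait(epoll_fd, evs, 64, 0);
  }
  return n;
}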

@@ -53,12 +53,6 @@ typedef struct {
   size_t fd_count;
   size_t fd_capacity;
   grpc_fd **fds;
-  /* fds being polled by the current poller: parallel arrays of pollfd, and
-     a grpc_fd_watcher */
-  size_t pfd_count;
-  size_t pfd_capacity;
-  grpc_fd_watcher *watchers;
-  struct pollfd *pfds;
   /* fds that have been removed from the pollset explicitly */
   size_t del_count;
   size_t del_capacity;
@@ -102,80 +96,60 @@ static void multipoll_with_poll_pollset_del_fd(grpc_pollset *pollset,
   }
 }

-static void end_polling(grpc_pollset *pollset) {
-  size_t i;
-  pollset_hdr *h;
-  h = pollset->data.ptr;
-  for (i = 1; i < h->pfd_count; i++) {
-    grpc_fd_end_poll(&h->watchers[i], h->pfds[i].revents & POLLIN,
-                     h->pfds[i].revents & POLLOUT);
-  }
-}
-
 static void multipoll_with_poll_pollset_maybe_work(
-    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
-    int allow_synchronous_callback) {
+    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
+    gpr_timespec now, int allow_synchronous_callback) {
   int timeout;
   int r;
-  size_t i, np, nf, nd;
+  size_t i, j, pfd_count, fd_count;
   pollset_hdr *h;
-  grpc_kick_fd_info *kfd;
+  /* TODO(ctiller): inline some elements to avoid an allocation */
+  grpc_fd_watcher *watchers;
+  struct pollfd *pfds;

   h = pollset->data.ptr;
   timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
-  if (h->pfd_capacity < h->fd_count + 1) {
-    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
-    gpr_free(h->pfds);
-    gpr_free(h->watchers);
-    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
-    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
-  }
-  nf = 0;
-  np = 1;
-  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
-  if (kfd == NULL) {
-    /* Already kicked */
-    return;
-  }
-  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
-  h->pfds[0].events = POLLIN;
-  h->pfds[0].revents = POLLOUT;
+  /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
+  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 1));
+  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 1));
+  fd_count = 0;
+  pfd_count = 1;
+  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
+  pfds[0].events = POLLIN;
+  pfds[0].revents = POLLOUT;
   for (i = 0; i < h->fd_count; i++) {
     int remove = grpc_fd_is_orphaned(h->fds[i]);
-    for (nd = 0; nd < h->del_count; nd++) {
-      if (h->fds[i] == h->dels[nd]) remove = 1;
+    for (j = 0; !remove && j < h->del_count; j++) {
+      if (h->fds[i] == h->dels[j]) remove = 1;
     }
     if (remove) {
       GRPC_FD_UNREF(h->fds[i], "multipoller");
     } else {
-      h->fds[nf++] = h->fds[i];
-      h->watchers[np].fd = h->fds[i];
-      h->pfds[np].fd = h->fds[i]->fd;
-      h->pfds[np].revents = 0;
-      np++;
+      h->fds[fd_count++] = h->fds[i];
+      watchers[pfd_count].fd = h->fds[i];
+      pfds[pfd_count].fd = h->fds[i]->fd;
+      pfds[pfd_count].revents = 0;
+      pfd_count++;
     }
   }
-  h->pfd_count = np;
-  h->fd_count = nf;
-  for (nd = 0; nd < h->del_count; nd++) {
-    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
+  for (j = 0; j < h->del_count; j++) {
+    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
   }
   h->del_count = 0;
-  if (h->pfd_count == 0) {
-    end_polling(pollset);
-    return;
-  }
-  pollset->counter++;
+  h->fd_count = fd_count;
   gpr_mu_unlock(&pollset->mu);

-  for (i = 1; i < np; i++) {
-    h->pfds[i].events = grpc_fd_begin_poll(h->watchers[i].fd, pollset, POLLIN,
-                                           POLLOUT, &h->watchers[i]);
+  for (i = 1; i < pfd_count; i++) {
+    pfds[i].events = grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
+                                        POLLOUT, &watchers[i]);
   }

-  r = poll(h->pfds, h->pfd_count, timeout);
+  r = poll(pfds, pfd_count, timeout);

-  end_polling(pollset);
+  for (i = 1; i < pfd_count; i++) {
+    grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
+                     pfds[i].revents & POLLOUT);
+  }

   if (r < 0) {
     if (errno != EINTR) {
@@ -184,35 +158,31 @@ static void multipoll_with_poll_pollset_maybe_work(
   } else if (r == 0) {
     /* do nothing */
   } else {
-    if (h->pfds[0].revents & POLLIN) {
-      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
+    if (pfds[0].revents & POLLIN) {
+      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
     }
-    for (i = 1; i < np; i++) {
-      if (h->watchers[i].fd == NULL) {
+    for (i = 1; i < pfd_count; i++) {
+      if (watchers[i].fd == NULL) {
         continue;
       }
-      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
-        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
+      if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
+        grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
       }
-      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
-        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
+      if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
+        grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
       }
     }
   }
-  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

-  gpr_mu_lock(&pollset->mu);
-  pollset->counter--;
-}
+  gpr_free(pfds);
+  gpr_free(watchers);

-static void multipoll_with_poll_pollset_kick(grpc_pollset *p) {
-  grpc_pollset_force_kick(p);
+  gpr_mu_lock(&pollset->mu);
 }

 static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
   size_t i;
   pollset_hdr *h = pollset->data.ptr;
-  GPR_ASSERT(pollset->counter == 0);
   for (i = 0; i < h->fd_count; i++) {
     GRPC_FD_UNREF(h->fds[i], "multipoller");
   }
@@ -226,8 +196,6 @@ static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
 static void multipoll_with_poll_pollset_destroy(grpc_pollset *pollset) {
   pollset_hdr *h = pollset->data.ptr;
   multipoll_with_poll_pollset_finish_shutdown(pollset);
-  gpr_free(h->pfds);
-  gpr_free(h->watchers);
   gpr_free(h->fds);
   gpr_free(h->dels);
   gpr_free(h);
@@ -237,7 +205,6 @@ static const grpc_pollset_vtable multipoll_with_poll_pollset = {
     multipoll_with_poll_pollset_add_fd,
     multipoll_with_poll_pollset_del_fd,
     multipoll_with_poll_pollset_maybe_work,
-    multipoll_with_poll_pollset_kick,
     multipoll_with_poll_pollset_finish_shutdown,
     multipoll_with_poll_pollset_destroy};
@@ -250,10 +217,6 @@ void grpc_poll_become_multipoller(grpc_pollset *pollset, grpc_fd **fds,
   h->fd_count = nfds;
   h->fd_capacity = nfds;
   h->fds = gpr_malloc(nfds * sizeof(grpc_fd *));
-  h->pfd_count = 0;
-  h->pfd_capacity = 0;
-  h->pfds = NULL;
-  h->watchers = NULL;
   h->del_count = 0;
   h->del_capacity = 0;
   h->dels = NULL;

@@ -55,22 +55,60 @@
 #include <grpc/support/useful.h>

 GPR_TLS_DECL(g_current_thread_poller);
+GPR_TLS_DECL(g_current_thread_worker);

-void grpc_pollset_kick(grpc_pollset *p) {
-  if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p && p->counter) {
-    p->vtable->kick(p);
-  }
+static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev->next = worker->next;
+  worker->next->prev = worker->prev;
 }

-void grpc_pollset_force_kick(grpc_pollset *p) {
-  if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
-    grpc_pollset_kick_kick(&p->kick_state);
-  }
+int grpc_pollset_has_workers(grpc_pollset *p) {
+  return p->root_worker.next != &p->root_worker;
 }

-static void kick_using_pollset_kick(grpc_pollset *p) {
-  if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
-    grpc_pollset_kick_kick(&p->kick_state);
+static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+  if (grpc_pollset_has_workers(p)) {
+    grpc_pollset_worker *w = p->root_worker.next;
+    remove_worker(p, w);
+    return w;
+  } else {
+    return NULL;
+  }
+}
+
+static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->next = &p->root_worker;
+  worker->prev = worker->next->prev;
+  worker->prev->next = worker->next->prev = worker;
+}
+
+static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev = &p->root_worker;
+  worker->next = worker->prev->next;
+  worker->prev->next = worker->next->prev = worker;
+}
+
+void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+  if (specific_worker != NULL) {
+    if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+      for (specific_worker = p->root_worker.next;
+           specific_worker != &p->root_worker;
+           specific_worker = specific_worker->next) {
+        grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+      }
+      p->kicked_without_pollers = 1;
+    } else if (gpr_tls_get(&g_current_thread_worker) !=
+               (gpr_intptr)specific_worker) {
+      grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+    }
+  } else if (gpr_tls_get(&g_current_thread_poller) != (gpr_intptr)p) {
+    specific_worker = pop_front_worker(p);
+    if (specific_worker != NULL) {
+      push_back_worker(p, specific_worker);
+      grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd);
+    } else {
+      p->kicked_without_pollers = 1;
+    }
   }
 }
@@ -78,16 +116,12 @@ static void kick_using_pollset_kick(grpc_pollset *p) {
 void grpc_pollset_global_init(void) {
   gpr_tls_init(&g_current_thread_poller);
-
-  /* Initialize kick fd state */
-  grpc_pollset_kick_global_init();
+  grpc_wakeup_fd_global_init();
 }

 void grpc_pollset_global_shutdown(void) {
-  /* destroy the kick pipes */
-  grpc_pollset_kick_global_destroy();
-
   gpr_tls_destroy(&g_current_thread_poller);
+  grpc_wakeup_fd_global_destroy();
 }

 /* main interface */
@@ -96,7 +130,7 @@ static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
 void grpc_pollset_init(grpc_pollset *pollset) {
   gpr_mu_init(&pollset->mu);
-  grpc_pollset_kick_init(&pollset->kick_state);
+  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
   pollset->in_flight_cbs = 0;
   pollset->shutting_down = 0;
   pollset->called_shutdown = 0;
@@ -134,27 +168,44 @@ static void finish_shutdown(grpc_pollset *pollset) {
   pollset->shutdown_done_cb(pollset->shutdown_done_arg);
 }

-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
+int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                      gpr_timespec deadline) {
   /* pollset->mu already held */
   gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+  int added_worker = 0;
   if (gpr_time_cmp(now, deadline) > 0) {
     return 0;
   }
+  /* this must happen before we (potentially) drop pollset->mu */
+  worker->next = worker->prev = NULL;
+  /* TODO(ctiller): pool these */
+  grpc_wakeup_fd_init(&worker->wakeup_fd);
   if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1)) {
-    return 1;
+    goto done;
   }
   if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
-    return 1;
+    goto done;
   }
   if (pollset->shutting_down) {
-    return 1;
+    goto done;
   }
-  gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
-  pollset->vtable->maybe_work(pollset, deadline, now, 1);
-  gpr_tls_set(&g_current_thread_poller, 0);
+  if (!pollset->kicked_without_pollers) {
+    push_front_worker(pollset, worker);
+    added_worker = 1;
+    gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
+    pollset->vtable->maybe_work(pollset, worker, deadline, now, 1);
+    gpr_tls_set(&g_current_thread_poller, 0);
+  } else {
+    pollset->kicked_without_pollers = 0;
+  }
+done:
+  grpc_wakeup_fd_destroy(&worker->wakeup_fd);
+  if (added_worker) {
+    remove_worker(pollset, worker);
+  }
   if (pollset->shutting_down) {
-    if (pollset->counter > 0) {
-      grpc_pollset_kick(pollset);
+    if (grpc_pollset_has_workers(pollset)) {
+      grpc_pollset_kick(pollset, NULL);
     } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
       pollset->called_shutdown = 1;
       gpr_mu_unlock(&pollset->mu);
@@ -177,15 +228,13 @@ void grpc_pollset_shutdown(grpc_pollset *pollset,
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutting_down = 1;
   if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
-      pollset->counter == 0) {
+      !grpc_pollset_has_workers(pollset)) {
     pollset->called_shutdown = 1;
     call_shutdown = 1;
   }
   pollset->shutdown_done_cb = shutdown_done;
   pollset->shutdown_done_arg = shutdown_done_arg;
-  if (pollset->counter > 0) {
-    grpc_pollset_kick(pollset);
-  }
+  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   gpr_mu_unlock(&pollset->mu);

   if (call_shutdown) {
@@ -196,8 +245,8 @@ void grpc_pollset_shutdown(grpc_pollset *pollset,
 void grpc_pollset_destroy(grpc_pollset *pollset) {
   GPR_ASSERT(pollset->shutting_down);
   GPR_ASSERT(pollset->in_flight_cbs == 0);
+  GPR_ASSERT(!grpc_pollset_has_workers(pollset));
   pollset->vtable->destroy(pollset);
-  grpc_pollset_kick_destroy(&pollset->kick_state);
   gpr_mu_destroy(&pollset->mu);
 }
@@ -248,8 +297,8 @@ static void basic_do_promote(void *args, int success) {
   gpr_mu_lock(&pollset->mu);
   /* First we need to ensure that nobody is polling concurrently */
-  if (pollset->counter != 0) {
-    grpc_pollset_kick(pollset);
+  if (grpc_pollset_has_workers(pollset)) {
+    grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
     grpc_iomgr_add_callback(&up_args->promotion_closure);
     gpr_mu_unlock(&pollset->mu);
     return;
@@ -264,7 +313,8 @@ static void basic_do_promote(void *args, int success) {
   pollset->in_flight_cbs--;
   if (pollset->shutting_down) {
     /* We don't care about this pollset anymore. */
-    if (pollset->in_flight_cbs == 0 && pollset->counter == 0 && !pollset->called_shutdown) {
+    if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
+      GPR_ASSERT(!grpc_pollset_has_workers(pollset));
       pollset->called_shutdown = 1;
       do_shutdown_cb = 1;
     }
@@ -307,7 +357,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
   GPR_ASSERT(fd);
   if (fd == pollset->data.ptr) goto exit;

-  if (!pollset->counter) {
+  if (!grpc_pollset_has_workers(pollset)) {
     /* Fast path -- no in flight cbs */
     /* TODO(klempner): Comment this out and fix any test failures or establish
      * they are due to timing issues */
@@ -343,7 +393,7 @@ static void basic_pollset_add_fd(grpc_pollset *pollset, grpc_fd *fd,
   up_args->promotion_closure.cb_arg = up_args;
   grpc_iomgr_add_callback(&up_args->promotion_closure);

-  grpc_pollset_kick(pollset);
+  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);

 exit:
   if (and_unlock_pollset) {
@@ -365,12 +415,12 @@ static void basic_pollset_del_fd(grpc_pollset *pollset, grpc_fd *fd,
 }

 static void basic_pollset_maybe_work(grpc_pollset *pollset,
+                                     grpc_pollset_worker *worker,
                                      gpr_timespec deadline, gpr_timespec now,
                                      int allow_synchronous_callback) {
   struct pollfd pfd[2];
   grpc_fd *fd;
   grpc_fd_watcher fd_watcher;
-  grpc_kick_fd_info *kfd;
   int timeout;
   int r;
   int nfds;
@@ -387,16 +437,10 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
     fd = pollset->data.ptr = NULL;
   }
   timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
-  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
-  if (kfd == NULL) {
-    /* Already kicked */
-    return;
-  }
-  pfd[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
+  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
   pfd[0].events = POLLIN;
   pfd[0].revents = 0;
   nfds = 1;
-  pollset->counter++;
   if (fd) {
     pfd[1].fd = fd->fd;
     pfd[1].revents = 0;
@@ -428,7 +472,7 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
     /* do nothing */
   } else {
     if (pfd[0].revents & POLLIN) {
-      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
+      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
     }
     if (nfds > 1) {
       if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
@@ -440,14 +484,10 @@ static void basic_pollset_maybe_work(grpc_pollset *pollset,
     }
   }

-  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);
-
   gpr_mu_lock(&pollset->mu);
-  pollset->counter--;
 }

 static void basic_pollset_destroy(grpc_pollset *pollset) {
-  GPR_ASSERT(pollset->counter == 0);
   if (pollset->data.ptr != NULL) {
     GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
     pollset->data.ptr = NULL;
@@ -456,13 +496,12 @@ static void basic_pollset_destroy(grpc_pollset *pollset) {
 static const grpc_pollset_vtable basic_pollset = {
     basic_pollset_add_fd, basic_pollset_del_fd, basic_pollset_maybe_work,
-    kick_using_pollset_kick, basic_pollset_destroy, basic_pollset_destroy};
+    basic_pollset_destroy, basic_pollset_destroy};

 static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
   pollset->vtable = &basic_pollset;
-  pollset->counter = 0;
   pollset->data.ptr = fd_or_null;
-  if (fd_or_null) {
+  if (fd_or_null != NULL) {
     GRPC_FD_REF(fd_or_null, "basicpoll");
   }
 }
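
Note: grpc_pollset_kick now has three modes, all requiring the pollset lock per the pollset.h contract. A short sketch; the function name is invented for illustration:

static void kick_examples(grpc_pollset *pollset, grpc_pollset_worker *worker) {
  /* kick one arbitrary worker; pop_front/push_back rotates the list, and if
     nobody is polling, kicked_without_pollers makes the next
     grpc_pollset_work return immediately */
  grpc_pollset_kick(pollset, NULL);
  /* kick every worker, as grpc_pollset_shutdown now does */
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  /* kick one specific worker (skipped when it is the calling thread) */
  grpc_pollset_kick(pollset, worker);
}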

@@ -35,8 +35,7 @@
 #define GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H

 #include <grpc/support/sync.h>
-#include "src/core/iomgr/pollset_kick_posix.h"
+#include "src/core/iomgr/wakeup_fd_posix.h"

 typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@@ -45,6 +44,12 @@ typedef struct grpc_pollset_vtable grpc_pollset_vtable;
    use the struct tag */
 struct grpc_fd;

+typedef struct grpc_pollset_worker {
+  grpc_wakeup_fd wakeup_fd;
+  struct grpc_pollset_worker *next;
+  struct grpc_pollset_worker *prev;
+} grpc_pollset_worker;
+
 typedef struct grpc_pollset {
   /* pollsets under posix can mutate representation as fds are added and
      removed.
@@ -52,11 +57,11 @@ typedef struct grpc_pollset {
      few fds, and an epoll() based implementation for many fds */
   const grpc_pollset_vtable *vtable;
   gpr_mu mu;
-  grpc_pollset_kick_state kick_state;
-  int counter;
+  grpc_pollset_worker root_worker;
   int in_flight_cbs;
   int shutting_down;
   int called_shutdown;
+  int kicked_without_pollers;
   void (*shutdown_done_cb)(void *arg);
   void *shutdown_done_arg;
   union {
@@ -70,9 +75,9 @@ struct grpc_pollset_vtable {
                  int and_unlock_pollset);
   void (*del_fd)(grpc_pollset *pollset, struct grpc_fd *fd,
                  int and_unlock_pollset);
-  void (*maybe_work)(grpc_pollset *pollset, gpr_timespec deadline,
-                     gpr_timespec now, int allow_synchronous_callback);
-  void (*kick)(grpc_pollset *pollset);
+  void (*maybe_work)(grpc_pollset *pollset, grpc_pollset_worker *worker,
+                     gpr_timespec deadline, gpr_timespec now,
+                     int allow_synchronous_callback);
   void (*finish_shutdown)(grpc_pollset *pollset);
   void (*destroy)(grpc_pollset *pollset);
 };
@@ -85,12 +90,6 @@ void grpc_pollset_add_fd(grpc_pollset *pollset, struct grpc_fd *fd);
    poll after an fd is orphaned) */
 void grpc_pollset_del_fd(grpc_pollset *pollset, struct grpc_fd *fd);

-/* Force any current pollers to break polling: it's the callers responsibility
-   to ensure that the pollset indeed needs to be kicked - no verification that
-   the pollset is actually performing polling work is done. At worst this will
-   result in spurious wakeups if performed at the wrong moment.
-   Does not touch pollset->mu. */
-void grpc_pollset_force_kick(grpc_pollset *pollset);
 /* Returns the fd to listen on for kicks */
 int grpc_kick_read_fd(grpc_pollset *p);
 /* Call after polling has been kicked to leave the kicked state */
@@ -114,4 +113,8 @@ extern grpc_platform_become_multipoller_type grpc_platform_become_multipoller;
 void grpc_poll_become_multipoller(grpc_pollset *pollset, struct grpc_fd **fds,
                                   size_t fd_count);

+/* Return 1 if the pollset has active threads in grpc_pollset_work (pollset must
+ * be locked) */
+int grpc_pollset_has_workers(grpc_pollset *pollset);
+
 #endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_POSIX_H */

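The grpc_pollset_worker introduced above chains workers into a circular doubly-linked list whose sentinel is pollset->root_worker: an empty list is the sentinel linked to itself, so splices need no NULL checks. A self-contained sketch of that discipline with simplified types (the Windows file below adds exactly these helpers):

#include <assert.h>
#include <stddef.h>

typedef struct worker { struct worker *next, *prev; } worker;

static void ring_init(worker *root) { root->next = root->prev = root; }
static int ring_empty(worker *root) { return root->next == root; }

static void push_front(worker *root, worker *w) {
  w->prev = root;
  w->next = root->next;
  w->prev->next = w->next->prev = w;
}

static worker *pop_front(worker *root) {
  worker *w;
  if (ring_empty(root)) return NULL;
  w = root->next;
  w->prev->next = w->next;
  w->next->prev = w->prev;
  return w;
}

int main(void) {
  worker root, a, b;
  ring_init(&root);
  assert(ring_empty(&root));
  push_front(&root, &a);
  push_front(&root, &b);
  assert(pop_front(&root) == &b); /* LIFO via push_front/pop_front */
  assert(pop_front(&root) == &a);
  assert(ring_empty(&root));
  return 0;
}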
@@ -42,6 +42,38 @@
 #include "src/core/iomgr/pollset.h"
 #include "src/core/iomgr/pollset_windows.h"

+static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev->next = worker->next;
+  worker->next->prev = worker->prev;
+}
+
+static int has_workers(grpc_pollset *p) {
+  return p->root_worker.next != &p->root_worker;
+}
+
+static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
+  if (has_workers(p)) {
+    grpc_pollset_worker *w = p->root_worker.next;
+    remove_worker(p, w);
+    return w;
+  } else {
+    return NULL;
+  }
+}
+
+static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->next = &p->root_worker;
+  worker->prev = worker->next->prev;
+  worker->prev->next = worker->next->prev = worker;
+}
+
+static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
+  worker->prev = &p->root_worker;
+  worker->next = worker->prev->next;
+  worker->prev->next = worker->next->prev = worker;
+}
+
 /* There isn't really any such thing as a pollset under Windows, due to the
    nature of the IO completion ports. We're still going to provide a minimal
    set of features for the sake of the rest of grpc. But grpc_pollset_work
@@ -50,7 +82,8 @@
 void grpc_pollset_init(grpc_pollset *pollset) {
   memset(pollset, 0, sizeof(*pollset));
   gpr_mu_init(&pollset->mu);
-  gpr_cv_init(&pollset->cv);
+  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
+  pollset->kicked_without_pollers = 0;
 }

 void grpc_pollset_shutdown(grpc_pollset *pollset,
@@ -58,34 +91,66 @@ void grpc_pollset_shutdown(grpc_pollset *pollset,
                            void *shutdown_done_arg) {
   gpr_mu_lock(&pollset->mu);
   pollset->shutting_down = 1;
-  gpr_cv_broadcast(&pollset->cv);
+  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   gpr_mu_unlock(&pollset->mu);
   shutdown_done(shutdown_done_arg);
 }

 void grpc_pollset_destroy(grpc_pollset *pollset) {
   gpr_mu_destroy(&pollset->mu);
-  gpr_cv_destroy(&pollset->cv);
 }

-int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
+int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline) {
   gpr_timespec now;
+  int added_worker = 0;
   now = gpr_now(GPR_CLOCK_MONOTONIC);
   if (gpr_time_cmp(now, deadline) > 0) {
     return 0 /* GPR_FALSE */;
   }
+  worker->next = worker->prev = NULL;
+  gpr_cv_init(&worker->cv);
   if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
-    return 1 /* GPR_TRUE */;
+    goto done;
   }
   if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
-    return 1 /* GPR_TRUE */;
+    goto done;
   }
-  if (!pollset->shutting_down) {
-    gpr_cv_wait(&pollset->cv, &pollset->mu, deadline);
+  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
+    push_front_worker(pollset, worker);
+    added_worker = 1;
+    gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
+  } else {
+    pollset->kicked_without_pollers = 0;
   }
+done:
+  gpr_cv_destroy(&worker->cv);
+  if (added_worker) {
+    remove_worker(pollset, worker);
+  }
   return 1 /* GPR_TRUE */;
 }

-void grpc_pollset_kick(grpc_pollset *p) { gpr_cv_signal(&p->cv); }
+void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
+  if (specific_worker != NULL) {
+    if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
+      for (specific_worker = p->root_worker.next;
+           specific_worker != &p->root_worker;
+           specific_worker = specific_worker->next) {
+        gpr_cv_signal(&specific_worker->cv);
+      }
+      p->kicked_without_pollers = 1;
+    } else {
+      gpr_cv_signal(&specific_worker->cv);
+    }
+  } else {
+    specific_worker = pop_front_worker(p);
+    if (specific_worker != NULL) {
+      push_back_worker(p, specific_worker);
+      gpr_cv_signal(&specific_worker->cv);
+    } else {
+      p->kicked_without_pollers = 1;
+    }
+  }
+}

 #endif /* GPR_WINSOCK_SOCKET */

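The rewritten grpc_pollset_kick above distinguishes three cases: wake one specific worker, broadcast to all, or wake any one worker, latching kicked_without_pollers when nobody is waiting. A compact sketch of those semantics using pthreads instead of the gpr primitives; types and the BROADCAST sentinel mirror the diff but are simplified for illustration:

#include <pthread.h>
#include <stddef.h>

typedef struct worker_cv {
  pthread_cond_t cv;
  struct worker_cv *next, *prev;
} worker_cv;

typedef struct {
  pthread_mutex_t mu;
  worker_cv root;             /* sentinel ring of blocked workers */
  int kicked_without_pollers; /* latched kick when nobody is waiting */
} pollset;

#define KICK_BROADCAST ((worker_cv *)1)

/* caller holds p->mu, as grpc_pollset_kick's callers do */
static void pollset_kick(pollset *p, worker_cv *specific) {
  if (specific == KICK_BROADCAST) {
    worker_cv *w;
    for (w = p->root.next; w != &p->root; w = w->next) {
      pthread_cond_signal(&w->cv); /* wake everyone... */
    }
    p->kicked_without_pollers = 1; /* ...and latch for late arrivals */
  } else if (specific != NULL) {
    pthread_cond_signal(&specific->cv); /* wake exactly this worker */
  } else if (p->root.next != &p->root) {
    /* wake any one worker; the real code also rotates it to the back of
       the ring so successive kicks round-robin across waiting workers */
    pthread_cond_signal(&p->root.next->cv);
  } else {
    p->kicked_without_pollers = 1; /* nobody waiting: remember the kick */
  }
}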
@@ -40,12 +40,20 @@
 /* There isn't really any such thing as a pollset under Windows, due to the
    nature of the IO completion ports. A Windows "pollset" is merely a mutex
-   and a condition variable, used to synchronize with the IOCP. */
+   used to synchronize with the IOCP, and workers are condition variables
+   used to block threads until work is ready. */
+
+typedef struct grpc_pollset_worker {
+  gpr_cv cv;
+  struct grpc_pollset_worker *next;
+  struct grpc_pollset_worker *prev;
+} grpc_pollset_worker;

 typedef struct grpc_pollset {
   gpr_mu mu;
-  gpr_cv cv;
   int shutting_down;
+  int kicked_without_pollers;
+  grpc_pollset_worker root_worker;
 } grpc_pollset;

 #define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)

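Taken together, the call sites in this commit imply a usage contract for the new API: the pollset mutex is held around grpc_pollset_work, a grpc_pollset_worker lives on the waiting thread's stack for the duration of one wait, and kickers take the same mutex before kicking. A sketch of that contract, assuming pollset.h pulls in the platform definitions as the sources above do; the g_done flag is a hypothetical completion condition:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset.h"

static grpc_pollset g_pollset; /* assume grpc_pollset_init(&g_pollset) ran */
static int g_done = 0;         /* hypothetical completion flag */

/* waiting thread */
static void wait_until_done(gpr_timespec deadline) {
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (!g_done) {
    grpc_pollset_worker worker; /* one worker per wait, on the stack */
    grpc_pollset_work(&g_pollset, &worker, deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}

/* completion thread */
static void signal_done(void) {
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  g_done = 1;
  grpc_pollset_kick(&g_pollset, NULL); /* wake any one waiting worker */
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}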
@@ -42,7 +42,7 @@
 #include "src/core/iomgr/wakeup_fd_posix.h"
 #include <grpc/support/log.h>

-static void eventfd_create(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_create(grpc_wakeup_fd *fd_info) {
   int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
   /* TODO(klempner): Handle failure more gracefully */
   GPR_ASSERT(efd >= 0);
@@ -50,7 +50,7 @@ static void eventfd_create(grpc_wakeup_fd *fd_info) {
   fd_info->write_fd = -1;
 }

-static void eventfd_consume(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_consume(grpc_wakeup_fd *fd_info) {
   eventfd_t value;
   int err;
   do {
@@ -58,14 +58,14 @@ static void eventfd_consume(grpc_wakeup_fd *fd_info) {
   } while (err < 0 && errno == EINTR);
 }

-static void eventfd_wakeup(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_wakeup(grpc_wakeup_fd *fd_info) {
   int err;
   do {
     err = eventfd_write(fd_info->read_fd, 1);
   } while (err < 0 && errno == EINTR);
 }

-static void eventfd_destroy(grpc_wakeup_fd_info *fd_info) {
+static void eventfd_destroy(grpc_wakeup_fd *fd_info) {
   close(fd_info->read_fd);
 }

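A stand-alone illustration of the eventfd mechanism wrapped above (Linux-only): eventfd_write adds to an in-kernel counter, and a single eventfd_read returns and clears it, so one consume drains any number of coalesced wakeups:

#include <stdio.h>
#include <sys/eventfd.h>

int main(void) {
  int efd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
  eventfd_t value;
  eventfd_write(efd, 1); /* first wakeup */
  eventfd_write(efd, 1); /* second wakeup coalesces into the counter */
  eventfd_read(efd, &value);
  printf("consumed %llu coalesced wakeups\n",
         (unsigned long long)value); /* prints 2 */
  return 0;
}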
@@ -44,7 +44,7 @@
 #include "src/core/iomgr/socket_utils_posix.h"
 #include <grpc/support/log.h>

-static void pipe_create(grpc_wakeup_fd_info *fd_info) {
+static void pipe_init(grpc_wakeup_fd *fd_info) {
   int pipefd[2];
   /* TODO(klempner): Make this nonfatal */
   GPR_ASSERT(0 == pipe(pipefd));
@@ -54,7 +54,7 @@ static void pipe_init(grpc_wakeup_fd *fd_info) {
   fd_info->write_fd = pipefd[1];
 }

-static void pipe_consume(grpc_wakeup_fd_info *fd_info) {
+static void pipe_consume(grpc_wakeup_fd *fd_info) {
   char buf[128];
   int r;
@@ -74,13 +74,13 @@ static void pipe_consume(grpc_wakeup_fd *fd_info) {
   }
 }

-static void pipe_wakeup(grpc_wakeup_fd_info *fd_info) {
+static void pipe_wakeup(grpc_wakeup_fd *fd_info) {
   char c = 0;
   while (write(fd_info->write_fd, &c, 1) != 1 && errno == EINTR)
     ;
 }

-static void pipe_destroy(grpc_wakeup_fd_info *fd_info) {
+static void pipe_destroy(grpc_wakeup_fd *fd_info) {
   close(fd_info->read_fd);
   close(fd_info->write_fd);
 }
@@ -91,7 +91,7 @@ static int pipe_check_availability(void) {
 }

 const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable = {
-    pipe_create, pipe_consume, pipe_wakeup, pipe_destroy, pipe_check_availability
-};
+    pipe_init, pipe_consume, pipe_wakeup, pipe_destroy,
+    pipe_check_availability};

 #endif /* GPR_POSIX_WAKUP_FD */

@@ -57,19 +57,19 @@ void grpc_wakeup_fd_global_destroy(void) {
   wakeup_fd_vtable = NULL;
 }

-void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info) {
-  wakeup_fd_vtable->create(fd_info);
+void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info) {
+  wakeup_fd_vtable->init(fd_info);
 }

-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info) {
   wakeup_fd_vtable->consume(fd_info);
 }

-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info) {
   wakeup_fd_vtable->wakeup(fd_info);
 }

-void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info) {
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info) {
   wakeup_fd_vtable->destroy(fd_info);
 }

@@ -69,28 +69,28 @@ void grpc_wakeup_fd_global_destroy(void);
  * purposes only.*/
 void grpc_wakeup_fd_global_init_force_fallback(void);

-typedef struct grpc_wakeup_fd_info grpc_wakeup_fd_info;
+typedef struct grpc_wakeup_fd grpc_wakeup_fd;

 typedef struct grpc_wakeup_fd_vtable {
-  void (*create)(grpc_wakeup_fd_info *fd_info);
-  void (*consume)(grpc_wakeup_fd_info *fd_info);
-  void (*wakeup)(grpc_wakeup_fd_info *fd_info);
-  void (*destroy)(grpc_wakeup_fd_info *fd_info);
+  void (*init)(grpc_wakeup_fd *fd_info);
+  void (*consume)(grpc_wakeup_fd *fd_info);
+  void (*wakeup)(grpc_wakeup_fd *fd_info);
+  void (*destroy)(grpc_wakeup_fd *fd_info);
   /* Must be called before calling any other functions */
   int (*check_availability)(void);
 } grpc_wakeup_fd_vtable;

-struct grpc_wakeup_fd_info {
+struct grpc_wakeup_fd {
   int read_fd;
   int write_fd;
 };

 #define GRPC_WAKEUP_FD_GET_READ_FD(fd_info) ((fd_info)->read_fd)

-void grpc_wakeup_fd_create(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_wakeup(grpc_wakeup_fd_info *fd_info);
-void grpc_wakeup_fd_destroy(grpc_wakeup_fd_info *fd_info);
+void grpc_wakeup_fd_init(grpc_wakeup_fd *fd_info);
+void grpc_wakeup_fd_consume_wakeup(grpc_wakeup_fd *fd_info);
+void grpc_wakeup_fd_wakeup(grpc_wakeup_fd *fd_info);
+void grpc_wakeup_fd_destroy(grpc_wakeup_fd *fd_info);

 /* Defined in some specialized implementation's .c file, or by
  * wakeup_fd_nospecial.c if no such implementation exists. */

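A typical round trip through the renamed wakeup-fd API above, as a sketch only: it assumes grpc_wakeup_fd_global_init() (the counterpart of the grpc_wakeup_fd_global_destroy shown in this header) has already selected a vtable, and that the reader side is what pollers plug into poll() via GRPC_WAKEUP_FD_GET_READ_FD:

#include <poll.h>
#include "src/core/iomgr/wakeup_fd_posix.h"

static void wakeup_fd_round_trip(void) {
  grpc_wakeup_fd wfd;
  struct pollfd pfd;
  grpc_wakeup_fd_init(&wfd);
  grpc_wakeup_fd_wakeup(&wfd); /* normally called from another thread */
  pfd.fd = GRPC_WAKEUP_FD_GET_READ_FD(&wfd);
  pfd.events = POLLIN;
  poll(&pfd, 1, -1);                   /* returns at once: fd is readable */
  grpc_wakeup_fd_consume_wakeup(&wfd); /* drain, re-arming for next kick */
  grpc_wakeup_fd_destroy(&wfd);
}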
@@ -80,7 +80,7 @@ static void on_compute_engine_detection_http_response(
   }
   gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset));
   detector->is_done = 1;
-  grpc_pollset_kick(&detector->pollset);
+  grpc_pollset_kick(&detector->pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
 }
@@ -112,7 +112,9 @@ static int is_stack_running_on_compute_engine(void) {
      called once for the lifetime of the process by the default credentials. */
   gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
   while (!detector.is_done) {
-    grpc_pollset_work(&detector.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&detector.pollset, &worker,
+                      gpr_inf_future(GPR_CLOCK_REALTIME));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));

@@ -45,6 +45,11 @@
 #include <grpc/support/atm.h>
 #include <grpc/support/log.h>

+typedef struct {
+  grpc_pollset_worker *worker;
+  void *tag;
+} plucker;
+
 /* Completion queue structure */
 struct grpc_completion_queue {
   /** completed events */
@@ -60,6 +65,8 @@ struct grpc_completion_queue {
   int shutdown;
   int shutdown_called;
   int is_server_cq;
+  int num_pluckers;
+  plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
 };

 grpc_completion_queue *grpc_completion_queue_create(void) {
@@ -122,6 +129,8 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
                     void (*done)(void *done_arg, grpc_cq_completion *storage),
                     void *done_arg, grpc_cq_completion *storage) {
   int shutdown;
+  int i;
+  grpc_pollset_worker *pluck_worker;

   storage->tag = tag;
   storage->done = done;
@@ -135,7 +144,14 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
     cc->completed_tail->next =
         ((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
     cc->completed_tail = storage;
-    grpc_pollset_kick(&cc->pollset);
+    pluck_worker = NULL;
+    for (i = 0; i < cc->num_pluckers; i++) {
+      if (cc->pluckers[i].tag == tag) {
+        pluck_worker = cc->pluckers[i].worker;
+        break;
+      }
+    }
+    grpc_pollset_kick(&cc->pollset, pluck_worker);
     gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
   } else {
     cc->completed_tail->next =
@@ -152,6 +168,7 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
 grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
   grpc_event ret;
+  grpc_pollset_worker worker;

   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -177,7 +194,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
       ret.type = GRPC_QUEUE_SHUTDOWN;
       break;
     }
-    if (!grpc_pollset_work(&cc->pollset, deadline)) {
+    if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
       gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
@@ -189,11 +206,37 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
   return ret;
 }

+static int add_plucker(grpc_completion_queue *cc, void *tag,
+                       grpc_pollset_worker *worker) {
+  if (cc->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS) {
+    return 0;
+  }
+  cc->pluckers[cc->num_pluckers].tag = tag;
+  cc->pluckers[cc->num_pluckers].worker = worker;
+  cc->num_pluckers++;
+  return 1;
+}
+
+static void del_plucker(grpc_completion_queue *cc, void *tag,
+                        grpc_pollset_worker *worker) {
+  int i;
+  for (i = 0; i < cc->num_pluckers; i++) {
+    if (cc->pluckers[i].tag == tag && cc->pluckers[i].worker == worker) {
+      cc->num_pluckers--;
+      GPR_SWAP(plucker, cc->pluckers[i], cc->pluckers[cc->num_pluckers]);
+      return;
+    }
+  }
+  gpr_log(GPR_ERROR, "should never reach here");
+  abort();
+}
+
 grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                        gpr_timespec deadline) {
   grpc_event ret;
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
+  grpc_pollset_worker worker;

   deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
@@ -224,12 +267,24 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
       ret.type = GRPC_QUEUE_SHUTDOWN;
       break;
     }
-    if (!grpc_pollset_work(&cc->pollset, deadline)) {
+    if (!add_plucker(cc, tag, &worker)) {
+      gpr_log(GPR_DEBUG,
+              "Too many outstanding grpc_completion_queue_pluck calls: "
+              "maximum is %d",
+              GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
+      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
+      memset(&ret, 0, sizeof(ret));
+      /* TODO(ctiller): should we use a different result here */
+      ret.type = GRPC_QUEUE_TIMEOUT;
+      break;
+    }
+    if (!grpc_pollset_work(&cc->pollset, &worker, deadline)) {
+      del_plucker(cc, tag, &worker);
       gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
       break;
     }
+    del_plucker(cc, tag, &worker);
   }
 done:
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
@@ -266,15 +321,6 @@ grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
   return &cc->pollset;
 }

-void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
-  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
-  grpc_pollset_kick(&cc->pollset);
-  grpc_pollset_work(&cc->pollset,
-                    gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
-                                 gpr_time_from_millis(100, GPR_TIMESPAN)));
-  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
-}
-
 void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }

 int grpc_cq_is_server_cq(grpc_completion_queue *cc) { return cc->is_server_cq; }

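del_plucker above deletes from the small plucker table by swapping the victim with the last element, keeping removal O(1) at the cost of element order, which the table does not need. A sketch of that unordered swap-remove with illustrative types:

#include <assert.h>

typedef struct { void *tag; void *worker; } entry;

static void swap_remove(entry *arr, int *n, int i) {
  entry tmp = arr[i];
  arr[i] = arr[*n - 1];  /* move last element into the vacated slot */
  arr[*n - 1] = tmp;
  (*n)--;
}

int main(void) {
  entry e[3] = {{(void *)1, 0}, {(void *)2, 0}, {(void *)3, 0}};
  int n = 3;
  swap_remove(e, &n, 0);         /* remove the entry with tag 1 */
  assert(n == 2);
  assert(e[0].tag == (void *)3); /* last element took slot 0 */
  return 0;
}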
@@ -77,8 +77,6 @@ void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,

 grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc);

-void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc);
-
 void grpc_cq_mark_server_cq(grpc_completion_queue *cc);
 int grpc_cq_is_server_cq(grpc_completion_queue *cc);

@@ -327,6 +327,14 @@ static void request_matcher_zombify_all_pending_calls(
   }
 }

+static void request_matcher_kill_requests(grpc_server *server,
+                                          request_matcher *rm) {
+  int request_id;
+  while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
+    fail_call(server, &server->requested_calls[request_id]);
+  }
+}
+
 /*
  * server proper
  */
@@ -492,12 +500,25 @@ static int num_channels(grpc_server *server) {
   return n;
 }

+static void kill_pending_work_locked(grpc_server *server) {
+  registered_method *rm;
+  request_matcher_kill_requests(server, &server->unregistered_request_matcher);
+  request_matcher_zombify_all_pending_calls(
+      &server->unregistered_request_matcher);
+  for (rm = server->registered_methods; rm; rm = rm->next) {
+    request_matcher_kill_requests(server, &rm->request_matcher);
+    request_matcher_zombify_all_pending_calls(&rm->request_matcher);
+  }
+}
+
 static void maybe_finish_shutdown(grpc_server *server) {
   size_t i;
   if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
     return;
   }
+  kill_pending_work_locked(server);
   if (server->root_channel_data.next != &server->root_channel_data ||
       server->listeners_destroyed < num_listeners(server)) {
     if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME),
@@ -947,52 +968,15 @@ void grpc_server_setup_transport(grpc_server *s, grpc_transport *transport,
   op.set_accept_stream_user_data = chand;
   op.on_connectivity_state_change = &chand->channel_connectivity_changed;
   op.connectivity_state = &chand->connectivity_state;
+  op.disconnect = gpr_atm_acq_load(&s->shutdown_flag);
   grpc_transport_perform_op(transport, &op);
 }

-typedef struct {
-  requested_call **requests;
-  size_t count;
-  size_t capacity;
-} request_killer;
-
-static void request_killer_init(request_killer *rk) {
-  memset(rk, 0, sizeof(*rk));
-}
-
-static void request_killer_add(request_killer *rk, requested_call *rc) {
-  if (rk->capacity == rk->count) {
-    rk->capacity = GPR_MAX(8, rk->capacity * 2);
-    rk->requests =
-        gpr_realloc(rk->requests, rk->capacity * sizeof(*rk->requests));
-  }
-  rk->requests[rk->count++] = rc;
-}
-
-static void request_killer_add_request_matcher(request_killer *rk,
-                                               grpc_server *server,
-                                               request_matcher *rm) {
-  int request_id;
-  while ((request_id = gpr_stack_lockfree_pop(rm->requests)) != -1) {
-    request_killer_add(rk, &server->requested_calls[request_id]);
-  }
-}
-
-static void request_killer_run(request_killer *rk, grpc_server *server) {
-  size_t i;
-  for (i = 0; i < rk->count; i++) {
-    fail_call(server, rk->requests[i]);
-  }
-  gpr_free(rk->requests);
-}
-
 void grpc_server_shutdown_and_notify(grpc_server *server,
                                      grpc_completion_queue *cq, void *tag) {
   listener *l;
-  registered_method *rm;
   shutdown_tag *sdt;
   channel_broadcaster broadcaster;
-  request_killer reqkill;

   GRPC_SERVER_LOG_SHUTDOWN(GPR_INFO, server, cq, tag);
@@ -1013,27 +997,16 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
   server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);

   channel_broadcaster_init(server, &broadcaster);
-  request_killer_init(&reqkill);

   /* collect all unregistered then registered calls */
   gpr_mu_lock(&server->mu_call);
-  request_killer_add_request_matcher(&reqkill, server,
-                                     &server->unregistered_request_matcher);
-  request_matcher_zombify_all_pending_calls(
-      &server->unregistered_request_matcher);
-  for (rm = server->registered_methods; rm; rm = rm->next) {
-    request_killer_add_request_matcher(&reqkill, server, &rm->request_matcher);
-    request_matcher_zombify_all_pending_calls(&rm->request_matcher);
-  }
+  kill_pending_work_locked(server);
   gpr_mu_unlock(&server->mu_call);

   gpr_atm_rel_store(&server->shutdown_flag, 1);
   maybe_finish_shutdown(server);
   gpr_mu_unlock(&server->mu_global);

-  /* terminate all the requested calls */
-  request_killer_run(&reqkill, server);
-
   /* Shutdown listeners */
   for (l = server->listeners; l; l = l->next) {
     l->destroy(server, l->arg);

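The shape of the drain loop in request_matcher_kill_requests above: a lock-free stack of integer request ids is popped until its empty sentinel (-1), and each id indexes a preallocated request table. A minimal single-threaded stand-in for gpr_stack_lockfree; all names here are illustrative:

#include <assert.h>

typedef struct { int items[64]; int top; } id_stack;

static void push(id_stack *s, int id) { s->items[s->top++] = id; }
static int pop(id_stack *s) { return s->top == 0 ? -1 : s->items[--s->top]; }

static void fail_call(int id) {
  (void)id; /* would deliver a completion with success=0 for this request */
}

int main(void) {
  id_stack pending = {{0}, 0};
  int request_id;
  push(&pending, 4);
  push(&pending, 7);
  /* drain-until-sentinel loop, as in request_matcher_kill_requests */
  while ((request_id = pop(&pending)) != -1) {
    fail_call(request_id);
  }
  assert(pop(&pending) == -1);
  return 0;
}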
@@ -119,6 +119,10 @@ typedef enum {
   GRPC_WRITE_STATE_SENT_CLOSE
 } grpc_chttp2_write_state;

+/* flags that can be or'd into stream_global::writing_now */
+#define GRPC_CHTTP2_WRITING_DATA 1
+#define GRPC_CHTTP2_WRITING_WINDOW 2
+
 typedef enum {
   GRPC_DONT_SEND_CLOSED = 0,
   GRPC_SEND_CLOSED,
@@ -382,7 +386,7 @@
   gpr_uint8 published_cancelled;
   /** is this stream in the stream map? (boolean) */
   gpr_uint8 in_stream_map;
-  /** is this stream actively being written? */
+  /** bitmask of GRPC_CHTTP2_WRITING_xxx above */
   gpr_uint8 writing_now;

   /** stream state already published to the upper layer */

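The two macros above split writing_now into independent reasons a stream can be queued on the writing list, and the writing.c changes below follow a set/test/clear protocol over them. A compact illustration, with the flag values copied from the diff and everything else simplified:

#include <assert.h>

#define GRPC_CHTTP2_WRITING_DATA 1
#define GRPC_CHTTP2_WRITING_WINDOW 2

int main(void) {
  unsigned char writing_now = 0;
  writing_now |= GRPC_CHTTP2_WRITING_WINDOW; /* queued a window update */
  writing_now |= GRPC_CHTTP2_WRITING_DATA;   /* and some data frames */
  assert(writing_now != 0); /* cleanup asserts the stream was writing */
  if (writing_now & GRPC_CHTTP2_WRITING_DATA) {
    /* only the DATA path acknowledges the outgoing sopb */
  }
  writing_now = 0; /* single clear once the write finishes */
  return 0;
}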
@@ -164,9 +164,6 @@ void grpc_chttp2_list_add_first_writable_stream(
     grpc_chttp2_transport_global *transport_global,
     grpc_chttp2_stream_global *stream_global) {
   GPR_ASSERT(stream_global->id != 0);
-  gpr_log(GPR_DEBUG, "add:%d:%d:%d:%d", stream_global->id,
-          stream_global->write_state, stream_global->in_stream_map,
-          stream_global->read_closed);
   stream_list_add_head(TRANSPORT_FROM_GLOBAL(transport_global),
                        STREAM_FROM_GLOBAL(stream_global),
                        GRPC_CHTTP2_LIST_WRITABLE);

@@ -77,7 +77,6 @@ int grpc_chttp2_unlocking_check_writes(
       stream_writing->id = stream_global->id;
       stream_writing->send_closed = GRPC_DONT_SEND_CLOSED;
-      GPR_ASSERT(!stream_global->writing_now);

       if (stream_global->outgoing_sopb) {
         window_delta =
@@ -123,11 +122,13 @@ int grpc_chttp2_unlocking_check_writes(
         stream_global->unannounced_incoming_window = 0;
         grpc_chttp2_list_add_incoming_window_updated(transport_global,
                                                      stream_global);
-        stream_global->writing_now = 1;
-        grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
-      } else if (stream_writing->sopb.nops > 0 ||
-                 stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
-        stream_global->writing_now = 1;
+        stream_global->writing_now |= GRPC_CHTTP2_WRITING_WINDOW;
+      }
+      if (stream_writing->sopb.nops > 0 ||
+          stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
+        stream_global->writing_now |= GRPC_CHTTP2_WRITING_DATA;
+      }
+      if (stream_global->writing_now != 0) {
         grpc_chttp2_list_add_writing_stream(transport_writing, stream_writing);
       }
     }
@@ -183,6 +184,7 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
         stream_writing->send_closed != GRPC_DONT_SEND_CLOSED,
         stream_writing->id, &transport_writing->hpack_compressor,
         &transport_writing->outbuf);
+    stream_writing->sopb.nops = 0;
   }
   if (stream_writing->announce_window > 0) {
     gpr_slice_buffer_add(
@@ -191,7 +193,6 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) {
             stream_writing->id, stream_writing->announce_window));
     stream_writing->announce_window = 0;
   }
-  stream_writing->sopb.nops = 0;
   if (stream_writing->send_closed == GRPC_SEND_CLOSED_WITH_RST_STREAM) {
     gpr_slice_buffer_add(&transport_writing->outbuf,
                          grpc_chttp2_rst_stream_create(stream_writing->id,
@@ -215,20 +216,23 @@ void grpc_chttp2_cleanup_writing(
   while (grpc_chttp2_list_pop_written_stream(
       transport_global, transport_writing, &stream_global, &stream_writing)) {
-    GPR_ASSERT(stream_global->writing_now);
-    stream_global->writing_now = 0;
+    GPR_ASSERT(stream_global->writing_now != 0);
+    if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
+      stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
+      if (!transport_global->is_client) {
+        stream_global->read_closed = 1;
+      }
+    }
+    if (stream_global->writing_now & GRPC_CHTTP2_WRITING_DATA) {
       if (stream_global->outgoing_sopb != NULL &&
           stream_global->outgoing_sopb->nops == 0) {
-        GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_QUEUED_CLOSE);
         stream_global->outgoing_sopb = NULL;
         grpc_chttp2_schedule_closure(transport_global,
                                      stream_global->send_done_closure, 1);
       }
-    if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
-      stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
-      if (!transport_global->is_client) {
-        stream_global->read_closed = 1;
-      }
     }
+    stream_global->writing_now = 0;
     grpc_chttp2_list_add_read_write_state_changed(transport_global,
                                                   stream_global);
   }

@@ -823,6 +823,12 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
                                                       stream_global);
       } else {
         stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
+        if (stream_global->outgoing_sopb != NULL) {
+          grpc_sopb_reset(stream_global->outgoing_sopb);
+          stream_global->outgoing_sopb = NULL;
+          grpc_chttp2_schedule_closure(transport_global,
+                                       stream_global->send_done_closure, 1);
+        }
         stream_global->read_closed = 1;
         if (!stream_global->published_cancelled) {
           char buffer[GPR_LTOA_MIN_BUFSIZE];
@@ -849,7 +855,7 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t) {
     if (!stream_global->publish_sopb) {
       continue;
     }
-    if (stream_global->writing_now) {
+    if (stream_global->writing_now != 0) {
       continue;
     }
     /* FIXME(ctiller): we include in_stream_map in our computation of

@@ -64,7 +64,7 @@ static void on_finish(void *arg, const grpc_httpcli_response *response) {
   GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length));
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   g_done = 1;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -87,7 +87,8 @@ static void test_get(int use_ssl, int port) {
                    (void *)42);
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
-    grpc_pollset_work(&g_pollset, n_seconds_time(20));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);
@@ -112,7 +113,8 @@ static void test_post(int use_ssl, int port) {
                     n_seconds_time(15), on_finish, (void *)42);
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!g_done) {
-    grpc_pollset_work(&g_pollset, n_seconds_time(20));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, n_seconds_time(20));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
   gpr_free(host);

@@ -132,7 +132,7 @@ static void read_and_write_test_read_handler(void *data, gpr_slice *slices,
     gpr_log(GPR_INFO, "Read handler shutdown");
     gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
     state->read_done = 1;
-    grpc_pollset_kick(g_pollset);
+    grpc_pollset_kick(g_pollset, NULL);
     gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
     return;
   }
@@ -143,7 +143,7 @@ static void read_and_write_test_read_handler(void *data, gpr_slice *slices,
     gpr_log(GPR_INFO, "Read handler done");
     gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
     state->read_done = 1;
-    grpc_pollset_kick(g_pollset);
+    grpc_pollset_kick(g_pollset, NULL);
     gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   } else {
     grpc_endpoint_notify_on_read(state->read_ep,
@@ -167,7 +167,7 @@ static void read_and_write_test_write_handler(void *data,
     gpr_log(GPR_INFO, "Write handler shutdown");
     gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
     state->write_done = 1;
-    grpc_pollset_kick(g_pollset);
+    grpc_pollset_kick(g_pollset, NULL);
     gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
     return;
   }
@@ -201,7 +201,7 @@ static void read_and_write_test_write_handler(void *data,
     gpr_log(GPR_INFO, "Write handler done");
     gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
     state->write_done = 1;
-    grpc_pollset_kick(g_pollset);
+    grpc_pollset_kick(g_pollset, NULL);
     gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   }
@@ -254,8 +254,9 @@ static void read_and_write_test(grpc_endpoint_test_config config,
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!state.read_done || !state.write_done) {
+    grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
-    grpc_pollset_work(g_pollset, deadline);
+    grpc_pollset_work(g_pollset, &worker, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
@@ -287,7 +288,7 @@ static void shutdown_during_write_test_read_handler(
       grpc_endpoint_destroy(st->ep);
       gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
       st->done = error;
-      grpc_pollset_kick(g_pollset);
+      grpc_pollset_kick(g_pollset, NULL);
       gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
     } else {
       grpc_endpoint_notify_on_read(
@@ -309,7 +310,7 @@ static void shutdown_during_write_test_write_handler(
   }
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   st->done = 1;
-  grpc_pollset_kick(g_pollset);
+  grpc_pollset_kick(g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
 }
@@ -350,15 +351,17 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config,
   deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!write_st.done) {
+    grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-    grpc_pollset_work(g_pollset, deadline);
+    grpc_pollset_work(g_pollset, &worker, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   grpc_endpoint_destroy(write_st.ep);
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!read_st.done) {
+    grpc_pollset_worker worker;
     GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
-    grpc_pollset_work(g_pollset, deadline);
+    grpc_pollset_work(g_pollset, &worker, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   gpr_free(slices);

@@ -179,7 +179,7 @@ static void listen_shutdown_cb(void *arg /*server*/, int success) {

   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   sv->done = 1;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -249,7 +249,8 @@ static int server_start(server *sv) {
 static void server_wait_and_shutdown(server *sv) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!sv->done) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -286,7 +287,7 @@ static void client_session_shutdown_cb(void *arg /*client*/, int success) {
   client *cl = arg;
   grpc_fd_orphan(cl->em_fd, NULL, "c");
   cl->done = 1;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
 }

 /* Write as much as possible, then register notify_on_write. */
@@ -356,7 +357,8 @@ static void client_start(client *cl, int port) {
 static void client_wait_and_shutdown(client *cl) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!cl->done) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -392,7 +394,7 @@ static void first_read_callback(void *arg /* fd_change_data */, int success) {

   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   fdc->cb_that_ran = first_read_callback;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -401,7 +403,7 @@ static void second_read_callback(void *arg /* fd_change_data */, int success) {

   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   fdc->cb_that_ran = second_read_callback;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -445,7 +447,8 @@ static void test_grpc_fd_change(void) {
   /* And now wait for it to run. */
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (a.cb_that_ran == NULL) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   GPR_ASSERT(a.cb_that_ran == first_read_callback);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -463,7 +466,8 @@ static void test_grpc_fd_change(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (b.cb_that_ran == NULL) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   /* Except now we verify that second_read_callback ran instead */
   GPR_ASSERT(b.cb_that_ran == second_read_callback);

@@ -1,130 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/iomgr/pollset_kick_posix.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
static void test_allocation(void) {
grpc_pollset_kick_state state;
grpc_pollset_kick_init(&state);
grpc_pollset_kick_destroy(&state);
}
static void test_non_kick(void) {
grpc_pollset_kick_state state;
grpc_kick_fd_info *kfd;
grpc_pollset_kick_init(&state);
kfd = grpc_pollset_kick_pre_poll(&state);
GPR_ASSERT(kfd != NULL);
grpc_pollset_kick_post_poll(&state, kfd);
grpc_pollset_kick_destroy(&state);
}
static void test_basic_kick(void) {
/* Kicked during poll */
grpc_pollset_kick_state state;
grpc_kick_fd_info *kfd;
grpc_pollset_kick_init(&state);
kfd = grpc_pollset_kick_pre_poll(&state);
GPR_ASSERT(kfd != NULL);
grpc_pollset_kick_kick(&state);
/* Now hypothetically we polled and found that we were kicked */
grpc_pollset_kick_consume(&state, kfd);
grpc_pollset_kick_post_poll(&state, kfd);
grpc_pollset_kick_destroy(&state);
}
static void test_non_poll_kick(void) {
/* Kick before entering poll */
grpc_pollset_kick_state state;
grpc_kick_fd_info *kfd;
grpc_pollset_kick_init(&state);
grpc_pollset_kick_kick(&state);
kfd = grpc_pollset_kick_pre_poll(&state);
GPR_ASSERT(kfd == NULL);
grpc_pollset_kick_destroy(&state);
}
#define GRPC_MAX_CACHED_PIPES 50
static void test_over_free(void) {
/* Check high watermark pipe free logic */
int i;
grpc_kick_fd_info **kfds =
gpr_malloc(sizeof(grpc_kick_fd_info *) * GRPC_MAX_CACHED_PIPES);
grpc_pollset_kick_state state;
grpc_pollset_kick_init(&state);
for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) {
kfds[i] = grpc_pollset_kick_pre_poll(&state);
GPR_ASSERT(kfds[i] != NULL);
}
for (i = 0; i < GRPC_MAX_CACHED_PIPES; ++i) {
grpc_pollset_kick_post_poll(&state, kfds[i]);
}
grpc_pollset_kick_destroy(&state);
gpr_free(kfds);
}
static void run_tests(void) {
test_allocation();
test_basic_kick();
test_non_poll_kick();
test_non_kick();
test_over_free();
}
int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_pollset_kick_global_init();
run_tests();
grpc_pollset_kick_global_destroy();
grpc_pollset_kick_global_init_fallback_fd();
run_tests();
grpc_pollset_kick_global_destroy();
return 0;
}

@@ -56,7 +56,7 @@ static gpr_timespec test_deadline(void) {
 static void finish_connection() {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   g_connections_complete++;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -111,7 +111,8 @@ void test_succeeds(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (g_connections_complete == connections_complete_before) {
-    grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -140,7 +141,8 @@ void test_fails(void) {
   /* wait for the connection callback to finish */
   while (g_connections_complete == connections_complete_before) {
-    grpc_pollset_work(&g_pollset, test_deadline());
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, test_deadline());
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -199,6 +201,7 @@ void test_times_out(void) {
                       gpr_now(connect_deadline.clock_type)) > 0) {
     int is_after_deadline =
         gpr_time_cmp(connect_deadline, gpr_now(GPR_CLOCK_MONOTONIC)) <= 0;
+    grpc_pollset_worker worker;
     if (is_after_deadline &&
         gpr_time_cmp(gpr_time_add(connect_deadline,
                                   gpr_time_from_seconds(1, GPR_TIMESPAN)),
@@ -208,7 +211,7 @@ void test_times_out(void) {
       GPR_ASSERT(g_connections_complete ==
                  connections_complete_before + is_after_deadline);
     }
-    grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

@@ -186,7 +186,8 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
-    grpc_pollset_work(&g_pollset, deadline);
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -222,7 +223,8 @@ static void large_read_test(ssize_t slice_size) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (state.read_bytes < state.target_read_bytes) {
-    grpc_pollset_work(&g_pollset, deadline);
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, deadline);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -265,7 +267,7 @@ static void write_done(void *user_data /* write_socket_state */,
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   gpr_log(GPR_INFO, "Signalling write done");
   state->write_done = 1;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -281,8 +283,9 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
   GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0);

   for (;;) {
+    grpc_pollset_worker worker;
     gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
-    grpc_pollset_work(&g_pollset, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
+    grpc_pollset_work(&g_pollset, &worker, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
     gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
     do {
       bytes_read =
@@ -358,10 +361,11 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) {
   drain_socket_blocking(sv[0], num_bytes, num_bytes);
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   for (;;) {
+    grpc_pollset_worker worker;
     if (state.write_done) {
       break;
     }
-    grpc_pollset_work(&g_pollset, deadline);
+    grpc_pollset_work(&g_pollset, &worker, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -387,6 +391,7 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) {
   size_t num_blocks;
   gpr_slice *slices;
   int current_data = 0;
+  grpc_pollset_worker worker;
   gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

   gpr_log(GPR_INFO, "Start write error test with %d bytes, slice size %d",
@@ -417,7 +422,7 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) {
       if (state.write_done) {
        break;
       }
-      grpc_pollset_work(&g_pollset, deadline);
+      grpc_pollset_work(&g_pollset, &worker, deadline);
     }
     gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
     break;

@@ -54,7 +54,7 @@ static void on_connect(void *arg, grpc_endpoint *tcp) {

   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   g_nconnects++;
-  grpc_pollset_kick(&g_pollset);
+  grpc_pollset_kick(&g_pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -136,7 +136,8 @@ static void test_connect(int n) {
   gpr_log(GPR_DEBUG, "wait");
   while (g_nconnects == nconnects_before &&
          gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
-    grpc_pollset_work(&g_pollset, deadline);
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&g_pollset, &worker, deadline);
   }
   gpr_log(GPR_DEBUG, "wait done");

@@ -68,7 +68,7 @@ static void on_oauth2_response(void *user_data, grpc_credentials_md *md_elems,
   gpr_mu_lock(GRPC_POLLSET_MU(&request->pollset));
   request->is_done = 1;
   request->token = token;
-  grpc_pollset_kick(&request->pollset);
+  grpc_pollset_kick(&request->pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&request->pollset));
 }
@@ -83,8 +83,11 @@ char *grpc_test_fetch_oauth2_token_with_credentials(grpc_credentials *creds) {
                                          on_oauth2_response, &request);
   gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
-  while (!request.is_done)
-    grpc_pollset_work(&request.pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
+  while (!request.is_done) {
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&request.pollset, &worker,
+                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
+  }
   gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));

   grpc_pollset_shutdown(&request.pollset, do_nothing, NULL);

@@ -65,7 +65,7 @@ static void on_metadata_response(void *user_data,
   }
   gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
   sync->is_done = 1;
-  grpc_pollset_kick(&sync->pollset);
+  grpc_pollset_kick(&sync->pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
 }
@@ -95,8 +95,11 @@ int main(int argc, char **argv) {
                                         on_metadata_response, &sync);
   gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
-  while (!sync.is_done)
-    grpc_pollset_work(&sync.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+  while (!sync.is_done) {
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&sync.pollset, &worker,
+                      gpr_inf_future(GPR_CLOCK_REALTIME));
+  }
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));

   grpc_credentials_release(creds);

@@ -79,7 +79,7 @@ static void on_jwt_verification_done(void *user_data,
   gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
   sync->is_done = 1;
-  grpc_pollset_kick(&sync->pollset);
+  grpc_pollset_kick(&sync->pollset, NULL);
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
 }
@@ -109,8 +109,11 @@ int main(int argc, char **argv) {
                                            on_jwt_verification_done, &sync);
   gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
-  while (!sync.is_done)
-    grpc_pollset_work(&sync.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+  while (!sync.is_done) {
+    grpc_pollset_worker worker;
+    grpc_pollset_work(&sync.pollset, &worker,
+                      gpr_inf_future(GPR_CLOCK_REALTIME));
+  }
   gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
   grpc_jwt_verifier_destroy(verifier);

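The hunks above all apply one mechanical API change: grpc_pollset_work now takes a caller-supplied grpc_pollset_worker that identifies the waiting thread, and grpc_pollset_kick takes an optional worker to wake, where NULL means "wake any one waiter". A minimal sketch of the resulting wait/signal pattern, distilled from the tests above (the wait_for_flag/signal_flag helper names are illustrative, not part of the tree):

/* Sketch only: assumes an already-initialized pollset. */
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset.h"

/* Block until *done becomes nonzero; each pass registers a stack-allocated
   grpc_pollset_worker so a kick can reach this thread. */
static void wait_for_flag(grpc_pollset *pollset, int *done) {
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  while (!*done) {
    grpc_pollset_worker worker;
    grpc_pollset_work(pollset, &worker, gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
}

/* Signalling side: NULL kicks any one worker out of grpc_pollset_work;
   passing a specific grpc_pollset_worker* would wake that worker alone. */
static void signal_flag(grpc_pollset *pollset, int *done) {
  gpr_mu_lock(GRPC_POLLSET_MU(pollset));
  *done = 1;
  grpc_pollset_kick(pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
}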
@@ -0,0 +1,160 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/util/reconnect_server.h"
#include <arpa/inet.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <string.h>
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/tcp_server.h"
#include "test/core/util/port.h"
static void pretty_print_backoffs(reconnect_server *server) {
gpr_timespec diff;
int i = 1;
double expected_backoff = 1000.0, backoff;
timestamp_list *head = server->head;
gpr_log(GPR_INFO, "reconnect server: new connection");
for (head = server->head; head && head->next; head = head->next, i++) {
diff = gpr_time_sub(head->next->timestamp, head->timestamp);
backoff = gpr_time_to_millis(diff);
gpr_log(GPR_INFO,
"retry %2d:backoff %6.2fs,expected backoff %6.2fs, jitter %4.2f%%",
i, backoff / 1000.0, expected_backoff / 1000.0,
(backoff - expected_backoff) * 100.0 / expected_backoff);
expected_backoff *= 1.6;
if (expected_backoff > 120 * 1000) {
expected_backoff = 120 * 1000;
}
}
}
static void on_connect(void *arg, grpc_endpoint *tcp) {
char *peer;
char *last_colon;
reconnect_server *server = (reconnect_server *)arg;
gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
timestamp_list *new_tail;
peer = grpc_endpoint_get_peer(tcp);
grpc_endpoint_shutdown(tcp);
grpc_endpoint_destroy(tcp);
if (peer) {
last_colon = strrchr(peer, ':');
if (server->peer == NULL) {
server->peer = peer;
} else {
if (last_colon == NULL) {
gpr_log(GPR_ERROR, "peer does not contain a ':'");
} else if (strncmp(server->peer, peer, last_colon - peer) != 0) {
gpr_log(GPR_ERROR, "mismatched peer! %s vs %s", server->peer, peer);
}
gpr_free(peer);
}
}
new_tail = gpr_malloc(sizeof(timestamp_list));
new_tail->timestamp = now;
new_tail->next = NULL;
if (server->tail == NULL) {
server->head = new_tail;
server->tail = new_tail;
} else {
server->tail->next = new_tail;
server->tail = new_tail;
}
pretty_print_backoffs(server);
}
void reconnect_server_init(reconnect_server *server) {
grpc_init();
server->tcp_server = NULL;
grpc_pollset_init(&server->pollset);
server->pollsets[0] = &server->pollset;
server->head = NULL;
server->tail = NULL;
server->peer = NULL;
}
void reconnect_server_start(reconnect_server *server, int port) {
struct sockaddr_in addr;
int port_added;
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
inet_pton(AF_INET, "0.0.0.0", &addr.sin_addr);
server->tcp_server = grpc_tcp_server_create();
port_added =
grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr));
GPR_ASSERT(port_added == port);
grpc_tcp_server_start(server->tcp_server, server->pollsets, 1, on_connect,
server);
gpr_log(GPR_INFO, "reconnect tcp server listening on 0.0.0.0:%d", port);
}
void reconnect_server_poll(reconnect_server *server, int seconds) {
gpr_timespec deadline =
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(seconds, GPR_TIMESPAN));
gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
grpc_pollset_work(&server->pollset, deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
}
void reconnect_server_clear_timestamps(reconnect_server *server) {
timestamp_list *new_head = server->head;
while (server->head) {
new_head = server->head->next;
gpr_free(server->head);
server->head = new_head;
}
server->tail = NULL;
gpr_free(server->peer);
server->peer = NULL;
}
static void do_nothing(void *ignored) {}
void reconnect_server_destroy(reconnect_server *server) {
grpc_tcp_server_destroy(server->tcp_server, do_nothing, NULL);
reconnect_server_clear_timestamps(server);
grpc_pollset_shutdown(&server->pollset, do_nothing, NULL);
grpc_pollset_destroy(&server->pollset);
grpc_shutdown();
}

@@ -0,0 +1,69 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
#define GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/tcp_server.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct timestamp_list {
gpr_timespec timestamp;
struct timestamp_list *next;
} timestamp_list;
typedef struct reconnect_server {
grpc_tcp_server *tcp_server;
grpc_pollset pollset;
grpc_pollset *pollsets[1];
timestamp_list *head;
timestamp_list *tail;
char *peer;
} reconnect_server;
void reconnect_server_init(reconnect_server *server);
void reconnect_server_start(reconnect_server *server, int port);
void reconnect_server_poll(reconnect_server *server, int seconds);
void reconnect_server_destroy(reconnect_server *server);
void reconnect_server_clear_timestamps(reconnect_server *server);
#ifdef __cplusplus
}
#endif
#endif /* GRPC_TEST_CORE_UTIL_RECONNECT_SERVER_H */

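Taken together with reconnect_server.c above, this header defines a small lifecycle; a minimal usage sketch follows (grpc_pick_unused_port_or_die() is assumed from test/core/util/port.h, which reconnect_server.c already includes):

#include "test/core/util/port.h"
#include "test/core/util/reconnect_server.h"

int main(void) {
  reconnect_server server;
  int port = grpc_pick_unused_port_or_die(); /* illustrative port choice */

  reconnect_server_init(&server);        /* also calls grpc_init() */
  reconnect_server_start(&server, port); /* listen; drop every connection */

  /* Every accepted-then-closed connection appends a timestamp to the
     server's list, so polling for a while records the client's retries. */
  reconnect_server_poll(&server, 5 /* seconds */);

  reconnect_server_clear_timestamps(&server); /* reset between test runs */
  reconnect_server_destroy(&server);          /* also calls grpc_shutdown() */
  return 0;
}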
@@ -830,6 +830,31 @@ TEST_F(End2endTest, HugeResponse) {
   EXPECT_TRUE(s.ok());
 }
 
+namespace {
+void ReaderThreadFunc(ClientReaderWriter<EchoRequest, EchoResponse>* stream,
+                      gpr_event* ev) {
+  EchoResponse resp;
+  gpr_event_set(ev, (void*)1);
+  while (stream->Read(&resp)) {
+    gpr_log(GPR_INFO, "Read message");
+  }
+}
+}  // namespace
+
+// Run a Read and a WritesDone simultaneously.
+TEST_F(End2endTest, SimultaneousReadWritesDone) {
+  ResetStub();
+  ClientContext context;
+  gpr_event ev;
+  gpr_event_init(&ev);
+  auto stream = stub_->BidiStream(&context);
+  std::thread reader_thread(ReaderThreadFunc, stream.get(), &ev);
+  gpr_event_wait(&ev, gpr_inf_future(GPR_CLOCK_REALTIME));
+  stream->WritesDone();
+  Status s = stream->Finish();
+  EXPECT_TRUE(s.ok());
+  reader_thread.join();
+}
+
 TEST_F(End2endTest, Peer) {
   ResetStub();
   EchoRequest request;

@@ -70,6 +70,7 @@ DEFINE_string(test_case, "large_unary",
               "jwt_token_creds: large_unary with JWT token auth; "
               "oauth2_auth_token: raw oauth2 access token auth; "
               "per_rpc_creds: raw oauth2 access token on a single rpc; "
+              "status_code_and_message: verify status code & message; "
              "all : all of above.");
 DEFINE_string(default_service_account, "",
               "Email of GCE default service account");
@@ -82,7 +83,7 @@ using grpc::testing::GetServiceAccountJsonKey;
 int main(int argc, char** argv) {
   grpc::testing::InitTest(&argc, &argv, true);
+  gpr_log(GPR_INFO, "Testing these cases: %s", FLAGS_test_case.c_str());
   int ret = 0;
   grpc::testing::InteropClient client(
       CreateChannelForTestCase(FLAGS_test_case));
@@ -121,6 +122,8 @@ int main(int argc, char** argv) {
   } else if (FLAGS_test_case == "per_rpc_creds") {
     grpc::string json_key = GetServiceAccountJsonKey();
     client.DoPerRpcCreds(json_key, FLAGS_oauth_scope);
+  } else if (FLAGS_test_case == "status_code_and_message") {
+    client.DoStatusWithMessage();
   } else if (FLAGS_test_case == "all") {
     client.DoEmpty();
     client.DoLargeUnary();
@@ -131,6 +134,7 @@ int main(int argc, char** argv) {
     client.DoCancelAfterBegin();
     client.DoCancelAfterFirstResponse();
     client.DoTimeoutOnSleepingServer();
+    client.DoStatusWithMessage();
     // service_account_creds and jwt_token_creds can only run with ssl.
     if (FLAGS_enable_ssl) {
       grpc::string json_key = GetServiceAccountJsonKey();

@@ -423,5 +423,24 @@ void InteropClient::DoTimeoutOnSleepingServer() {
   gpr_log(GPR_INFO, "Pingpong streaming timeout done.");
 }
 
+void InteropClient::DoStatusWithMessage() {
+  gpr_log(GPR_INFO,
+          "Sending RPC with a request for status code 2 and message");
+  std::unique_ptr<TestService::Stub> stub(TestService::NewStub(channel_));
+  ClientContext context;
+  SimpleRequest request;
+  SimpleResponse response;
+  EchoStatus* requested_status = request.mutable_response_status();
+  requested_status->set_code(grpc::StatusCode::UNKNOWN);
+  grpc::string test_msg = "This is a test message";
+  requested_status->set_message(test_msg);
+  Status s = stub->UnaryCall(&context, request, &response);
+  GPR_ASSERT(s.error_code() == grpc::StatusCode::UNKNOWN);
+  GPR_ASSERT(s.error_message() == test_msg);
+  gpr_log(GPR_INFO, "Done testing Status and Message");
+}
+
 }  // namespace testing
 }  // namespace grpc

@@ -60,6 +60,7 @@ class InteropClient {
   void DoCancelAfterBegin();
   void DoCancelAfterFirstResponse();
   void DoTimeoutOnSleepingServer();
+  void DoStatusWithMessage();
   // Auth tests.
   // username is a string containing the user email
   void DoJwtTokenCreds(const grpc::string& username);

@@ -0,0 +1,103 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <memory>
#include <sstream>
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <gflags/gflags.h>
#include <grpc++/channel_interface.h>
#include <grpc++/client_context.h>
#include <grpc++/status.h>
#include "test/cpp/util/create_test_channel.h"
#include "test/cpp/util/test_config.h"
#include "test/proto/test.grpc.pb.h"
#include "test/proto/empty.grpc.pb.h"
#include "test/proto/messages.grpc.pb.h"
DEFINE_int32(server_control_port, 0, "Server port for control rpcs.");
DEFINE_int32(server_retry_port, 0, "Server port for testing reconnection.");
DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
using grpc::ChannelInterface;
using grpc::ClientContext;
using grpc::CreateTestChannel;
using grpc::Status;
using grpc::testing::Empty;
using grpc::testing::ReconnectInfo;
using grpc::testing::ReconnectService;
int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
GPR_ASSERT(FLAGS_server_control_port);
GPR_ASSERT(FLAGS_server_retry_port);
std::ostringstream server_address;
server_address << FLAGS_server_host << ':' << FLAGS_server_control_port;
std::unique_ptr<ReconnectService::Stub> control_stub(
ReconnectService::NewStub(
CreateTestChannel(server_address.str(), false)));
ClientContext start_context;
Empty empty_request;
Empty empty_response;
Status start_status =
control_stub->Start(&start_context, empty_request, &empty_response);
GPR_ASSERT(start_status.ok());
gpr_log(GPR_INFO, "Starting connections with retries.");
server_address.str("");
server_address << FLAGS_server_host << ':' << FLAGS_server_retry_port;
std::shared_ptr<ChannelInterface> retry_channel =
CreateTestChannel(server_address.str(), true);
// About 13 retries.
const int kDeadlineSeconds = 540;
// Use any rpc to test retry.
std::unique_ptr<ReconnectService::Stub> retry_stub(
ReconnectService::NewStub(retry_channel));
ClientContext retry_context;
retry_context.set_deadline(std::chrono::system_clock::now() +
std::chrono::seconds(kDeadlineSeconds));
Status retry_status =
retry_stub->Start(&retry_context, empty_request, &empty_response);
GPR_ASSERT(retry_status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED);
gpr_log(GPR_INFO, "Done retrying, getting final data from server");
ClientContext stop_context;
ReconnectInfo response;
Status stop_status =
control_stub->Stop(&stop_context, empty_request, &response);
GPR_ASSERT(stop_status.ok());
GPR_ASSERT(response.passed() == true);
return 0;
}

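The "About 13 retries" comment checks out arithmetically: backoffs start at 1s, grow by 1.6x per attempt, and cap at 120s, so the attempts that fit within the 540-second deadline sum to roughly 532s at the 13th retry. A standalone sketch of that sum (plain C, not gRPC API):

#include <stdio.h>

int main(void) {
  double backoff = 1.0, elapsed = 0.0; /* seconds */
  int attempts = 0;
  while (elapsed + backoff <= 540.0) {
    elapsed += backoff;
    attempts++;
    backoff *= 1.6;
    if (backoff > 120.0) backoff = 120.0; /* spec cap */
  }
  /* Prints "13 retries in 532s": 1 + 1.6 + 2.56 + ... + 120 + 120. */
  printf("%d retries in %.0fs\n", attempts, elapsed);
  return 0;
}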
@@ -0,0 +1,190 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <condition_variable>
#include <memory>
#include <mutex>
#include <sstream>
#include <signal.h>
#include <unistd.h>
#include <gflags/gflags.h>
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpc++/config.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc++/server_credentials.h>
#include <grpc++/status.h>
#include "test/core/util/reconnect_server.h"
#include "test/cpp/util/test_config.h"
#include "test/proto/test.grpc.pb.h"
#include "test/proto/empty.grpc.pb.h"
#include "test/proto/messages.grpc.pb.h"
DEFINE_int32(control_port, 0, "Server port for controlling the server.");
DEFINE_int32(retry_port, 0,
"Server port for raw tcp connections. All incoming "
"connections will be closed immediately.");
using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::ServerCredentials;
using grpc::ServerReader;
using grpc::ServerReaderWriter;
using grpc::ServerWriter;
using grpc::SslServerCredentialsOptions;
using grpc::Status;
using grpc::testing::Empty;
using grpc::testing::ReconnectService;
using grpc::testing::ReconnectInfo;
static bool got_sigint = false;
class ReconnectServiceImpl : public ReconnectService::Service {
public:
explicit ReconnectServiceImpl(int retry_port)
: retry_port_(retry_port), serving_(false), shutdown_(false) {
reconnect_server_init(&tcp_server_);
}
~ReconnectServiceImpl() {
if (tcp_server_.tcp_server) {
reconnect_server_destroy(&tcp_server_);
}
}
void Poll(int seconds) { reconnect_server_poll(&tcp_server_, seconds); }
Status Start(ServerContext* context, const Empty* request, Empty* response) {
std::unique_lock<std::mutex> lock(mu_);
while (serving_ && !shutdown_) {
cv_.wait(lock);
}
if (shutdown_) {
return Status(grpc::StatusCode::UNAVAILABLE, "shutting down");
}
serving_ = true;
lock.unlock();
if (!tcp_server_.tcp_server) {
reconnect_server_start(&tcp_server_, retry_port_);
} else {
reconnect_server_clear_timestamps(&tcp_server_);
}
return Status::OK;
}
Status Stop(ServerContext* context, const Empty* request,
ReconnectInfo* response) {
// extract timestamps and set response
Verify(response);
reconnect_server_clear_timestamps(&tcp_server_);
std::lock_guard<std::mutex> lock(mu_);
serving_ = false;
cv_.notify_one();
return Status::OK;
}
void Verify(ReconnectInfo* response) {
double expected_backoff = 1000.0;
const double kTransmissionDelay = 100.0;
const double kBackoffMultiplier = 1.6;
const double kJitterFactor = 0.2;
const int kMaxBackoffMs = 120 * 1000;
bool passed = true;
for (timestamp_list* cur = tcp_server_.head; cur && cur->next;
cur = cur->next) {
double backoff = gpr_time_to_millis(
gpr_time_sub(cur->next->timestamp, cur->timestamp));
double min_backoff = expected_backoff * (1 - kJitterFactor);
double max_backoff = expected_backoff * (1 + kJitterFactor);
if (backoff < min_backoff - kTransmissionDelay ||
backoff > max_backoff + kTransmissionDelay) {
passed = false;
}
response->add_backoff_ms(static_cast<gpr_int32>(backoff));
expected_backoff *= kBackoffMultiplier;
expected_backoff =
expected_backoff > kMaxBackoffMs ? kMaxBackoffMs : expected_backoff;
}
response->set_passed(passed);
}
void Shutdown() {
std::lock_guard<std::mutex> lock(mu_);
shutdown_ = true;
cv_.notify_all();
}
private:
int retry_port_;
reconnect_server tcp_server_;
bool serving_;
bool shutdown_;
std::mutex mu_;
std::condition_variable cv_;
};
void RunServer() {
std::ostringstream server_address;
server_address << "0.0.0.0:" << FLAGS_control_port;
ReconnectServiceImpl service(FLAGS_retry_port);
ServerBuilder builder;
builder.RegisterService(&service);
builder.AddListeningPort(server_address.str(),
grpc::InsecureServerCredentials());
std::unique_ptr<Server> server(builder.BuildAndStart());
gpr_log(GPR_INFO, "Server listening on %s", server_address.str().c_str());
while (!got_sigint) {
service.Poll(5);
}
service.Shutdown();
}
static void sigint_handler(int x) { got_sigint = true; }
int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true);
signal(SIGINT, sigint_handler);
GPR_ASSERT(FLAGS_control_port != 0);
GPR_ASSERT(FLAGS_retry_port != 0);
RunServer();
return 0;
}

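Verify() accepts an interval when it lies within the expected backoff plus or minus 20% jitter, widened by the 100ms transmission-delay allowance; for the first interval the window is 1000 * 0.8 - 100 = 700ms up to 1000 * 1.2 + 100 = 1300ms. A self-contained restatement of that predicate, mirroring the constants above (illustrative, not part of the tree):

#include <stdbool.h>

/* True iff one measured backoff falls inside the jittered window around the
   expected value, e.g. backoff_in_window(x, 1000.0) accepts
   700 <= x <= 1300 (all values in milliseconds). */
static bool backoff_in_window(double backoff_ms, double expected_ms) {
  const double kTransmissionDelay = 100.0;
  const double kJitterFactor = 0.2;
  double min_ok = expected_ms * (1 - kJitterFactor) - kTransmissionDelay;
  double max_ok = expected_ms * (1 + kJitterFactor) + kTransmissionDelay;
  return backoff_ms >= min_ok && backoff_ms <= max_ok;
}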
@@ -105,6 +105,13 @@ class TestServiceImpl : public TestService::Service {
         return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
       }
     }
+
+    if (request->has_response_status()) {
+      return Status(
+          static_cast<grpc::StatusCode>(request->response_status().code()),
+          request->response_status().message());
+    }
+
     return Status::OK;
   }

@@ -157,3 +157,11 @@ message StreamingOutputCallResponse {
   // Payload to increase response size.
   optional Payload payload = 1;
 }
+
+// For reconnect interop test only.
+// Server tells client whether its reconnects are following the spec and the
+// reconnect backoffs it saw.
+message ReconnectInfo {
+  optional bool passed = 1;
+  repeated int32 backoff_ms = 2;
+}

@@ -79,3 +79,9 @@ service UnimplementedService {
   // A call that no server should implement
   rpc UnimplementedCall(grpc.testing.Empty) returns(grpc.testing.Empty);
 }
+
+// A service used to control reconnect server.
+service ReconnectService {
+  rpc Start(grpc.testing.Empty) returns (grpc.testing.Empty);
+  rpc Stop(grpc.testing.Empty) returns (grpc.testing.ReconnectInfo);
+}

@@ -821,7 +821,6 @@ src/core/iomgr/iomgr.h \
 src/core/iomgr/iomgr_internal.h \
 src/core/iomgr/iomgr_posix.h \
 src/core/iomgr/pollset.h \
-src/core/iomgr/pollset_kick_posix.h \
 src/core/iomgr/pollset_posix.h \
 src/core/iomgr/pollset_set.h \
 src/core/iomgr/pollset_set_posix.h \
@@ -943,7 +942,6 @@ src/core/iomgr/iocp_windows.c \
 src/core/iomgr/iomgr.c \
 src/core/iomgr/iomgr_posix.c \
 src/core/iomgr/iomgr_windows.c \
-src/core/iomgr/pollset_kick_posix.c \
 src/core/iomgr/pollset_multipoller_with_epoll.c \
 src/core/iomgr/pollset_multipoller_with_poll_posix.c \
 src/core/iomgr/pollset_posix.c \

@@ -817,20 +817,6 @@
       "test/core/end2end/no_server_test.c"
     ]
   },
-  {
-    "deps": [
-      "gpr",
-      "gpr_test_util",
-      "grpc",
-      "grpc_test_util"
-    ],
-    "headers": [],
-    "language": "c",
-    "name": "poll_kick_posix_test",
-    "src": [
-      "test/core/iomgr/poll_kick_posix_test.c"
-    ]
-  },
   {
     "deps": [
       "gpr",
@@ -1524,6 +1510,55 @@
       "test/cpp/qps/worker.cc"
     ]
   },
+  {
+    "deps": [
+      "gpr",
+      "gpr_test_util",
+      "grpc",
+      "grpc++",
+      "grpc++_test_config",
+      "grpc++_test_util",
+      "grpc_test_util"
+    ],
+    "headers": [
+      "test/proto/empty.grpc.pb.h",
+      "test/proto/empty.pb.h",
+      "test/proto/messages.grpc.pb.h",
+      "test/proto/messages.pb.h",
+      "test/proto/test.grpc.pb.h",
+      "test/proto/test.pb.h"
+    ],
+    "language": "c++",
+    "name": "reconnect_interop_client",
+    "src": [
+      "test/cpp/interop/reconnect_interop_client.cc"
+    ]
+  },
+  {
+    "deps": [
+      "gpr",
+      "gpr_test_util",
+      "grpc",
+      "grpc++",
+      "grpc++_test_config",
+      "grpc++_test_util",
+      "grpc_test_util",
+      "reconnect_server"
+    ],
+    "headers": [
+      "test/proto/empty.grpc.pb.h",
+      "test/proto/empty.pb.h",
+      "test/proto/messages.grpc.pb.h",
+      "test/proto/messages.pb.h",
+      "test/proto/test.grpc.pb.h",
+      "test/proto/test.pb.h"
+    ],
+    "language": "c++",
+    "name": "reconnect_interop_server",
+    "src": [
+      "test/cpp/interop/reconnect_interop_server.cc"
+    ]
+  },
   {
     "deps": [
       "gpr",
@@ -12230,7 +12265,6 @@
     "src/core/iomgr/iomgr_internal.h",
     "src/core/iomgr/iomgr_posix.h",
     "src/core/iomgr/pollset.h",
-    "src/core/iomgr/pollset_kick_posix.h",
     "src/core/iomgr/pollset_posix.h",
     "src/core/iomgr/pollset_set.h",
     "src/core/iomgr/pollset_set_posix.h",
@@ -12400,8 +12434,6 @@
     "src/core/iomgr/iomgr_posix.h",
     "src/core/iomgr/iomgr_windows.c",
     "src/core/iomgr/pollset.h",
-    "src/core/iomgr/pollset_kick_posix.c",
-    "src/core/iomgr/pollset_kick_posix.h",
     "src/core/iomgr/pollset_multipoller_with_epoll.c",
     "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
    "src/core/iomgr/pollset_posix.c",
@@ -12702,7 +12734,6 @@
     "src/core/iomgr/iomgr_internal.h",
     "src/core/iomgr/iomgr_posix.h",
     "src/core/iomgr/pollset.h",
-    "src/core/iomgr/pollset_kick_posix.h",
     "src/core/iomgr/pollset_posix.h",
     "src/core/iomgr/pollset_set.h",
     "src/core/iomgr/pollset_set_posix.h",
@@ -12850,8 +12881,6 @@
     "src/core/iomgr/iomgr_posix.h",
     "src/core/iomgr/iomgr_windows.c",
     "src/core/iomgr/pollset.h",
-    "src/core/iomgr/pollset_kick_posix.c",
-    "src/core/iomgr/pollset_kick_posix.h",
     "src/core/iomgr/pollset_multipoller_with_epoll.c",
     "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
     "src/core/iomgr/pollset_posix.c",
@@ -12989,6 +13018,23 @@
       "src/core/transport/transport_op_string.c"
     ]
   },
+  {
+    "deps": [
+      "gpr",
+      "gpr_test_util",
+      "grpc",
+      "grpc_test_util"
+    ],
+    "headers": [
+      "test/core/util/reconnect_server.h"
+    ],
+    "language": "c",
+    "name": "reconnect_server",
+    "src": [
+      "test/core/util/reconnect_server.c",
+      "test/core/util/reconnect_server.h"
+    ]
+  },
   {
     "deps": [
       "gpr",

@@ -463,14 +463,6 @@
       "posix"
     ]
   },
-  {
-    "flaky": false,
-    "language": "c",
-    "name": "poll_kick_posix_test",
-    "platforms": [
-      "posix"
-    ]
-  },
   {
     "flaky": false,
     "language": "c",

File diff suppressed because one or more lines are too long (vsprojects/Grpc.mak).

@@ -283,7 +283,6 @@
     <ClInclude Include="..\..\src\core\iomgr\iomgr_internal.h" />
     <ClInclude Include="..\..\src\core\iomgr\iomgr_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset.h" />
-    <ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_set.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_set_posix.h" />
@@ -467,8 +466,6 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\iomgr_windows.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\iomgr\pollset_kick_posix.c">
-    </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_epoll.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_poll_posix.c">

@@ -181,9 +181,6 @@
     <ClCompile Include="..\..\src\core\iomgr\iomgr_windows.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\src\core\iomgr\pollset_kick_posix.c">
-      <Filter>src\core\iomgr</Filter>
-    </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_epoll.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
@@ -605,9 +602,6 @@
     <ClInclude Include="..\..\src\core\iomgr\pollset.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h">
-      <Filter>src\core\iomgr</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\src\core\iomgr\pollset_posix.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>

@@ -262,7 +262,6 @@
     <ClInclude Include="..\..\src\core\iomgr\iomgr_internal.h" />
     <ClInclude Include="..\..\src\core\iomgr\iomgr_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset.h" />
-    <ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_posix.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_set.h" />
     <ClInclude Include="..\..\src\core\iomgr\pollset_set_posix.h" />
@@ -400,8 +399,6 @@
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\iomgr_windows.c">
     </ClCompile>
-    <ClCompile Include="..\..\src\core\iomgr\pollset_kick_posix.c">
-    </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_epoll.c">
     </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_poll_posix.c">

@@ -112,9 +112,6 @@
     <ClCompile Include="..\..\src\core\iomgr\iomgr_windows.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
-    <ClCompile Include="..\..\src\core\iomgr\pollset_kick_posix.c">
-      <Filter>src\core\iomgr</Filter>
-    </ClCompile>
     <ClCompile Include="..\..\src\core\iomgr\pollset_multipoller_with_epoll.c">
       <Filter>src\core\iomgr</Filter>
     </ClCompile>
@@ -482,9 +479,6 @@
     <ClInclude Include="..\..\src\core\iomgr\pollset.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h">
-      <Filter>src\core\iomgr</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\src\core\iomgr\pollset_posix.h">
       <Filter>src\core\iomgr</Filter>
     </ClInclude>
