Merge github.com:grpc/grpc into stats

pull/11873/head
commit 39af3361ae by Craig Tiller
70 files changed (lines changed per file in parentheses):
  1. BUILD (1)
  2. CMakeLists.txt (2)
  3. Makefile (2)
  4. README.md (2)
  5. binding.gyp (61)
  6. build.yaml (4)
  7. gRPC-Core.podspec (2)
  8. grpc.gemspec (1)
  9. package.xml (1)
  10. src/core/ext/filters/client_channel/http_proxy.c (64)
  11. src/core/ext/transport/chttp2/transport/parsing.c (4)
  12. src/core/ext/transport/inproc/inproc_transport.c (7)
  13. src/core/lib/iomgr/ev_epoll1_linux.c (282)
  14. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (8)
  15. src/core/lib/iomgr/ev_epoll_thread_pool_linux.c (8)
  16. src/core/lib/iomgr/ev_epollex_linux.c (12)
  17. src/core/lib/iomgr/ev_epollsig_linux.c (10)
  18. src/core/lib/iomgr/iomgr_uv.c (8)
  19. src/core/lib/iomgr/iomgr_uv.h (37)
  20. src/core/lib/iomgr/lockfree_event.c (14)
  21. src/core/lib/iomgr/lockfree_event.h (5)
  22. src/core/lib/iomgr/pollset_uv.c (7)
  23. src/core/lib/iomgr/resolve_address_uv.c (21)
  24. src/core/lib/iomgr/sockaddr_utils.c (5)
  25. src/core/lib/iomgr/sockaddr_utils.h (2)
  26. src/core/lib/iomgr/tcp_client_uv.c (3)
  27. src/core/lib/iomgr/tcp_server_uv.c (129)
  28. src/core/lib/iomgr/tcp_uv.c (7)
  29. src/core/lib/iomgr/timer_uv.c (4)
  30. src/cpp/server/create_default_thread_pool.cc (11)
  31. src/cpp/server/thread_pool_interface.h (4)
  32. src/ruby/lib/grpc/generic/active_call.rb (170)
  33. src/ruby/lib/grpc/generic/bidi_call.rb (62)
  34. src/ruby/lib/grpc/generic/rpc_desc.rb (4)
  35. src/ruby/lib/grpc/generic/rpc_server.rb (1)
  36. src/ruby/spec/client_auth_spec.rb (137)
  37. src/ruby/spec/generic/active_call_spec.rb (4)
  38. src/ruby/spec/generic/client_stub_spec.rb (355)
  39. src/ruby/spec/generic/rpc_desc_spec.rb (10)
  40. src/ruby/spec/generic/rpc_server_spec.rb (145)
  41. src/ruby/spec/testdata/client.key (16)
  42. src/ruby/spec/testdata/client.pem (14)
  43. templates/binding.gyp.template (46)
  44. test/core/end2end/end2end_nosec_tests.c (8)
  45. test/core/end2end/end2end_tests.c (8)
  46. test/core/end2end/fixtures/h2_http_proxy.c (19)
  47. test/core/end2end/fixtures/http_proxy_fixture.c (52)
  48. test/core/end2end/fixtures/http_proxy_fixture.h (19)
  49. test/core/end2end/gen_build_yaml.py (14)
  50. test/core/end2end/generate_tests.bzl (17)
  51. test/core/end2end/tests/proxy_auth.c (235)
  52. test/core/surface/completion_queue_threading_test.c (3)
  53. tools/doxygen/Doxyfile.core.internal (1)
  54. tools/mkowners/mkowners.py (2)
  55. tools/profiling/microbenchmarks/bm_diff/bm_diff.py (13)
  56. tools/profiling/microbenchmarks/bm_diff/bm_main.py (14)
  57. tools/profiling/microbenchmarks/bm_diff/bm_run.py (21)
  58. tools/run_tests/generated/sources_and_headers.json (4)
  59. tools/run_tests/generated/tests.json (55)
  60. tools/run_tests/run_tests.py (2)
  61. vsprojects/vcxproj/grpc/grpc.vcxproj (1)
  62. vsprojects/vcxproj/grpc/grpc.vcxproj.filters (3)
  63. vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj (1)
  64. vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters (3)
  65. vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj (1)
  66. vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters (3)
  67. vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj (2)
  68. vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters (3)
  69. vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj (2)
  70. vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters (3)

@ -723,6 +723,7 @@ grpc_cc_library(
"src/core/lib/iomgr/iomgr.h",
"src/core/lib/iomgr/iomgr_internal.h",
"src/core/lib/iomgr/iomgr_posix.h",
"src/core/lib/iomgr/iomgr_uv.h",
"src/core/lib/iomgr/is_epollexclusive_available.h",
"src/core/lib/iomgr/load_file.h",
"src/core/lib/iomgr/lockfree_event.h",

@ -4364,6 +4364,7 @@ add_library(end2end_tests
test/core/end2end/tests/payload.c
test/core/end2end/tests/ping.c
test/core/end2end/tests/ping_pong_streaming.c
test/core/end2end/tests/proxy_auth.c
test/core/end2end/tests/registered_call.c
test/core/end2end/tests/request_with_flags.c
test/core/end2end/tests/request_with_payload.c
@ -4463,6 +4464,7 @@ add_library(end2end_nosec_tests
test/core/end2end/tests/payload.c
test/core/end2end/tests/ping.c
test/core/end2end/tests/ping_pong_streaming.c
test/core/end2end/tests/proxy_auth.c
test/core/end2end/tests/registered_call.c
test/core/end2end/tests/request_with_flags.c
test/core/end2end/tests/request_with_payload.c

@ -7962,6 +7962,7 @@ LIBEND2END_TESTS_SRC = \
test/core/end2end/tests/payload.c \
test/core/end2end/tests/ping.c \
test/core/end2end/tests/ping_pong_streaming.c \
test/core/end2end/tests/proxy_auth.c \
test/core/end2end/tests/registered_call.c \
test/core/end2end/tests/request_with_flags.c \
test/core/end2end/tests/request_with_payload.c \
@ -8056,6 +8057,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
test/core/end2end/tests/payload.c \
test/core/end2end/tests/ping.c \
test/core/end2end/tests/ping_pong_streaming.c \
test/core/end2end/tests/proxy_auth.c \
test/core/end2end/tests/registered_call.c \
test/core/end2end/tests/request_with_flags.c \
test/core/end2end/tests/request_with_payload.c \

@ -17,7 +17,7 @@ See [INSTALL](INSTALL.md) for installation instructions for various platforms.
See [tools/run_tests](tools/run_tests) for more guidance on how to run various test suites (e.g. unit tests, interop tests, benchmarks)
See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5712453606309888) for the performance numbers for v1.0.x.
See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5636470266134528) for the performance numbers for the latest released version.
# Repository Structure & Status

@ -175,21 +175,28 @@
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
],
},
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
],
'OTHER_CPLUSPLUSFLAGS': [
'-stdlib=libc++',
'-std=c++11'
],
}]
]
},
@ -508,6 +515,13 @@
'third_party/boringssl/ssl/tls_method.c',
'third_party/boringssl/ssl/tls_record.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
],
}],
@ -626,6 +640,13 @@
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
{
'target_name': 'grpc',
@ -891,6 +912,13 @@
'src/core/ext/filters/workarounds/workaround_utils.c',
'src/core/plugin_registry/grpc_plugin_registry.c',
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
{
'include_dirs': [
@ -916,6 +944,11 @@
'ldflags': [
'-Wl,-wrap,memcpy'
]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
],
"target_name": "grpc_node",

@ -216,6 +216,7 @@ filegroups:
- src/core/lib/iomgr/iomgr.h
- src/core/lib/iomgr/iomgr_internal.h
- src/core/lib/iomgr/iomgr_posix.h
- src/core/lib/iomgr/iomgr_uv.h
- src/core/lib/iomgr/is_epollexclusive_available.h
- src/core/lib/iomgr/load_file.h
- src/core/lib/iomgr/lockfree_event.h
@ -2390,6 +2391,8 @@ targets:
- grpc
- gpr_test_util
- gpr
exclude_iomgrs:
- uv
platforms:
- linux
secure: true
@ -4497,6 +4500,7 @@ targets:
- grpc
- gpr_test_util
- gpr
timeout_seconds: 1200
- name: writes_per_rpc_test
gtest: true
cpu_cost: 0.5

@ -279,6 +279,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr.h',
'src/core/lib/iomgr/iomgr_internal.h',
'src/core/lib/iomgr/iomgr_posix.h',
'src/core/lib/iomgr/iomgr_uv.h',
'src/core/lib/iomgr/is_epollexclusive_available.h',
'src/core/lib/iomgr/load_file.h',
'src/core/lib/iomgr/lockfree_event.h',
@ -767,6 +768,7 @@ Pod::Spec.new do |s|
'src/core/lib/iomgr/iomgr.h',
'src/core/lib/iomgr/iomgr_internal.h',
'src/core/lib/iomgr/iomgr_posix.h',
'src/core/lib/iomgr/iomgr_uv.h',
'src/core/lib/iomgr/is_epollexclusive_available.h',
'src/core/lib/iomgr/load_file.h',
'src/core/lib/iomgr/lockfree_event.h',

@ -211,6 +211,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/iomgr/iomgr.h )
s.files += %w( src/core/lib/iomgr/iomgr_internal.h )
s.files += %w( src/core/lib/iomgr/iomgr_posix.h )
s.files += %w( src/core/lib/iomgr/iomgr_uv.h )
s.files += %w( src/core/lib/iomgr/is_epollexclusive_available.h )
s.files += %w( src/core/lib/iomgr/load_file.h )
s.files += %w( src/core/lib/iomgr/lockfree_event.h )

@ -225,6 +225,7 @@
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_internal.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_posix.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/iomgr_uv.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/is_epollexclusive_available.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/load_file.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/iomgr/lockfree_event.h" role="src" />

@ -30,15 +30,23 @@
#include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
#include "src/core/ext/filters/client_channel/uri_parser.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/support/env.h"
#include "src/core/lib/support/string.h"
static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) {
/**
* Parses the 'http_proxy' env var and returns the proxy hostname to resolve or
* NULL on error. Also sets 'user_cred' to user credentials if present in the
* 'http_proxy' env var, otherwise leaves it unchanged. It is caller's
* responsibility to gpr_free user_cred.
*/
static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
GPR_ASSERT(user_cred != NULL);
char* proxy_name = NULL;
char* uri_str = gpr_getenv("http_proxy");
if (uri_str == NULL) return NULL;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
char* proxy_name = NULL;
if (uri == NULL || uri->authority == NULL) {
gpr_log(GPR_ERROR, "cannot parse value of 'http_proxy' env var");
goto done;
@ -47,11 +55,27 @@ static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) {
gpr_log(GPR_ERROR, "'%s' scheme not supported in proxy URI", uri->scheme);
goto done;
}
if (strchr(uri->authority, '@') != NULL) {
gpr_log(GPR_ERROR, "userinfo not supported in proxy URI");
goto done;
/* Split on '@' to separate user credentials from host */
char** authority_strs = NULL;
size_t authority_nstrs;
gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
if (authority_nstrs == 1) {
/* User cred not present in authority */
proxy_name = authority_strs[0];
} else if (authority_nstrs == 2) {
/* User cred found */
*user_cred = authority_strs[0];
proxy_name = authority_strs[1];
gpr_log(GPR_DEBUG, "userinfo found in proxy URI");
} else {
/* Bad authority */
for (size_t i = 0; i < authority_nstrs; i++) {
gpr_free(authority_strs[i]);
}
proxy_name = NULL;
}
proxy_name = gpr_strdup(uri->authority);
gpr_free(authority_strs);
done:
gpr_free(uri_str);
grpc_uri_destroy(uri);
@ -64,7 +88,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
const grpc_channel_args* args,
char** name_to_resolve,
grpc_channel_args** new_args) {
*name_to_resolve = grpc_get_http_proxy_server(exec_ctx);
char* user_cred = NULL;
*name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
if (*name_to_resolve == NULL) return false;
grpc_uri* uri =
grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
@ -73,12 +98,16 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
"'http_proxy' environment variable set, but cannot "
"parse server URI '%s' -- not using proxy",
server_uri);
if (uri != NULL) grpc_uri_destroy(uri);
if (uri != NULL) {
gpr_free(user_cred);
grpc_uri_destroy(uri);
}
return false;
}
if (strcmp(uri->scheme, "unix") == 0) {
gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
server_uri);
gpr_free(user_cred);
grpc_uri_destroy(uri);
return false;
}
@ -126,10 +155,25 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
}
}
}
grpc_arg new_arg = grpc_channel_arg_string_create(
grpc_arg args_to_add[2];
args_to_add[0] = grpc_channel_arg_string_create(
GRPC_ARG_HTTP_CONNECT_SERVER,
uri->path[0] == '/' ? uri->path + 1 : uri->path);
*new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
if (user_cred != NULL) {
/* Use base64 encoding for user credentials as stated in RFC 7617 */
char* encoded_user_cred =
grpc_base64_encode(user_cred, strlen(user_cred), 0, 0);
char* header;
gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
gpr_free(encoded_user_cred);
args_to_add[1] =
grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
gpr_free(header);
} else {
*new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
}
gpr_free(user_cred);
grpc_uri_destroy(uri);
return true;
}
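
[Note] The credential handling above follows RFC 7617: the "user:password" userinfo is base64-encoded and attached as a Basic-scheme Proxy-Authorization header (written without a space after the colon, matching the gpr_asprintf format string in the hunk). A self-contained sketch of the same transformation; it deliberately avoids gRPC's internal b64.h/gpr helpers, and the function names here are illustrative only:

#include <string>

// Minimal base64 encoder (RFC 4648), enough to illustrate RFC 7617
// Basic credentials; gRPC itself uses grpc_base64_encode() from b64.h.
static std::string Base64(const std::string& in) {
  static const char tbl[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
  std::string out;
  size_t i = 0;
  for (; i + 2 < in.size(); i += 3) {  // whole 3-byte groups
    unsigned v = (unsigned char)in[i] << 16 | (unsigned char)in[i + 1] << 8 |
                 (unsigned char)in[i + 2];
    out += tbl[v >> 18]; out += tbl[(v >> 12) & 63];
    out += tbl[(v >> 6) & 63]; out += tbl[v & 63];
  }
  if (i + 1 == in.size()) {  // one trailing byte -> two chars + "=="
    unsigned v = (unsigned char)in[i] << 16;
    out += tbl[v >> 18]; out += tbl[(v >> 12) & 63]; out += "==";
  } else if (i + 2 == in.size()) {  // two trailing bytes -> three chars + "="
    unsigned v = (unsigned char)in[i] << 16 | (unsigned char)in[i + 1] << 8;
    out += tbl[v >> 18]; out += tbl[(v >> 12) & 63];
    out += tbl[(v >> 6) & 63]; out += '=';
  }
  return out;
}

// Mirrors the header built above from the userinfo split out of http_proxy.
std::string MakeProxyAuthHeader(const std::string& user_cred) {
  return "Proxy-Authorization:Basic " + Base64(user_cred);
}

For "user:pass" this yields "Proxy-Authorization:Basic dXNlcjpwYXNz", which the proxy decodes to recover the credentials.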

@ -657,6 +657,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
"ignoring grpc_chttp2_stream with non-client generated index %d",
t->incoming_stream_id));
return init_skip_frame_parser(exec_ctx, t, 1);
} else if (grpc_chttp2_stream_map_size(&t->stream_map) >=
t->settings[GRPC_ACKED_SETTINGS]
[GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Max stream count exceeded");
}
t->last_new_stream_id = t->incoming_stream_id;
s = t->incoming_stream =

@ -190,8 +190,11 @@ typedef struct inproc_stream {
static bool inproc_slice_byte_stream_next(grpc_exec_ctx *exec_ctx,
grpc_byte_stream *bs, size_t max,
grpc_closure *on_complete) {
inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs;
return (stream->le->sb.count != 0);
// Because inproc transport always provides the entire message atomically,
// the byte stream always has data available when this function is called.
// Thus, this function always returns true (unlike other transports) and
// there is never any need to schedule a closure
return true;
}
static grpc_error *inproc_slice_byte_stream_pull(grpc_exec_ctx *exec_ctx,

@ -46,6 +46,7 @@
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
#include "src/core/lib/support/string.h"
static grpc_wakeup_fd global_wakeup_fd;
static int g_epfd;
@ -78,8 +79,21 @@ static void fd_global_shutdown(void);
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
static const char *kick_state_string(kick_state st) {
switch (st) {
case UNKICKED:
return "UNKICKED";
case KICKED:
return "KICKED";
case DESIGNATED_POLLER:
return "DESIGNATED_POLLER";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
struct grpc_pollset_worker {
kick_state kick_state;
int kick_state_mutator; // which line of code last changed kick state
bool initialized_cv;
grpc_pollset_worker *next;
grpc_pollset_worker *prev;
@ -87,6 +101,12 @@ struct grpc_pollset_worker {
grpc_closure_list schedule_on_end_work;
};
#define SET_KICK_STATE(worker, state) \
do { \
(worker)->kick_state = (state); \
(worker)->kick_state_mutator = __LINE__; \
} while (false)
#define MAX_NEIGHBOURHOODS 1024
typedef struct pollset_neighbourhood {
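
[Note] The SET_KICK_STATE macro introduced above stores __LINE__ alongside every kick-state change, so a wedged worker examined in a debugger or core dump identifies the statement that last mutated it. The same idiom reduced to a standalone sketch (illustrative names, not gRPC code):

#include <cstdio>

struct Worker {
  int kick_state = 0;
  int kick_state_mutator = 0;  // __LINE__ of the last mutation
};

// Always mutate through the macro so the source line is recorded.
#define SET_STATE(w, s)                 \
  do {                                  \
    (w)->kick_state = (s);              \
    (w)->kick_state_mutator = __LINE__; \
  } while (false)

void Dump(const Worker& w) {
  // In a hang, this pinpoints which code path last kicked the worker.
  std::printf("kick_state=%d last set at line %d\n", w.kick_state,
              w.kick_state_mutator);
}
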
@ -101,10 +121,15 @@ struct grpc_pollset {
bool reassigning_neighbourhood;
grpc_pollset_worker *root_worker;
bool kicked_without_poller;
/* Set to true if the pollset is observed to have no workers available to
* poll */
bool seen_inactive;
bool shutting_down; /* Is the pollset shutting down ? */
bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
bool shutting_down; /* Is the pollset shutting down ? */
grpc_closure *shutdown_closure; /* Called after after shutdown is complete */
/* Number of workers who are *about-to* attach themselves to the pollset
* worker list */
int begin_refs;
grpc_pollset *next;
@ -264,29 +289,23 @@ static bool fd_is_shutdown(grpc_fd *fd) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
sets (This can happen briefly during polling island merges). In such cases
it does not really matter which notifer is set as the read_notifier_pollset
(They would both point to the same polling island anyway) */
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Use release store to match with acquire load in fd_get_read_notifier */
gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
/*******************************************************************************
@ -411,18 +430,28 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
if (pollset->root_worker != NULL) {
grpc_pollset_worker *worker = pollset->root_worker;
do {
if (worker->initialized_cv) {
worker->kick_state = KICKED;
gpr_cv_signal(&worker->cv);
} else {
worker->kick_state = KICKED;
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_shutdown");
switch (worker->kick_state) {
case KICKED:
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
break;
}
worker = worker->next;
} while (worker != pollset->root_worker);
}
// TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
// in the else case
return error;
}
@ -438,7 +467,9 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_ASSERT(pollset->shutdown_closure == NULL);
GPR_ASSERT(!pollset->shutting_down);
pollset->shutdown_closure = closure;
pollset->shutting_down = true;
GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
@ -512,10 +543,14 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
gpr_timespec deadline) {
if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false;
worker->kick_state = UNKICKED;
SET_KICK_STATE(worker, UNKICKED);
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
if (pollset->seen_inactive) {
// pollset has been observed to be inactive, we need to move back to the
// active list
@ -531,6 +566,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu);
gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->kick_state),
is_reassigning);
}
if (pollset->seen_inactive) {
if (neighbourhood != pollset->neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu);
@ -541,8 +581,14 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
pollset->seen_inactive = false;
if (neighbourhood->active_root == NULL) {
neighbourhood->active_root = pollset->next = pollset->prev = pollset;
if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
worker->kick_state = DESIGNATED_POLLER;
/* TODO: sreek. Why would this worker state be other than UNKICKED
* here ? (since the worker isn't added to the pollset yet, there is no
* way it can be "found" by other threads to get kicked). */
/* If there is no designated poller, make this the designated poller */
if (worker->kick_state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
}
} else {
pollset->next = neighbourhood->active_root;
@ -556,24 +602,53 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
}
gpr_mu_unlock(&neighbourhood->mu);
}
worker_insert(pollset, worker);
pollset->begin_refs--;
if (worker->kick_state == UNKICKED) {
if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true;
gpr_cv_init(&worker->cv);
while (worker->kick_state == UNKICKED &&
pollset->shutdown_closure == NULL) {
while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->kick_state),
pollset->shutting_down);
}
if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
worker->kick_state == UNKICKED) {
worker->kick_state = KICKED;
/* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
}
}
*now = gpr_now(now->clock_type);
}
return worker->kick_state == DESIGNATED_POLLER &&
pollset->shutdown_closure == NULL;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->kick_state),
pollset->shutting_down, pollset->kicked_without_poller);
}
/* We release pollset lock in this function at a couple of places:
* 1. Briefly when assigning pollset to a neighbourhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
* true, this worker cannot do polling */
/* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
* case; especially when the worker is the DESIGNATED_POLLER */
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
return false;
}
return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
}
static bool check_neighbourhood_for_available_poller(
@ -593,10 +668,18 @@ static bool check_neighbourhood_for_available_poller(
case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) {
inspect_worker->kick_state = DESIGNATED_POLLER;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
inspect_worker);
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv);
}
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
}
}
// even if we didn't win the cas, there's a worker, we can stop
found_worker = true;
@ -609,9 +692,12 @@ static bool check_neighbourhood_for_available_poller(
break;
}
inspect_worker = inspect_worker->next;
} while (inspect_worker != inspect->root_worker);
} while (!found_worker && inspect_worker != inspect->root_worker);
}
if (!found_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true;
if (inspect == neighbourhood->active_root) {
neighbourhood->active_root =
@ -629,15 +715,22 @@ static bool check_neighbourhood_for_available_poller(
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
}
if (worker_hdl != NULL) *worker_hdl = NULL;
worker->kick_state = KICKED;
/* Make sure we appear kicked */
SET_KICK_STATE(worker, KICKED);
grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->kick_state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
worker->next->kick_state = DESIGNATED_POLLER;
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu);
@ -646,9 +739,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
} else {
gpr_atm_no_barrier_store(&g_active_poller, 0);
gpr_mu_unlock(&pollset->mu);
size_t poller_neighbourhood_idx =
(size_t)(pollset->neighbourhood - g_neighbourhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false;
bool scan_state[MAX_NEIGHBOURHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
@ -684,6 +777,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv);
}
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
@ -704,16 +800,18 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE;
}
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure);
GPR_ASSERT(!pollset->shutting_down);
GPR_ASSERT(!pollset->seen_inactive);
gpr_mu_unlock(&pollset->mu);
append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
err_desc);
gpr_mu_lock(&pollset->mu);
gpr_tls_set(&g_current_thread_worker, 0);
} else {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
}
end_worker(exec_ctx, pollset, &worker, worker_hdl);
gpr_tls_set(&g_current_thread_pollset, 0);
@ -722,46 +820,136 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static grpc_error *pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
gpr_strvec_init(&log);
char *tmp;
gpr_asprintf(
&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset),
(void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
kick_state_string(pollset->root_worker->kick_state),
pollset->root_worker->next,
kick_state_string(pollset->root_worker->next->kick_state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
kick_state_string(specific_worker->kick_state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
gpr_strvec_destroy(&log);
gpr_log(GPR_ERROR, "%s", tmp);
gpr_free(tmp);
}
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) {
pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
}
return GRPC_ERROR_NONE;
}
grpc_pollset_worker *next_worker = root_worker->next;
if (root_worker == next_worker &&
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
root_worker->kick_state = KICKED;
if (root_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
return GRPC_ERROR_NONE;
} else if (next_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
return GRPC_ERROR_NONE;
} else if (root_worker ==
next_worker && // only try and wake up a poller if
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (next_worker->kick_state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv);
next_worker->kick_state = KICKED;
SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv);
return GRPC_ERROR_NONE;
} else if (next_worker->kick_state == DESIGNATED_POLLER) {
if (root_worker->kick_state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
gpr_cv_signal(&root_worker->cv);
}
return GRPC_ERROR_NONE;
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
SET_KICK_STATE(next_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
} else {
GPR_ASSERT(next_worker->kick_state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
return GRPC_ERROR_NONE;
}
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
return GRPC_ERROR_NONE;
}
} else if (specific_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
return GRPC_ERROR_NONE;
} else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) {
specific_worker->kick_state = KICKED;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
return GRPC_ERROR_NONE;
} else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
specific_worker->kick_state = KICKED;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (specific_worker->initialized_cv) {
specific_worker->kick_state = KICKED;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv);
return GRPC_ERROR_NONE;
} else {
specific_worker->kick_state = KICKED;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
return GRPC_ERROR_NONE;
}
}
@ -807,6 +995,7 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
static void shutdown_engine(void) {
fd_global_shutdown();
pollset_global_shutdown();
close(g_epfd);
}
static const grpc_event_engine_vtable vtable = {
@ -843,9 +1032,6 @@ static const grpc_event_engine_vtable vtable = {
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
* Create a dummy epoll_fd to make sure epoll support is available */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
/* TODO(ctiller): temporary, until this stabilizes */
if (!explicit_request) return NULL;
if (!grpc_has_wakeup_fd()) {
return NULL;
}
@ -864,6 +1050,8 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
return NULL;
}
gpr_log(GPR_ERROR, "grpc epoll fd: %d", g_epfd);
return &vtable;
}

@ -1008,12 +1008,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
/*******************************************************************************
@ -1224,7 +1224,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@ -1236,7 +1236,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,

@ -561,12 +561,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
/*******************************************************************************
@ -697,11 +697,11 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
}
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,

@ -439,12 +439,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
/*******************************************************************************
@ -711,7 +711,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@ -723,7 +723,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
@ -1051,8 +1051,8 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
/* Introduce a spurious completion.
If we do not, then it may be that the fd-specific epoll set consumed
a completion without being polled, leading to a missed edge going up. */
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable;
if (append_error(&error, pollable_materialize(&pollset->pollable),

@ -934,12 +934,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}
/*******************************************************************************
@ -1116,7 +1116,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll
@ -1128,7 +1128,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
}
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@ -1733,7 +1733,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux(
if (!is_grpc_wakeup_signal_initialized) {
/* TODO(ctiller): when other epoll engines are ready, remove the true || to
* force this to be explitly chosen if needed */
if (true || explicit_request) {
if (explicit_request) {
grpc_use_signal(SIGRTMIN + 6);
} else {
return NULL;

@ -21,12 +21,20 @@
#ifdef GRPC_UV
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/executor.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/pollset_uv.h"
#include "src/core/lib/iomgr/tcp_uv.h"
gpr_thd_id g_init_thread;
void grpc_iomgr_platform_init(void) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_global_init();
grpc_register_tracer(&grpc_tcp_trace);
grpc_executor_set_threading(&exec_ctx, false);
g_init_thread = gpr_thd_currentid();
grpc_exec_ctx_finish(&exec_ctx);
}
void grpc_iomgr_platform_flush(void) {}
void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }

@ -0,0 +1,37 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_UV_H
#define GRPC_CORE_LIB_IOMGR_IOMGR_UV_H
#include "src/core/lib/iomgr/iomgr_internal.h"
#include <grpc/support/thd.h>
/* The thread ID of the thread on which grpc was initialized. Used to verify
* that all calls into libuv are made on that same thread */
extern gpr_thd_id g_init_thread;
#ifdef GRPC_UV_THREAD_CHECK
#define GRPC_UV_ASSERT_SAME_THREAD() \
GPR_ASSERT(gpr_thd_currentid() == g_init_thread)
#else
#define GRPC_UV_ASSERT_SAME_THREAD()
#endif /* GRPC_UV_THREAD_CHECK */
#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_UV_H */
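
[Note] libuv handles are not thread-safe, so this header (together with g_init_thread captured in iomgr_uv.c above) pins every iomgr entry point to the thread that initialized gRPC. The mechanism in miniature, with illustrative names rather than gRPC's actual symbols, assuming a C++ rendering for brevity:

#include <cassert>
#include <thread>

// Captured once at library init, mirroring g_init_thread above.
static std::thread::id g_loop_thread;

void PlatformInit() { g_loop_thread = std::this_thread::get_id(); }

// Every entry point that may touch a libuv handle asserts it runs on
// the init thread; like GRPC_UV_ASSERT_SAME_THREAD, it can compile away.
#define ASSERT_SAME_THREAD() \
  assert(std::this_thread::get_id() == g_loop_thread)

void TimerCancel() {
  ASSERT_SAME_THREAD();  // call PlatformInit() first, on the loop thread
  // ... safe to manipulate uv_timer_t handles here ...
}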

@ -79,12 +79,12 @@ bool grpc_lfev_is_shutdown(gpr_atm *state) {
}
void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
grpc_closure *closure) {
grpc_closure *closure, const char *variable) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_notify_on: %p curr=%p closure=%p", state,
(void *)curr, closure);
gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
state, (void *)curr, closure);
}
switch (curr) {
case CLOSURE_NOT_READY: {
@ -149,7 +149,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_set_shutdown: %p curr=%p err=%s", state,
gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
(void *)curr, grpc_error_string(shutdown_err));
}
switch (curr) {
@ -193,12 +193,14 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
GPR_UNREACHABLE_CODE(return false);
}
void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
const char *variable) {
while (true) {
gpr_atm curr = gpr_atm_no_barrier_load(state);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "lfev_set_ready: %p curr=%p", state, (void *)curr);
gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
(void *)curr);
}
switch (curr) {

@ -30,10 +30,11 @@ void grpc_lfev_destroy(gpr_atm *state);
bool grpc_lfev_is_shutdown(gpr_atm *state);
void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
grpc_closure *closure);
grpc_closure *closure, const char *variable);
/* Returns true on first successful shutdown */
bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
grpc_error *shutdown_err);
void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state);
void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
const char *variable);
#endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */

@ -28,6 +28,7 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/pollset.h"
#include "src/core/lib/iomgr/pollset_uv.h"
@ -70,6 +71,7 @@ void grpc_pollset_global_init(void) {
}
void grpc_pollset_global_shutdown(void) {
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_destroy(&grpc_polling_mu);
uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb);
}
@ -79,6 +81,7 @@ static void timer_run_cb(uv_timer_t *timer) {}
static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; }
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
GRPC_UV_ASSERT_SAME_THREAD();
*mu = &grpc_polling_mu;
uv_timer_init(uv_default_loop(), &pollset->timer);
pollset->shutting_down = 0;
@ -87,6 +90,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_closure *closure) {
GPR_ASSERT(!pollset->shutting_down);
GRPC_UV_ASSERT_SAME_THREAD();
pollset->shutting_down = 1;
if (grpc_pollset_work_run_loop) {
// Drain any pending UV callbacks without blocking
@ -99,6 +103,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
}
void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_close((uv_handle_t *)&pollset->timer, timer_close_cb);
// timer.data is a boolean indicating that the timer has finished closing
pollset->timer.data = (void *)0;
@ -113,6 +118,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker **worker_hdl,
gpr_timespec now, gpr_timespec deadline) {
uint64_t timeout;
GRPC_UV_ASSERT_SAME_THREAD();
gpr_mu_unlock(&grpc_polling_mu);
if (grpc_pollset_work_run_loop) {
if (gpr_time_cmp(deadline, now) >= 0) {
@ -141,6 +147,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_error *grpc_pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) {
GRPC_UV_ASSERT_SAME_THREAD();
uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
return GRPC_ERROR_NONE;
}

@ -30,6 +30,7 @@
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
@ -114,11 +115,14 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_error *error;
int retry_status;
char *port = r->port;
gpr_free(req);
retry_status = retry_named_port_failure(status, r, getaddrinfo_callback);
if (retry_status == 0) {
// The request is being retried. Nothing should be done here
/* The request is being retried. It is using its own port string, so we free
* the original one */
gpr_free(port);
return;
}
/* Either no retry was attempted, or the retry failed. Either way, the
@ -171,6 +175,8 @@ static grpc_error *blocking_resolve_address_impl(
grpc_error *err;
int retry_status;
GRPC_UV_ASSERT_SAME_THREAD();
req.addrinfo = NULL;
err = try_split_host_port(name, default_port, &host, &port);
@ -218,16 +224,19 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
grpc_pollset_set *interested_parties,
grpc_closure *on_done,
grpc_resolved_addresses **addrs) {
uv_getaddrinfo_t *req;
request *r;
struct addrinfo *hints;
char *host;
char *port;
uv_getaddrinfo_t *req = NULL;
request *r = NULL;
struct addrinfo *hints = NULL;
char *host = NULL;
char *port = NULL;
grpc_error *err;
int s;
GRPC_UV_ASSERT_SAME_THREAD();
err = try_split_host_port(name, default_port, &host, &port);
if (err != GRPC_ERROR_NONE) {
GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
gpr_free(host);
gpr_free(port);
return;
}
r = gpr_malloc(sizeof(request));

@ -220,6 +220,11 @@ const char *grpc_sockaddr_get_uri_scheme(
return NULL;
}
int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr) {
const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
return addr->sa_family;
}
int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) {
const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr;
switch (addr->sa_family) {

@ -75,4 +75,6 @@ char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr);
/* Returns the URI scheme corresponding to \a addr */
const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr);
int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr);
#endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */

@ -26,6 +26,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/tcp_uv.h"
@ -124,6 +125,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
(void)channel_args;
(void)interested_parties;
GRPC_UV_ASSERT_SAME_THREAD();
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {

@ -20,6 +20,7 @@
#ifdef GRPC_UV
#include <assert.h>
#include <string.h>
#include <grpc/support/alloc.h>
@ -27,6 +28,7 @@
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/tcp_server.h"
@ -43,6 +45,8 @@ struct grpc_tcp_listener {
struct grpc_tcp_listener *next;
bool closed;
bool has_pending_connection;
};
struct grpc_tcp_server {
@ -104,6 +108,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
}
grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
GRPC_UV_ASSERT_SAME_THREAD();
gpr_ref(&s->refs);
return s;
}
@ -168,6 +173,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
GRPC_UV_ASSERT_SAME_THREAD();
if (gpr_unref(&s->refs)) {
/* Complete shutdown_starting work before destroying. */
grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
@ -183,18 +189,49 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
}
}
static void accepted_connection_close_cb(uv_handle_t *handle) {
gpr_free(handle);
}
static void on_connect(uv_stream_t *server, int status) {
grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) {
grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
uv_tcp_t *client;
grpc_endpoint *ep = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resolved_address peer_name;
char *peer_name_string;
int err;
uv_tcp_t *server = sp->handle;
client = gpr_malloc(sizeof(uv_tcp_t));
uv_tcp_init(uv_default_loop(), client);
// UV documentation says this is guaranteed to succeed
uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
peer_name_string = NULL;
memset(&peer_name, 0, sizeof(grpc_resolved_address));
peer_name.len = sizeof(struct sockaddr_storage);
err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
(int *)&peer_name.len);
if (err == 0) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err));
}
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
if (peer_name_string) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string);
} else {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server);
}
}
ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
acceptor);
gpr_free(peer_name_string);
}
static void on_connect(uv_stream_t *server, int status) {
grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (status < 0) {
switch (status) {
@ -207,35 +244,19 @@ static void on_connect(uv_stream_t *server, int status) {
}
}
client = gpr_malloc(sizeof(uv_tcp_t));
uv_tcp_init(uv_default_loop(), client);
// UV documentation says this is guaranteed to succeed
uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
// If the server has not been started, we discard incoming connections
if (sp->server->on_accept_cb == NULL) {
uv_close((uv_handle_t *)client, accepted_connection_close_cb);
GPR_ASSERT(!sp->has_pending_connection);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server);
}
// Create acceptor.
if (sp->server->on_accept_cb) {
finish_accept(&exec_ctx, sp);
} else {
peer_name_string = NULL;
memset(&peer_name, 0, sizeof(grpc_resolved_address));
peer_name.len = sizeof(struct sockaddr_storage);
err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
(int *)&peer_name.len);
if (err == 0) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
}
ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
// Create acceptor.
grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
acceptor->from_server = sp->server;
acceptor->port_index = sp->port_index;
acceptor->fd_index = 0;
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
acceptor);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(peer_name_string);
sp->has_pending_connection = true;
}
grpc_exec_ctx_finish(&exec_ctx);
}
static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
@ -282,7 +303,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle,
GPR_ASSERT(port >= 0);
GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
sp = gpr_malloc(sizeof(grpc_tcp_listener));
sp = gpr_zalloc(sizeof(grpc_tcp_listener));
sp->next = NULL;
if (s->head == NULL) {
s->head = sp;
@ -316,6 +337,9 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
unsigned port_index = 0;
int status;
grpc_error *error = GRPC_ERROR_NONE;
int family;
GRPC_UV_ASSERT_SAME_THREAD();
if (s->tail != NULL) {
port_index = s->tail->port_index + 1;
@ -353,7 +377,18 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
}
handle = gpr_malloc(sizeof(uv_tcp_t));
status = uv_tcp_init(uv_default_loop(), handle);
family = grpc_sockaddr_get_family(addr);
status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family);
#if defined(GPR_LINUX) && defined(SO_REUSEPORT)
if (family == AF_INET || family == AF_INET6) {
int fd;
uv_fileno((uv_handle_t *)handle, &fd);
int enable = 1;
setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable));
}
#endif /* GPR_LINUX && SO_REUSEPORT */
if (status == 0) {
error = add_socket_to_server(s, handle, addr, port_index, &sp);
} else {
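
[Note] The switch from uv_tcp_init to uv_tcp_init_ex is what makes the setsockopt call above possible: uv_tcp_init_ex creates the OS socket eagerly for the given address family, so uv_fileno can return a real descriptor before bind. The sequence in isolation (a sketch with error handling elided; SO_REUSEPORT is a Linux-specific option):

#include <uv.h>
#include <sys/socket.h>

void InitListenerWithReusePort(uv_tcp_t* handle, int family /* AF_INET[6] */) {
  // Eagerly creates the socket for `family`, unlike plain uv_tcp_init.
  uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family);
#if defined(SO_REUSEPORT)
  uv_os_fd_t fd;
  uv_fileno((uv_handle_t*)handle, &fd);  // valid fd exists pre-bind
  int enable = 1;
  setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable));
#endif
  // ... uv_tcp_bind() / uv_listen() as in add_socket_to_server ...
}
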
@ -366,6 +401,18 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
gpr_free(allocated_addr);
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
char *port_string;
grpc_sockaddr_to_string(&port_string, addr, 0);
const char *str = grpc_error_string(error);
if (port_string) {
gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_free(port_string);
} else {
gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str);
}
}
if (error != GRPC_ERROR_NONE) {
grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
@ -385,13 +432,19 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server,
grpc_tcp_listener *sp;
(void)pollsets;
(void)pollset_count;
GRPC_UV_ASSERT_SAME_THREAD();
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "SERVER_START %p", server);
}
GPR_ASSERT(on_accept_cb);
GPR_ASSERT(!server->on_accept_cb);
server->on_accept_cb = on_accept_cb;
server->on_accept_cb_arg = cb_arg;
for (sp = server->head; sp; sp = sp->next) {
GPR_ASSERT(uv_listen((uv_stream_t *)sp->handle, SOMAXCONN, on_connect) ==
0);
if (sp->has_pending_connection) {
finish_accept(exec_ctx, sp);
sp->has_pending_connection = false;
}
}
}
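
[Note] The restructuring above separates connection arrival from acceptance: on_connect only parks a flag when the server has no accept callback installed yet, and grpc_tcp_server_start drains any parked connection once the callback exists. The handoff reduced to a sketch (illustrative names; the real finish_accept does the uv_accept, grpc_tcp_create, and on_accept_cb work):

struct Listener {
  bool has_pending_connection = false;
};

void FinishAccept(Listener* l) {
  // Stand-in for uv_accept + endpoint creation + accept-callback call.
  (void)l;
}

// libuv connection callback: accept now, or park until the server starts.
void OnConnect(Listener* l, bool server_started) {
  if (server_started) {
    FinishAccept(l);
  } else {
    l->has_pending_connection = true;
  }
}

// Called per listener from server start: drain the parked connection.
void ServerStart(Listener* l) {
  if (l->has_pending_connection) {
    FinishAccept(l);
    l->has_pending_connection = false;
  }
}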

@ -30,6 +30,7 @@
#include <grpc/support/string_util.h>
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/network_status_tracker.h"
#include "src/core/lib/iomgr/resource_quota.h"
#include "src/core/lib/iomgr/tcp_uv.h"
@ -183,6 +184,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_tcp *tcp = (grpc_tcp *)ep;
int status;
grpc_error *error = GRPC_ERROR_NONE;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(tcp->read_cb == NULL);
tcp->read_cb = cb;
tcp->read_slices = read_slices;
@ -236,6 +238,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
unsigned int i;
grpc_slice *slice;
uv_write_t *write_req;
GRPC_UV_ASSERT_SAME_THREAD();
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t j;
@ -307,6 +310,10 @@ static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
grpc_error *why) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
const char *str = grpc_error_string(why);
gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str);
}
tcp->shutting_down = true;
uv_shutdown_t *req = &tcp->shutdown_req;
uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);

@ -24,6 +24,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/iomgr_uv.h"
#include "src/core/lib/iomgr/timer.h"
#include <uv.h>
@ -43,6 +44,7 @@ static void stop_uv_timer(uv_timer_t *handle) {
void run_expired_timer(uv_timer_t *handle) {
grpc_timer *timer = (grpc_timer *)handle->data;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GRPC_UV_ASSERT_SAME_THREAD();
GPR_ASSERT(timer->pending);
timer->pending = 0;
GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
@ -55,6 +57,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
gpr_timespec now) {
uint64_t timeout;
uv_timer_t *uv_timer;
GRPC_UV_ASSERT_SAME_THREAD();
timer->closure = closure;
if (gpr_time_cmp(deadline, now) <= 0) {
timer->pending = 0;
@ -75,6 +78,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
}
void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
GRPC_UV_ASSERT_SAME_THREAD();
if (timer->pending) {
timer->pending = 0;
GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
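GRPC_UV_ASSERT_SAME_THREAD, sprinkled through the iomgr files above, guards against touching the libuv event loop from a foreign thread. A hedged Ruby sketch of the same single-owner-thread guard (the class and method names are hypothetical, not part of gRPC):

# Sketch: a single-owner-thread guard analogous to GRPC_UV_ASSERT_SAME_THREAD.
class ThreadGuard
  def initialize
    @owner = Thread.current  # the thread that created the loop owns it
  end

  def assert_same_thread!
    return if Thread.current == @owner
    raise "accessed from #{Thread.current}, but owned by #{@owner}"
  end
end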

@ -23,13 +23,22 @@
#ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL
namespace grpc {
namespace {
ThreadPoolInterface* CreateDefaultThreadPool() {
ThreadPoolInterface* CreateDefaultThreadPoolImpl() {
int cores = gpr_cpu_num_cores();
if (!cores) cores = 4;
return new DynamicThreadPool(cores);
}
CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl;
} // namespace
ThreadPoolInterface* CreateDefaultThreadPool() { return g_ctp_impl(); }
void SetCreateThreadPool(CreateThreadPoolFunc func) { g_ctp_impl = func; }
} // namespace grpc
#endif // !GRPC_CUSTOM_DEFAULT_THREAD_POOL

@ -32,6 +32,10 @@ class ThreadPoolInterface {
virtual void Add(const std::function<void()>& callback) = 0;
};
// Allows different codebases to use their own thread pool impls
typedef ThreadPoolInterface* (*CreateThreadPoolFunc)(void);
void SetCreateThreadPool(CreateThreadPoolFunc func);
ThreadPoolInterface* CreateDefaultThreadPool();
} // namespace grpc
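The C++ change above turns the default thread pool into a swappable factory: CreateDefaultThreadPool now dispatches through a function pointer that SetCreateThreadPool can override. A rough Ruby analog of that pattern (SimplePool and the snake_case names are hypothetical stand-ins, not the C++ API):

require 'etc'

# Hypothetical pool used only for this sketch.
class SimplePool
  def initialize(size)
    @size = size
  end

  def add(&work)
    Thread.new(&work)
  end
end

# Default factory mirrors CreateDefaultThreadPoolImpl: cores, or 4 if unknown.
$create_pool = proc do
  cores = Etc.nprocessors
  cores = 4 if cores.zero?
  SimplePool.new(cores)
end

def set_create_thread_pool(factory)
  $create_pool = factory  # analogous to SetCreateThreadPool
end

def create_default_thread_pool
  $create_pool.call       # analogous to CreateDefaultThreadPool
end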

@ -40,13 +40,13 @@ end
module GRPC
# The ActiveCall class provides simple methods for sending marshallable
# data to a call
class ActiveCall
class ActiveCall # rubocop:disable Metrics/ClassLength
include Core::TimeConsts
include Core::CallOps
extend Forwardable
attr_reader :deadline, :metadata_sent, :metadata_to_send
attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert
def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=,
:peer, :peer_cert, :trailing_metadata
:trailing_metadata, :status
# client_invoke begins a client invocation.
#
@ -100,6 +100,18 @@ module GRPC
fail(ArgumentError, 'Already sent md') if started && metadata_to_send
@metadata_to_send = metadata_to_send || {} unless started
@send_initial_md_mutex = Mutex.new
@output_stream_done = false
@input_stream_done = false
@call_finished = false
@call_finished_mu = Mutex.new
@client_call_executed = false
@client_call_executed_mu = Mutex.new
# set the peer now so that the accessor can still function
# after the server closes the call
@peer = call.peer
end
# Sends the initial metadata that has yet to be sent.
@ -142,11 +154,9 @@ module GRPC
Operation.new(self)
end
# finished waits until a client call is completed.
#
# It blocks until the remote endpoint acknowledges by sending a status.
def finished
def receive_and_check_status
batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
set_input_stream_done
attach_status_results_and_complete_call(batch_result)
end
@ -155,8 +165,6 @@ module GRPC
@call.trailing_metadata = recv_status_batch_result.status.metadata
end
@call.status = recv_status_batch_result.status
@call.close
op_is_done
# The RECV_STATUS in run_batch always succeeds
# Check the status for a bad status or failed run batch
@ -193,9 +201,19 @@ module GRPC
}
ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished
@call.run_batch(ops)
set_output_stream_done
nil
end
# Intended for use on server-side calls when a single request from
# the client is expected (i.e., unary and server-streaming RPC types).
def read_unary_request
req = remote_read
set_input_stream_done
req
end
def server_unary_response(req, trailing_metadata: {},
code: Core::StatusCodes::OK, details: 'OK')
ops = {}
@ -211,6 +229,7 @@ module GRPC
ops[RECV_CLOSE_ON_SERVER] = nil
@call.run_batch(ops)
set_output_stream_done
end
# remote_read reads a response from the remote endpoint.
@ -241,6 +260,8 @@ module GRPC
# each_remote_read passes each response to the given block or returns an
# enumerator of the responses if no block is given.
# Used to generate the request enumerable for
# server-side client-streaming RPCs.
#
# == Enumerator ==
#
@ -258,10 +279,14 @@ module GRPC
# @return [Enumerator] if no block was given
def each_remote_read
return enum_for(:each_remote_read) unless block_given?
loop do
resp = remote_read
break if resp.nil? # the last response was received
yield resp
begin
loop do
resp = remote_read
break if resp.nil? # the last response was received
yield resp
end
ensure
set_input_stream_done
end
end
@ -287,13 +312,17 @@ module GRPC
# @return [Enumerator] if no block was given
def each_remote_read_then_finish
return enum_for(:each_remote_read_then_finish) unless block_given?
loop do
resp = remote_read
if resp.nil? # the last response was received, but not finished yet
finished
break
begin
loop do
resp = remote_read
if resp.nil? # the last response was received
receive_and_check_status
break
end
yield resp
end
yield resp
ensure
set_input_stream_done
end
end
@ -305,6 +334,7 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def request_response(req, metadata: {})
raise_error_if_already_executed
ops = {
SEND_MESSAGE => @marshal.call(req),
SEND_CLOSE_FROM_CLIENT => nil,
@ -319,7 +349,15 @@ module GRPC
end
@metadata_sent = true
end
batch_result = @call.run_batch(ops)
begin
batch_result = @call.run_batch(ops)
# no need to check for cancellation after a CallError because this
# batch contains a RECV_STATUS op
ensure
set_input_stream_done
set_output_stream_done
end
@call.metadata = batch_result.metadata
attach_status_results_and_complete_call(batch_result)
@ -339,10 +377,20 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Object] the response received from the server
def client_streamer(requests, metadata: {})
# Metadata might have already been sent if this is an operation view
merge_metadata_and_send_if_not_already_sent(metadata)
raise_error_if_already_executed
begin
merge_metadata_and_send_if_not_already_sent(metadata)
requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) }
rescue GRPC::Core::CallError => e
receive_and_check_status # check for Cancelled
raise e
rescue => e
set_input_stream_done
raise e
ensure
set_output_stream_done
end
requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) }
batch_result = @call.run_batch(
SEND_CLOSE_FROM_CLIENT => nil,
RECV_INITIAL_METADATA => nil,
@ -350,12 +398,11 @@ module GRPC
RECV_STATUS_ON_CLIENT => nil
)
set_input_stream_done
@call.metadata = batch_result.metadata
attach_status_results_and_complete_call(batch_result)
get_message_from_batch_result(batch_result)
rescue GRPC::Core::CallError => e
finished # checks for Cancelled
raise e
end
# server_streamer sends one request to the GRPC server, which yields a
@ -373,6 +420,7 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Enumerator|nil] a response Enumerator
def server_streamer(req, metadata: {})
raise_error_if_already_executed
ops = {
SEND_MESSAGE => @marshal.call(req),
SEND_CLOSE_FROM_CLIENT => nil
@ -384,13 +432,22 @@ module GRPC
end
@metadata_sent = true
end
@call.run_batch(ops)
begin
@call.run_batch(ops)
rescue GRPC::Core::CallError => e
receive_and_check_status # checks for Cancelled
raise e
rescue => e
set_input_stream_done
raise e
ensure
set_output_stream_done
end
replies = enum_for(:each_remote_read_then_finish)
return replies unless block_given?
replies.each { |r| yield r }
rescue GRPC::Core::CallError => e
finished # checks for Cancelled
raise e
end
# bidi_streamer sends a stream of requests to the GRPC server, and yields
@ -421,6 +478,7 @@ module GRPC
# a list, multiple metadata for its key are sent
# @return [Enumerator, nil] a response Enumerator
def bidi_streamer(requests, metadata: {}, &blk)
raise_error_if_already_executed
# Metadata might have already been sent if this is an operation view
merge_metadata_and_send_if_not_already_sent(metadata)
bd = BidiCall.new(@call,
@ -428,7 +486,10 @@ module GRPC
@unmarshal,
metadata_received: @metadata_received)
bd.run_on_client(requests, @op_notifier, &blk)
bd.run_on_client(requests,
proc { set_input_stream_done },
proc { set_output_stream_done },
&blk)
end
# run_server_bidi orchestrates a BiDi stream processing on a server.
@ -449,7 +510,7 @@ module GRPC
metadata_received: @metadata_received,
req_view: MultiReqView.new(self))
bd.run_on_server(gen_each_reply)
bd.run_on_server(gen_each_reply, proc { set_input_stream_done })
end
# Waits till an operation completes
@ -459,7 +520,8 @@ module GRPC
@op_notifier.wait
end
# Signals that an operation is done
# Signals that an operation is done.
# Only relevant on the client side (this is a no-op on the server side)
def op_is_done
return if @op_notifier.nil?
@op_notifier.notify(self)
@ -484,8 +546,40 @@ module GRPC
end
end
def attach_peer_cert(peer_cert)
@peer_cert = peer_cert
end
private
# To be called once the "input stream" has been completely
# read through (i.e., done reading from the client or received status).
# Note that this is idempotent.
def set_input_stream_done
@call_finished_mu.synchronize do
@input_stream_done = true
maybe_finish_and_close_call_locked
end
end
# To be called once the "output stream" has been completely
# sent through (i.e., done sending from the client or sent status).
# Note that this is idempotent.
def set_output_stream_done
@call_finished_mu.synchronize do
@output_stream_done = true
maybe_finish_and_close_call_locked
end
end
def maybe_finish_and_close_call_locked
return unless @output_stream_done && @input_stream_done
return if @call_finished
@call_finished = true
op_is_done
@call.close
end
# Starts the call if not already started
# @param metadata [Hash] metadata to be sent to the server. If a value is
# a list, multiple metadata for its key are sent
@ -493,6 +587,15 @@ module GRPC
merge_metadata_to_send(metadata) && send_initial_metadata
end
def raise_error_if_already_executed
@client_call_executed_mu.synchronize do
if @client_call_executed
fail GRPC::Core::CallError, 'attempting to re-run a call'
end
@client_call_executed = true
end
end
def self.view_class(*visible_methods)
Class.new do
extend ::Forwardable
@ -518,6 +621,7 @@ module GRPC
# server client_streamer handlers.
MultiReqView = view_class(:cancelled?, :deadline,
:each_remote_read, :metadata, :output_metadata,
:peer, :peer_cert,
:send_initial_metadata,
:metadata_to_send,
:merge_metadata_to_send,
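The new set_input_stream_done / set_output_stream_done pair above implements a small two-flag state machine: the call is closed exactly once, when both directions have finished, regardless of which side finishes first. A self-contained sketch of that idempotent pattern (the class and names are illustrative, not the ActiveCall API):

# Sketch: close a resource once, when both the input and output
# streams have been marked done, in any order and idempotently.
class StreamDoneLatch
  def initialize(&on_both_done)
    @mu = Mutex.new
    @input_done = false
    @output_done = false
    @finished = false
    @on_both_done = on_both_done
  end

  def input_done!
    @mu.synchronize do
      @input_done = true
      maybe_finish_locked
    end
  end

  def output_done!
    @mu.synchronize do
      @output_done = true
      maybe_finish_locked
    end
  end

  private

  def maybe_finish_locked
    return unless @input_done && @output_done
    return if @finished      # idempotent: fire the callback only once
    @finished = true
    @on_both_done.call
  end
end

latch = StreamDoneLatch.new { puts 'call closed' }
latch.output_done!
latch.input_done!   # prints "call closed" exactly once
latch.input_done!   # no-op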

@ -62,12 +62,19 @@ module GRPC
# block that can be invoked with each response.
#
# @param requests the Enumerable of requests to send
# @param op_notifier a Notifier used to signal completion
# @param set_input_stream_done [Proc] called back when we're done
# reading the input stream
# @param set_output_stream_done [Proc] called back when we're done
# sending data on the output stream
# @return an Enumerator of requests to yield
def run_on_client(requests, op_notifier, &blk)
@op_notifier = op_notifier
@enq_th = Thread.new { write_loop(requests) }
read_loop(&blk)
def run_on_client(requests,
set_input_stream_done,
set_output_stream_done,
&blk)
@enq_th = Thread.new do
write_loop(requests, set_output_stream_done: set_output_stream_done)
end
read_loop(set_input_stream_done, &blk)
end
# Begins orchestration of the Bidi stream for a server generating replies.
@ -81,12 +88,17 @@ module GRPC
# produced by gen_each_reply could ignore the received_msgs
#
# @param gen_each_reply [Proc] generates the BiDi stream replies.
def run_on_server(gen_each_reply)
# @param set_input_stream_done [Proc] callback invoked once the
# input stream has been completely read through.
def run_on_server(gen_each_reply, set_input_stream_done)
# Pass in the optional call object parameter if possible
if gen_each_reply.arity == 1
replys = gen_each_reply.call(read_loop(is_client: false))
replys = gen_each_reply.call(
read_loop(set_input_stream_done, is_client: false))
elsif gen_each_reply.arity == 2
replys = gen_each_reply.call(read_loop(is_client: false), @req_view)
replys = gen_each_reply.call(
read_loop(set_input_stream_done, is_client: false),
@req_view)
else
fail 'Illegal arity of reply generator'
end
@ -99,22 +111,6 @@ module GRPC
END_OF_READS = :end_of_reads
END_OF_WRITES = :end_of_writes
# signals that bidi operation is complete
def notify_done
return unless @op_notifier
GRPC.logger.debug("bidi-notify-done: notifying #{@op_notifier}")
@op_notifier.notify(self)
end
# signals that a bidi operation is complete (read + write)
def finished
@done_mutex.synchronize do
return unless @reads_complete && @writes_complete && !@complete
@call.close
@complete = true
end
end
# performs a read using @call.run_batch, ensures metadata is set up
def read_using_run_batch
ops = { RECV_MESSAGE => nil }
@ -127,7 +123,8 @@ module GRPC
batch_result
end
def write_loop(requests, is_client: true)
# set_output_stream_done is only relevant on the client side
def write_loop(requests, is_client: true, set_output_stream_done: nil)
GRPC.logger.debug('bidi-write-loop: starting')
count = 0
requests.each do |req|
@ -151,23 +148,20 @@ module GRPC
GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting")
@call.run_batch(SEND_CLOSE_FROM_CLIENT => nil)
GRPC.logger.debug('bidi-write-loop: done')
notify_done
@writes_complete = true
finished
end
GRPC.logger.debug('bidi-write-loop: finished')
rescue StandardError => e
GRPC.logger.warn('bidi-write-loop: failed')
GRPC.logger.warn(e)
notify_done
@writes_complete = true
finished
raise e
ensure
set_output_stream_done.call if is_client
end
# Provides an enumerator that yields results of remote reads
def read_loop(is_client: true)
def read_loop(set_input_stream_done, is_client: true)
return enum_for(:read_loop,
set_input_stream_done,
is_client: is_client) unless block_given?
GRPC.logger.debug('bidi-read-loop: starting')
begin
@ -201,10 +195,10 @@ module GRPC
GRPC.logger.warn('bidi: read-loop failed')
GRPC.logger.warn(e)
raise e
ensure
set_input_stream_done.call
end
GRPC.logger.debug('bidi-read-loop: finished')
@reads_complete = true
finished
# Make sure that the write loop is done before finishing the call.
# Note that blocking is ok at this point because we've already received
# a status
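With the Notifier removed, run_on_client above now receives two completion procs and invokes them from the write loop's and read loop's ensure blocks, so the done signals fire even on error paths. A reduced Ruby sketch of that wiring (send_message and receive_message are hypothetical stand-ins for the run_batch calls):

# Sketch: read/write loops report completion through injected procs,
# guaranteed via ensure even when a loop raises.
def run_bidi(requests, set_input_stream_done, set_output_stream_done)
  writer = Thread.new do
    begin
      requests.each { |req| send_message(req) }  # stand-in for run_batch
    ensure
      set_output_stream_done.call
    end
  end
  begin
    while (resp = receive_message)               # stand-in for run_batch
      yield resp
    end
  ensure
    set_input_stream_done.call
  end
  writer.join
end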

@ -48,7 +48,7 @@ module GRPC
end
def handle_request_response(active_call, mth)
req = active_call.remote_read
req = active_call.read_unary_request
resp = mth.call(req, active_call.single_req_view)
active_call.server_unary_response(
resp, trailing_metadata: active_call.output_metadata)
@ -61,7 +61,7 @@ module GRPC
end
def handle_server_streamer(active_call, mth)
req = active_call.remote_read
req = active_call.read_unary_request
replys = mth.call(req, active_call.single_req_view)
replys.each { |r| active_call.remote_send(r) }
send_status(active_call, OK, 'OK', active_call.output_metadata)

@ -418,6 +418,7 @@ module GRPC
metadata_received: true,
started: false,
metadata_to_send: connect_md)
c.attach_peer_cert(an_rpc.call.peer_cert)
mth = an_rpc.method.to_sym
[c, mth]
end

@ -0,0 +1,137 @@
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require 'grpc'
def create_channel_creds
test_root = File.join(File.dirname(__FILE__), 'testdata')
files = ['ca.pem', 'client.key', 'client.pem']
creds = files.map { |f| File.open(File.join(test_root, f)).read }
GRPC::Core::ChannelCredentials.new(creds[0], creds[1], creds[2])
end
def client_cert
test_root = File.join(File.dirname(__FILE__), 'testdata')
cert = File.open(File.join(test_root, 'client.pem')).read
fail unless cert.is_a?(String)
cert
end
def create_server_creds
test_root = File.join(File.dirname(__FILE__), 'testdata')
p "test root: #{test_root}"
files = ['ca.pem', 'server1.key', 'server1.pem']
creds = files.map { |f| File.open(File.join(test_root, f)).read }
GRPC::Core::ServerCredentials.new(
creds[0],
[{ private_key: creds[1], cert_chain: creds[2] }],
true) # force client auth
end
# A test message
class EchoMsg
def self.marshal(_o)
''
end
def self.unmarshal(_o)
EchoMsg.new
end
end
# a test service that checks the cert of its peer
class SslTestService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg
rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg)
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
def check_peer_cert(call)
error_msg = "want:\n#{client_cert}\n\ngot:\n#{call.peer_cert}"
fail(error_msg) unless call.peer_cert == client_cert
end
def an_rpc(req, call)
check_peer_cert(call)
req
end
def a_client_streaming_rpc(call)
check_peer_cert(call)
call.each_remote_read.each { |r| p r }
EchoMsg.new
end
def a_server_streaming_rpc(_, call)
check_peer_cert(call)
[EchoMsg.new, EchoMsg.new]
end
def a_bidi_rpc(requests, call)
check_peer_cert(call)
requests.each { |r| p r }
[EchoMsg.new, EchoMsg.new]
end
end
SslTestServiceStub = SslTestService.rpc_stub_class
describe 'client-server auth' do
RpcServer = GRPC::RpcServer
before(:all) do
server_opts = {
poll_period: 1
}
@srv = RpcServer.new(**server_opts)
port = @srv.add_http2_port('0.0.0.0:0', create_server_creds)
@srv.handle(SslTestService)
@srv_thd = Thread.new { @srv.run }
@srv.wait_till_running
client_opts = {
channel_args: {
GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr'
}
}
@stub = SslTestServiceStub.new("localhost:#{port}",
create_channel_creds,
**client_opts)
end
after(:all) do
expect(@srv.stopped?).to be(false)
@srv.stop
@srv_thd.join
end
it 'client-server auth with unary RPCs' do
@stub.an_rpc(EchoMsg.new)
end
it 'client-server auth with client streaming RPCs' do
@stub.a_client_streaming_rpc([EchoMsg.new, EchoMsg.new])
end
it 'client-server auth with server streaming RPCs' do
responses = @stub.a_server_streaming_rpc(EchoMsg.new)
responses.each { |r| p r }
end
it 'client-server auth with bidi RPCs' do
responses = @stub.a_bidi_rpc([EchoMsg.new, EchoMsg.new])
responses.each { |r| p r }
end
end

@ -473,7 +473,7 @@ describe GRPC::ActiveCall do
server_call.remote_send('server_response')
expect(client_call.remote_read).to eq('server_response')
server_call.send_status(OK, 'status code is OK')
expect { client_call.finished }.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error
end
it 'finishes ok if the server sends an early status response' do
@ -490,7 +490,7 @@ describe GRPC::ActiveCall do
expect do
call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil)
end.to_not raise_error
expect { client_call.finished }.to_not raise_error
expect { client_call.receive_and_check_status }.to_not raise_error
end
it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do

@ -36,6 +36,53 @@ include GRPC::Core::StatusCodes
include GRPC::Core::TimeConsts
include GRPC::Core::CallOps
# check that methods on a finished/closed call don't crash
def check_op_view_of_finished_client_call(op_view,
expected_metadata,
expected_trailing_metadata)
# use the provided block to try to iterate through any
# remaining responses in the response stream
fail('need something to attempt reads') unless block_given?
expect do
resp = op_view.execute
yield resp
end.to raise_error(GRPC::Core::CallError)
expect { op_view.start_call }.to raise_error(RuntimeError)
sanity_check_values_of_accessors(op_view,
expected_metadata,
expected_trailing_metadata)
expect do
op_view.wait
op_view.cancel
op_view.write_flag = 1
end.to_not raise_error
end
def sanity_check_values_of_accessors(op_view,
expected_metadata,
expected_trailing_metadata)
expected_status = Struct::Status.new
expected_status.code = 0
expected_status.details = 'OK'
expected_status.metadata = expected_trailing_metadata
expect(op_view.status).to eq(expected_status)
expect(op_view.metadata).to eq(expected_metadata)
expect(op_view.trailing_metadata).to eq(expected_trailing_metadata)
expect(op_view.cancelled?).to be(false)
expect(op_view.write_flag).to be(nil)
# The deadline attribute of a call can be either
# a GRPC::Core::TimeSpec or a Time, which are mutually exclusive.
# TODO: fix so that the accessor always returns the same type.
expect(op_view.deadline.is_a?(GRPC::Core::TimeSpec) ||
op_view.deadline.is_a?(Time)).to be(true)
end
describe 'ClientStub' do
let(:noop) { proc { |x| x } }
@ -45,6 +92,7 @@ describe 'ClientStub' do
@method = 'an_rpc_method'
@pass = OK
@fail = INTERNAL
@metadata = { k1: 'v1', k2: 'v2' }
end
after(:each) do
@ -107,7 +155,7 @@ describe 'ClientStub' do
end
end
describe '#request_response' do
describe '#request_response', request_response: true do
before(:each) do
@sent_msg, @resp = 'a_msg', 'a_reply'
end
@ -126,7 +174,7 @@ describe 'ClientStub' do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_request_response(@sent_msg, @resp, @pass,
k1: 'v1', k2: 'v2')
expected_metadata: { k1: 'v1', k2: 'v2' })
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
expect(get_response(stub)).to eq(@resp)
th.join
@ -187,13 +235,24 @@ describe 'ClientStub' do
# Kill the server thread so tests can complete
th.kill
end
it 'should raise ArgumentError if metadata contains invalid values' do
@metadata.merge!(k3: 3)
server_port = create_test_server
host = "localhost:#{server_port}"
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
expect do
get_response(stub)
end.to raise_error(ArgumentError,
/Header values must be of type string or array/)
end
end
describe 'without a call operation' do
def get_response(stub, credentials: nil)
puts credentials.inspect
stub.request_response(@method, @sent_msg, noop, noop,
metadata: { k1: 'v1', k2: 'v2' },
metadata: @metadata,
credentials: credentials)
end
@ -201,40 +260,62 @@ describe 'ClientStub' do
end
describe 'via a call operation' do
after(:each) do
# make sure op.wait doesn't hang, even if there's a bad status
@op.wait
end
def get_response(stub, run_start_call_first: false, credentials: nil)
op = stub.request_response(@method, @sent_msg, noop, noop,
return_op: true,
metadata: { k1: 'v1', k2: 'v2' },
deadline: from_relative_time(2),
credentials: credentials)
expect(op).to be_a(GRPC::ActiveCall::Operation)
op.start_call if run_start_call_first
result = op.execute
op.wait # make sure wait doesn't hang
@op = stub.request_response(@method, @sent_msg, noop, noop,
return_op: true,
metadata: @metadata,
deadline: from_relative_time(2),
credentials: credentials)
expect(@op).to be_a(GRPC::ActiveCall::Operation)
@op.start_call if run_start_call_first
result = @op.execute
result
end
it_behaves_like 'request response'
it 'sends metadata to the server ok when running start_call first' do
def run_op_view_metadata_test(run_start_call_first)
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_request_response(@sent_msg, @resp, @pass,
k1: 'v1', k2: 'v2')
@server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' }
@server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' }
th = run_request_response(
@sent_msg, @resp, @pass,
expected_metadata: @metadata,
server_initial_md: @server_initial_md,
server_trailing_md: @server_trailing_md)
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
expect(get_response(stub)).to eq(@resp)
expect(
get_response(stub,
run_start_call_first: run_start_call_first)).to eq(@resp)
th.join
end
it 'sends metadata to the server ok when running start_call first' do
run_op_view_metadata_test(true)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) { |r| p r }
end
it 'does not crash when used after the call has been finished' do
run_op_view_metadata_test(false)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) { |r| p r }
end
end
end
describe '#client_streamer' do
describe '#client_streamer', client_streamer: true do
before(:each) do
Thread.abort_on_exception = true
server_port = create_test_server
host = "localhost:#{server_port}"
@stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
@metadata = { k1: 'v1', k2: 'v2' }
@sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
@resp = 'a_reply'
end
@ -247,7 +328,8 @@ describe 'ClientStub' do
end
it 'should send metadata to the server ok' do
th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata)
th = run_client_streamer(@sent_msgs, @resp, @pass,
expected_metadata: @metadata)
expect(get_response(@stub)).to eq(@resp)
th.join
end
@ -278,27 +360,50 @@ describe 'ClientStub' do
end
describe 'via a call operation' do
after(:each) do
# make sure op.wait doesn't hang, even if there's a bad status
@op.wait
end
def get_response(stub, run_start_call_first: false)
op = stub.client_streamer(@method, @sent_msgs, noop, noop,
return_op: true, metadata: @metadata)
expect(op).to be_a(GRPC::ActiveCall::Operation)
op.start_call if run_start_call_first
result = op.execute
op.wait # make sure wait doesn't hang
@op = stub.client_streamer(@method, @sent_msgs, noop, noop,
return_op: true, metadata: @metadata)
expect(@op).to be_a(GRPC::ActiveCall::Operation)
@op.start_call if run_start_call_first
result = @op.execute
result
end
it_behaves_like 'client streaming'
it 'sends metadata to the server ok when running start_call first' do
th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata)
expect(get_response(@stub, run_start_call_first: true)).to eq(@resp)
def run_op_view_metadata_test(run_start_call_first)
@server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' }
@server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' }
th = run_client_streamer(
@sent_msgs, @resp, @pass,
expected_metadata: @metadata,
server_initial_md: @server_initial_md,
server_trailing_md: @server_trailing_md)
expect(
get_response(@stub,
run_start_call_first: run_start_call_first)).to eq(@resp)
th.join
end
it 'sends metadata to the server ok when running start_call first' do
run_op_view_metadata_test(true)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) { |r| p r }
end
it 'does not crash when used after the call has been finished' do
run_op_view_metadata_test(false)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) { |r| p r }
end
end
end
describe '#server_streamer' do
describe '#server_streamer', server_streamer: true do
before(:each) do
@sent_msg = 'a_msg'
@replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
@ -328,18 +433,42 @@ describe 'ClientStub' do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_server_streamer(@sent_msg, @replys, @fail,
k1: 'v1', k2: 'v2')
expected_metadata: { k1: 'v1', k2: 'v2' })
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
e = get_responses(stub)
expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
th.join
end
it 'should raise ArgumentError if metadata contains invalid values' do
@metadata.merge!(k3: 3)
server_port = create_test_server
host = "localhost:#{server_port}"
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
expect do
get_responses(stub)
end.to raise_error(ArgumentError,
/Header values must be of type string or array/)
end
it 'the call terminates when there is an unmarshalling error' do
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_server_streamer(@sent_msg, @replys, @pass)
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
unmarshal = proc { fail(ArgumentError, 'test unmarshalling error') }
expect do
get_responses(stub, unmarshal: unmarshal).collect { |r| r }
end.to raise_error(ArgumentError, 'test unmarshalling error')
th.join
end
end
describe 'without a call operation' do
def get_responses(stub)
e = stub.server_streamer(@method, @sent_msg, noop, noop,
metadata: { k1: 'v1', k2: 'v2' })
def get_responses(stub, unmarshal: noop)
e = stub.server_streamer(@method, @sent_msg, noop, unmarshal,
metadata: @metadata)
expect(e).to be_a(Enumerator)
e
end
@ -351,10 +480,10 @@ describe 'ClientStub' do
after(:each) do
@op.wait # make sure wait doesn't hang
end
def get_responses(stub, run_start_call_first: false)
@op = stub.server_streamer(@method, @sent_msg, noop, noop,
def get_responses(stub, run_start_call_first: false, unmarshal: noop)
@op = stub.server_streamer(@method, @sent_msg, noop, unmarshal,
return_op: true,
metadata: { k1: 'v1', k2: 'v2' })
metadata: @metadata)
expect(@op).to be_a(GRPC::ActiveCall::Operation)
@op.start_call if run_start_call_first
e = @op.execute
@ -364,20 +493,41 @@ describe 'ClientStub' do
it_behaves_like 'server streaming'
it 'should send metadata to the server ok when start_call is run first' do
def run_op_view_metadata_test(run_start_call_first)
server_port = create_test_server
host = "localhost:#{server_port}"
th = run_server_streamer(@sent_msg, @replys, @fail,
k1: 'v1', k2: 'v2')
@server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' }
@server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' }
th = run_server_streamer(
@sent_msg, @replys, @pass,
expected_metadata: @metadata,
server_initial_md: @server_initial_md,
server_trailing_md: @server_trailing_md)
stub = GRPC::ClientStub.new(host, :this_channel_is_insecure)
e = get_responses(stub, run_start_call_first: true)
expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
e = get_responses(stub, run_start_call_first: run_start_call_first)
expect(e.collect { |r| r }).to eq(@replys)
th.join
end
it 'should send metadata to the server ok when start_call is run first' do
run_op_view_metadata_test(true)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) do |responses|
responses.each { |r| p r }
end
end
it 'does not crash when used after the call has been finished' do
run_op_view_metadata_test(false)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) do |responses|
responses.each { |r| p r }
end
end
end
end
describe '#bidi_streamer' do
describe '#bidi_streamer', bidi: true do
before(:each) do
@sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s }
@replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s }
@ -386,7 +536,7 @@ describe 'ClientStub' do
end
shared_examples 'bidi streaming' do
it 'supports sending all the requests first', bidi: true do
it 'supports sending all the requests first' do
th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
@pass)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
@ -395,7 +545,7 @@ describe 'ClientStub' do
th.join
end
it 'supports client-initiated ping pong', bidi: true do
it 'supports client-initiated ping pong' do
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
e = get_responses(stub)
@ -403,18 +553,39 @@ describe 'ClientStub' do
th.join
end
it 'supports a server-initiated ping pong', bidi: true do
it 'supports a server-initiated ping pong' do
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join
end
it 'should raise an error if the status is not ok' do
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @fail, false)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
e = get_responses(stub)
expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus)
th.join
end
# TODO: add test for metadata-related ArgumentError in a bidi call once
# issue mentioned in https://github.com/grpc/grpc/issues/10526 is fixed
it 'should send metadata to the server ok' do
th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true,
expected_metadata: @metadata)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
e = get_responses(stub)
expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join
end
end
describe 'without a call operation' do
def get_responses(stub)
e = stub.bidi_streamer(@method, @sent_msgs, noop, noop)
e = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
metadata: @metadata)
expect(e).to be_a(Enumerator)
e
end
@ -428,7 +599,8 @@ describe 'ClientStub' do
end
def get_responses(stub, run_start_call_first: false)
@op = stub.bidi_streamer(@method, @sent_msgs, noop, noop,
return_op: true)
return_op: true,
metadata: @metadata)
expect(@op).to be_a(GRPC::ActiveCall::Operation)
@op.start_call if run_start_call_first
e = @op.execute
@ -438,27 +610,53 @@ describe 'ClientStub' do
it_behaves_like 'bidi streaming'
it 'can run start_call before executing the call' do
th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys,
@pass)
def run_op_view_metadata_test(run_start_call_first)
@server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' }
@server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' }
th = run_bidi_streamer_echo_ping_pong(
@sent_msgs, @pass, true,
expected_metadata: @metadata,
server_initial_md: @server_initial_md,
server_trailing_md: @server_trailing_md)
stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure)
e = get_responses(stub, run_start_call_first: true)
expect(e.collect { |r| r }).to eq(@replys)
e = get_responses(stub, run_start_call_first: run_start_call_first)
expect(e.collect { |r| r }).to eq(@sent_msgs)
th.join
end
it 'can run start_call before executing the call' do
run_op_view_metadata_test(true)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) do |responses|
responses.each { |r| p r }
end
end
it 'doesnt crash when op_view used after call has finished' do
run_op_view_metadata_test(false)
check_op_view_of_finished_client_call(
@op, @server_initial_md, @server_trailing_md) do |responses|
responses.each { |r| p r }
end
end
end
end
def run_server_streamer(expected_input, replys, status, **kw)
wanted_metadata = kw.clone
def run_server_streamer(expected_input, replys, status,
expected_metadata: {},
server_initial_md: {},
server_trailing_md: {})
wanted_metadata = expected_metadata.clone
wakey_thread do |notifier|
c = expect_server_to_be_invoked(notifier)
c = expect_server_to_be_invoked(
notifier, metadata_to_send: server_initial_md)
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
end
expect(c.remote_read).to eq(expected_input)
replys.each { |r| c.remote_send(r) }
c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true,
metadata: server_trailing_md)
end
end
@ -472,9 +670,17 @@ describe 'ClientStub' do
end
end
def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts)
def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts,
expected_metadata: {},
server_initial_md: {},
server_trailing_md: {})
wanted_metadata = expected_metadata.clone
wakey_thread do |notifier|
c = expect_server_to_be_invoked(notifier)
c = expect_server_to_be_invoked(
notifier, metadata_to_send: server_initial_md)
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
end
expected_inputs.each do |i|
if client_starts
expect(c.remote_read).to eq(i)
@ -484,33 +690,44 @@ describe 'ClientStub' do
expect(c.remote_read).to eq(i)
end
end
c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true,
metadata: server_trailing_md)
end
end
def run_client_streamer(expected_inputs, resp, status, **kw)
wanted_metadata = kw.clone
def run_client_streamer(expected_inputs, resp, status,
expected_metadata: {},
server_initial_md: {},
server_trailing_md: {})
wanted_metadata = expected_metadata.clone
wakey_thread do |notifier|
c = expect_server_to_be_invoked(notifier)
c = expect_server_to_be_invoked(
notifier, metadata_to_send: server_initial_md)
expected_inputs.each { |i| expect(c.remote_read).to eq(i) }
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
end
c.remote_send(resp)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true,
metadata: server_trailing_md)
end
end
def run_request_response(expected_input, resp, status, **kw)
wanted_metadata = kw.clone
def run_request_response(expected_input, resp, status,
expected_metadata: {},
server_initial_md: {},
server_trailing_md: {})
wanted_metadata = expected_metadata.clone
wakey_thread do |notifier|
c = expect_server_to_be_invoked(notifier)
c = expect_server_to_be_invoked(
notifier, metadata_to_send: server_initial_md)
expect(c.remote_read).to eq(expected_input)
wanted_metadata.each do |k, v|
expect(c.metadata[k.to_s]).to eq(v)
end
c.remote_send(resp)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true)
c.send_status(status, status == @pass ? 'OK' : 'NOK', true,
metadata: server_trailing_md)
end
end
@ -528,13 +745,13 @@ describe 'ClientStub' do
@server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
end
def expect_server_to_be_invoked(notifier)
def expect_server_to_be_invoked(notifier, metadata_to_send: nil)
@server.start
notifier.notify(nil)
recvd_rpc = @server.request_call
recvd_call = recvd_rpc.call
recvd_call.metadata = recvd_rpc.metadata
recvd_call.run_batch(SEND_INITIAL_METADATA => nil)
recvd_call.run_batch(SEND_INITIAL_METADATA => metadata_to_send)
GRPC::ActiveCall.new(recvd_call, noop, noop, INFINITE_FUTURE,
metadata_received: true)
end

@ -38,14 +38,14 @@ describe GRPC::RpcDesc do
shared_examples 'it handles errors' do
it 'sends the specified status if BadStatus is raised' do
expect(@call).to receive(:remote_read).once.and_return(Object.new)
expect(@call).to receive(:read_unary_request).once.and_return(Object.new)
expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false,
metadata: {})
this_desc.run_server_method(@call, method(:bad_status))
end
it 'sends status UNKNOWN if other StandardErrors are raised' do
expect(@call).to receive(:remote_read).once.and_return(Object.new)
expect(@call).to receive(:read_unary_request).once.and_return(Object.new)
expect(@call).to receive(:send_status).once.with(UNKNOWN,
arg_error_msg,
false, metadata: {})
@ -53,7 +53,7 @@ describe GRPC::RpcDesc do
end
it 'absorbs CallError with no further action' do
expect(@call).to receive(:remote_read).once.and_raise(CallError)
expect(@call).to receive(:read_unary_request).once.and_raise(CallError)
blk = proc do
this_desc.run_server_method(@call, method(:fake_reqresp))
end
@ -75,7 +75,7 @@ describe GRPC::RpcDesc do
it 'sends a response and closes the stream if there no errors' do
req = Object.new
expect(@call).to receive(:remote_read).once.and_return(req)
expect(@call).to receive(:read_unary_request).once.and_return(req)
expect(@call).to receive(:output_metadata).once.and_return(fake_md)
expect(@call).to receive(:server_unary_response).once
.with(@ok_response, trailing_metadata: fake_md)
@ -133,7 +133,7 @@ describe GRPC::RpcDesc do
it 'sends a response and closes the stream if there no errors' do
req = Object.new
expect(@call).to receive(:remote_read).once.and_return(req)
expect(@call).to receive(:read_unary_request).once.and_return(req)
expect(@call).to receive(:remote_send).twice.with(@ok_response)
expect(@call).to receive(:output_metadata).and_return(fake_md)
expect(@call).to receive(:send_status).once.with(OK, 'OK', true,

@ -111,6 +111,47 @@ end
SlowStub = SlowService.rpc_stub_class
# a test service that hangs onto call objects
# and uses them after the server-side call has been
# finished
class CheckCallAfterFinishedService
include GRPC::GenericService
rpc :an_rpc, EchoMsg, EchoMsg
rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg
rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg)
rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg)
attr_reader :server_side_call
def an_rpc(req, call)
fail 'shouldnt reuse service' unless @server_side_call.nil?
@server_side_call = call
req
end
def a_client_streaming_rpc(call)
fail 'shouldnt reuse service' unless @server_side_call.nil?
@server_side_call = call
# iterate through the requests so the call can complete
call.each_remote_read.each { |r| p r }
EchoMsg.new
end
def a_server_streaming_rpc(_, call)
fail 'shouldnt reuse service' unless @server_side_call.nil?
@server_side_call = call
[EchoMsg.new, EchoMsg.new]
end
def a_bidi_rpc(requests, call)
fail 'shouldnt reuse service' unless @server_side_call.nil?
@server_side_call = call
requests.each { |r| p r }
[EchoMsg.new, EchoMsg.new]
end
end
CheckCallAfterFinishedServiceStub = CheckCallAfterFinishedService.rpc_stub_class
describe GRPC::RpcServer do
RpcServer = GRPC::RpcServer
StatusCodes = GRPC::Core::StatusCodes
@ -505,5 +546,109 @@ describe GRPC::RpcServer do
t.join
end
end
context 'when call objects are used after calls have completed' do
before(:each) do
server_opts = {
poll_period: 1
}
@srv = RpcServer.new(**server_opts)
alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
@alt_host = "0.0.0.0:#{alt_port}"
@service = CheckCallAfterFinishedService.new
@srv.handle(@service)
@srv_thd = Thread.new { @srv.run }
@srv.wait_till_running
end
# check that the server-side call is still in a usable state even
# after it has finished
def check_single_req_view_of_finished_call(call)
common_check_of_finished_server_call(call)
expect(call.peer).to be_a(String)
expect(call.peer_cert).to be(nil)
end
def check_multi_req_view_of_finished_call(call)
common_check_of_finished_server_call(call)
expect do
call.each_remote_read.each { |r| p r }
end.to raise_error(GRPC::Core::CallError)
end
def common_check_of_finished_server_call(call)
expect do
call.merge_metadata_to_send({})
end.to raise_error(RuntimeError)
expect do
call.send_initial_metadata
end.to_not raise_error
expect(call.cancelled?).to be(false)
expect(call.metadata).to be_a(Hash)
expect(call.metadata['user-agent']).to be_a(String)
expect(call.metadata_sent).to be(true)
expect(call.output_metadata).to eq({})
expect(call.metadata_to_send).to eq({})
expect(call.deadline.is_a?(Time)).to be(true)
end
it 'should not crash when call used after an unary call is finished' do
req = EchoMsg.new
stub = CheckCallAfterFinishedServiceStub.new(@alt_host,
:this_channel_is_insecure)
resp = stub.an_rpc(req)
expect(resp).to be_a(EchoMsg)
@srv.stop
@srv_thd.join
check_single_req_view_of_finished_call(@service.server_side_call)
end
it 'should not crash when call used after client streaming finished' do
requests = [EchoMsg.new, EchoMsg.new]
stub = CheckCallAfterFinishedServiceStub.new(@alt_host,
:this_channel_is_insecure)
resp = stub.a_client_streaming_rpc(requests)
expect(resp).to be_a(EchoMsg)
@srv.stop
@srv_thd.join
check_multi_req_view_of_finished_call(@service.server_side_call)
end
it 'should not crash when call used after server streaming finished' do
req = EchoMsg.new
stub = CheckCallAfterFinishedServiceStub.new(@alt_host,
:this_channel_is_insecure)
responses = stub.a_server_streaming_rpc(req)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
@srv.stop
@srv_thd.join
check_single_req_view_of_finished_call(@service.server_side_call)
end
it 'should not crash when call used after a bidi call is finished' do
requests = [EchoMsg.new, EchoMsg.new]
stub = CheckCallAfterFinishedServiceStub.new(@alt_host,
:this_channel_is_insecure)
responses = stub.a_bidi_rpc(requests)
responses.each do |r|
expect(r).to be_a(EchoMsg)
end
@srv.stop
@srv_thd.join
check_multi_req_view_of_finished_call(@service.server_side_call)
end
end
end
end

@ -0,0 +1,16 @@
-----BEGIN PRIVATE KEY-----
MIICeQIBADANBgkqhkiG9w0BAQEFAASCAmMwggJfAgEAAoGBAOxUR9uhvhbeVUIM
s5WbH0px0mehl2+6sZpNjzvE2KimZpHzMJHukVH0Ffkvhs0b8+S5Ut9VNUAqd3IM
JCCAEGtRNoQhM1t9Yr2zAckSvbRacp+FL/Cj9eDmyo00KsVGaeefA4Dh4OW+ZhkT
NKcldXqkSuj1sEf244JZYuqZp6/tAgMBAAECgYEAi2NSVqpZMafE5YYUTcMGe6QS
k2jtpsqYgggI2RnLJ/2tNZwYI5pwP8QVSbnMaiF4gokD5hGdrNDfTnb2v+yIwYEH
0w8+oG7Z81KodsiZSIDJfTGsAZhVNwOz9y0VD8BBZZ1/274Zh52AUKLjZS/ZwIbS
W2ywya855dPnH/wj+0ECQQD9X8D920kByTNHhBG18biAEZ4pxs9f0OAG8333eVcI
w2lJDLsYDZrCB2ocgA3lUdozlzPC7YDYw8reg0tkiRY5AkEA7sdNzOeQsQRn7++5
0bP9DtT/iON1gbfxRzCfCfXdoOtfQWIzTePWtURt9X/5D9NofI0Rg5W2oGy/MLe5
/sXHVQJBAIup5XrJDkQywNZyAUU2ecn2bCWBFjwtqd+LBmuMciI9fOKsZtEKZrz/
U0lkeMRoSwvXE8wmGLjjrAbdfohrXFkCQQDZEx/LtIl6JINJQiswVe0tWr6k+ASP
1WXoTm+HYpoF/XUvv9LccNF1IazFj34hwRQwhx7w/V52Ieb+p0jUMYGxAkEAjDhd
9pBO1fKXWiXzi9ZKfoyTNcUq3eBSVKwPG2nItg5ycXengjT5sgcWDnciIzW7BIVI
JiqOszq9GWESErAatg==
-----END PRIVATE KEY-----

@ -0,0 +1,14 @@
-----BEGIN CERTIFICATE-----
MIICHzCCAYgCAQEwDQYJKoZIhvcNAQEFBQAwVjELMAkGA1UEBhMCQVUxEzARBgNV
BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0
ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTE0MDcxNzIzNTYwMloXDTI0MDcxNDIzNTYw
MlowWjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDETMBEGA1UEAwwKdGVzdGNsaWVudDCB
nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA7FRH26G+Ft5VQgyzlZsfSnHSZ6GX
b7qxmk2PO8TYqKZmkfMwke6RUfQV+S+GzRvz5LlS31U1QCp3cgwkIIAQa1E2hCEz
W31ivbMByRK9tFpyn4Uv8KP14ObKjTQqxUZp558DgOHg5b5mGRM0pyV1eqRK6PWw
R/bjglli6pmnr+0CAwEAATANBgkqhkiG9w0BAQUFAAOBgQAStSm5PM7ubROiKK6/
T2FkKlhiTOx+Ryenm3Eio59emq+jXl+1nhPySX5G2PQzSR5vd1dIhwgZSR4Gyttk
tRZ57k/NI1brUW8joiEOMJA/Mr7H7asx7wIRYDE91Fs8GkKWd5LhoPAQj+qdG35C
OO+svdkmqH0KZo320ZUqdl2ooQ==
-----END CERTIFICATE-----

@ -165,19 +165,22 @@
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
% if defaults['global'].get('CPPFLAGS', None) is not None:
'OTHER_CFLAGS': [
% for item in defaults['global'].get('CPPFLAGS').split():
'${item}',
% endfor
],
'OTHER_CPLUSPLUSFLAGS': [
% for item in defaults['global'].get('CPPFLAGS').split():
'${item}',
% endfor
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations'
],
% endif
},
% if defaults['global'].get('CPPFLAGS', None) is not None:
'OTHER_CFLAGS': [
% for item in defaults['global'].get('CPPFLAGS').split():
'${item}',
% endfor
],
'OTHER_CPLUSPLUSFLAGS': [
'-stdlib=libc++',
'-std=c++11'
],
% endif
}]
]
},
@ -201,6 +204,13 @@
'${source}',
% endfor
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
% endif
% endfor
@ -282,6 +292,13 @@
'${source}',
% endfor
],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
},
% endif
% endfor
@ -317,6 +334,11 @@
'ldflags': [
'-Wl,-wrap,memcpy'
]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
],
"target_name": "${module.name}",

@ -106,6 +106,8 @@ extern void ping(grpc_end2end_test_config config);
extern void ping_pre_init(void);
extern void ping_pong_streaming(grpc_end2end_test_config config);
extern void ping_pong_streaming_pre_init(void);
extern void proxy_auth(grpc_end2end_test_config config);
extern void proxy_auth_pre_init(void);
extern void registered_call(grpc_end2end_test_config config);
extern void registered_call_pre_init(void);
extern void request_with_flags(grpc_end2end_test_config config);
@ -181,6 +183,7 @@ void grpc_end2end_tests_pre_init(void) {
payload_pre_init();
ping_pre_init();
ping_pong_streaming_pre_init();
proxy_auth_pre_init();
registered_call_pre_init();
request_with_flags_pre_init();
request_with_payload_pre_init();
@ -244,6 +247,7 @@ void grpc_end2end_tests(int argc, char **argv,
payload(config);
ping(config);
ping_pong_streaming(config);
proxy_auth(config);
registered_call(config);
request_with_flags(config);
request_with_payload(config);
@ -416,6 +420,10 @@ void grpc_end2end_tests(int argc, char **argv,
ping_pong_streaming(config);
continue;
}
if (0 == strcmp("proxy_auth", argv[i])) {
proxy_auth(config);
continue;
}
if (0 == strcmp("registered_call", argv[i])) {
registered_call(config);
continue;

@ -108,6 +108,8 @@ extern void ping(grpc_end2end_test_config config);
extern void ping_pre_init(void);
extern void ping_pong_streaming(grpc_end2end_test_config config);
extern void ping_pong_streaming_pre_init(void);
extern void proxy_auth(grpc_end2end_test_config config);
extern void proxy_auth_pre_init(void);
extern void registered_call(grpc_end2end_test_config config);
extern void registered_call_pre_init(void);
extern void request_with_flags(grpc_end2end_test_config config);
@ -184,6 +186,7 @@ void grpc_end2end_tests_pre_init(void) {
payload_pre_init();
ping_pre_init();
ping_pong_streaming_pre_init();
proxy_auth_pre_init();
registered_call_pre_init();
request_with_flags_pre_init();
request_with_payload_pre_init();
@ -248,6 +251,7 @@ void grpc_end2end_tests(int argc, char **argv,
payload(config);
ping(config);
ping_pong_streaming(config);
proxy_auth(config);
registered_call(config);
request_with_flags(config);
request_with_payload(config);
@ -424,6 +428,10 @@ void grpc_end2end_tests(int argc, char **argv,
ping_pong_streaming(config);
continue;
}
if (0 == strcmp("proxy_auth", argv[i])) {
proxy_auth(config);
continue;
}
if (0 == strcmp("registered_call", argv[i])) {
registered_call(config);
continue;

@ -47,11 +47,13 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
grpc_channel_args *client_args, grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data));
const int server_port = grpc_pick_unused_port_or_die();
gpr_join_host_port(&ffd->server_addr, "localhost", server_port);
ffd->proxy = grpc_end2end_http_proxy_create();
/* Pass client_args to proxy_create so the proxy can check for proxy auth
   credentials */
ffd->proxy = grpc_end2end_http_proxy_create(client_args);
f.fixture_data = ffd;
f.cq = grpc_completion_queue_create_for_next(NULL);
@ -64,8 +66,17 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
char *proxy_uri;
gpr_asprintf(&proxy_uri, "http://%s",
grpc_end2end_http_proxy_get_proxy_name(ffd->proxy));
/* If testing for proxy auth, add credentials to proxy uri */
const grpc_arg *proxy_auth_arg =
grpc_channel_args_find(client_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS);
if (proxy_auth_arg == NULL || proxy_auth_arg->type != GRPC_ARG_STRING) {
gpr_asprintf(&proxy_uri, "http://%s",
grpc_end2end_http_proxy_get_proxy_name(ffd->proxy));
} else {
gpr_asprintf(&proxy_uri, "http://%s@%s", proxy_auth_arg->value.string,
grpc_end2end_http_proxy_get_proxy_name(ffd->proxy));
}
gpr_setenv("http_proxy", proxy_uri);
gpr_free(proxy_uri);
f->client = grpc_insecure_channel_create(ffd->server_addr, client_args, NULL);
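The fixture above embeds the test credentials in the http_proxy URI as user:password@host when the proxy-auth channel arg is present. The same construction in Ruby, for illustration (the values are hypothetical):

# Sketch: build the http_proxy URI with embedded credentials, mirroring
# chttp2_init_client_fullstack above.
creds = 'aladdin:opensesame'
proxy_host = 'localhost:12345'
proxy_uri = if creds.nil?
              "http://#{proxy_host}"
            else
              "http://#{creds}@#{proxy_host}"
            end
ENV['http_proxy'] = proxy_uri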

@ -22,6 +22,7 @@
#include <string.h>
#include <grpc/grpc.h>
#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
@ -46,7 +47,9 @@
#include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "test/core/util/port.h"
struct grpc_end2end_http_proxy {
@ -304,6 +307,28 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg,
&conn->on_write_response_done);
}
/**
* Checks whether the proxy auth header value matches the expected form:
*   Basic <base64_encoded_expected_cred>
* Returns true if it matches, false otherwise.
*/
static bool proxy_auth_header_matches(grpc_exec_ctx* exec_ctx,
char* proxy_auth_header_val,
char* expected_cred) {
GPR_ASSERT(proxy_auth_header_val != NULL);
GPR_ASSERT(expected_cred != NULL);
if (strncmp(proxy_auth_header_val, "Basic ", 6) != 0) {
return false;
}
proxy_auth_header_val += 6;
grpc_slice decoded_slice =
grpc_base64_decode(exec_ctx, proxy_auth_header_val, 0);
const bool header_matches =
grpc_slice_str_cmp(decoded_slice, expected_cred) == 0;
grpc_slice_unref_internal(exec_ctx, decoded_slice);
return header_matches;
}
// Callback to read the HTTP CONNECT request.
// TODO(roth): Technically, for any of the failure modes handled by this
// function, we should handle the error by returning an HTTP response to
@ -352,6 +377,28 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
GRPC_ERROR_UNREF(error);
return;
}
// If proxy auth is being used, check if the header is present and as expected
const grpc_arg* proxy_auth_arg = grpc_channel_args_find(
conn->proxy->channel_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS);
if (proxy_auth_arg != NULL && proxy_auth_arg->type == GRPC_ARG_STRING) {
bool client_authenticated = false;
for (size_t i = 0; i < conn->http_request.hdr_count; i++) {
if (strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) {
client_authenticated = proxy_auth_header_matches(
exec_ctx, conn->http_request.hdrs[i].value,
proxy_auth_arg->value.string);
break;
}
}
if (!client_authenticated) {
const char* msg = "HTTP Connect could not verify authentication";
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(msg);
proxy_connection_failed(exec_ctx, conn, true /* is_client */,
"HTTP proxy read request", error);
GRPC_ERROR_UNREF(error);
return;
}
}
// Resolve address.
grpc_resolved_addresses* resolved_addresses = NULL;
error = grpc_blocking_resolve_address(conn->http_request.path, "80",
@ -436,7 +483,8 @@ static void thread_main(void* arg) {
grpc_exec_ctx_finish(&exec_ctx);
}
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
grpc_channel_args* args) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_end2end_http_proxy* proxy =
(grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy));
@ -448,7 +496,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
gpr_join_host_port(&proxy->proxy_name, "localhost", proxy_port);
gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name);
// Create TCP server.
proxy->channel_args = grpc_channel_args_copy(NULL);
proxy->channel_args = grpc_channel_args_copy(args);
grpc_error* error = grpc_tcp_server_create(
&exec_ctx, NULL, proxy->channel_args, &proxy->server);
GPR_ASSERT(error == GRPC_ERROR_NONE);
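proxy_auth_header_matches above strips the "Basic " prefix, base64-decodes the remainder, and compares it to the expected user:password string. The same check in Ruby, as a sketch (note Base64.decode64 is more lenient than the strict decode the C helper uses):

require 'base64'

# Sketch: verify a Proxy-Authorization header of the form
#   "Basic <base64(user:password)>"
def proxy_auth_header_matches?(header_value, expected_cred)
  return false unless header_value.start_with?('Basic ')
  decoded = Base64.decode64(header_value.sub('Basic ', ''))
  decoded == expected_cred
end

proxy_auth_header_matches?(
  "Basic #{Base64.strict_encode64('aladdin:opensesame')}",
  'aladdin:opensesame')  # => true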

@ -16,11 +16,28 @@
*
*/
#ifndef GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H
#define GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H
#include <grpc/grpc.h>
/* The test credentials being used for HTTP Proxy Authorization */
#define GRPC_TEST_HTTP_PROXY_AUTH_CREDS "aladdin:opensesame"
/* A channel arg key used to indicate that the channel uses proxy authorization.
* The value (string) should be the proxy auth credentials that should be
* checked.
*/
#define GRPC_ARG_HTTP_PROXY_AUTH_CREDS "grpc.test.proxy_auth"
typedef struct grpc_end2end_http_proxy grpc_end2end_http_proxy;
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create();
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
grpc_channel_args* args);
void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy);
const char* grpc_end2end_http_proxy_get_proxy_name(
grpc_end2end_http_proxy* proxy);
#endif /* GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H */

@ -24,9 +24,9 @@ import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2')
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True)
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
@ -47,7 +47,7 @@ END2END_FIXTURES = {
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds': default_unsecure_fixture_options,
'h2_http_proxy': default_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
'h2_oauth2': default_secure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace(
@ -69,8 +69,8 @@ END2END_FIXTURES = {
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2')
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False)
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth')
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
@@ -128,6 +128,7 @@ END2END_TESTS = {
'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
@@ -178,6 +179,9 @@ def compatible(f, t):
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
return True

@@ -21,7 +21,7 @@ load("//bazel:grpc_build_system.bzl", "grpc_sh_test", "grpc_cc_binary", "grpc_cc
def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True,
name_resolution=True, secure=True, tracing=False,
platforms=['windows', 'linux', 'mac', 'posix'],
is_inproc=False, is_http2=True):
is_inproc=False, is_http2=True, supports_proxy_auth=False):
return struct(
fullstack=fullstack,
includes_proxy=includes_proxy,
@@ -30,7 +30,8 @@ def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True,
secure=secure,
tracing=tracing,
is_inproc=is_inproc,
is_http2=is_http2
is_http2=is_http2,
supports_proxy_auth=supports_proxy_auth
#platforms=platforms
)
@@ -47,7 +48,7 @@ END2END_FIXTURES = {
'h2_full+pipe': fixture_options(platforms=['linux']),
'h2_full+trace': fixture_options(tracing=True),
'h2_full+workarounds': fixture_options(),
'h2_http_proxy': fixture_options(),
'h2_http_proxy': fixture_options(supports_proxy_auth=True),
'h2_oauth2': fixture_options(),
'h2_proxy': fixture_options(includes_proxy=True),
'h2_sockpair_1byte': fixture_options(fullstack=False, dns_resolver=False),
@@ -67,7 +68,8 @@ END2END_FIXTURES = {
def test_options(needs_fullstack=False, needs_dns=False, needs_names=False,
proxyable=True, secure=False, traceable=False,
exclude_inproc=False, needs_http2=False):
exclude_inproc=False, needs_http2=False,
needs_proxy_auth=False):
return struct(
needs_fullstack=needs_fullstack,
needs_dns=needs_dns,
@@ -76,7 +78,8 @@ def test_options(needs_fullstack=False, needs_dns=False, needs_names=False,
secure=secure,
traceable=traceable,
exclude_inproc=exclude_inproc,
needs_http2=needs_http2
needs_http2=needs_http2,
needs_proxy_auth=needs_proxy_auth
)
@@ -123,6 +126,7 @@ END2END_TESTS = {
'load_reporting_hook': test_options(),
'ping_pong_streaming': test_options(),
'ping': test_options(needs_fullstack=True, proxyable=False),
'proxy_auth': test_options(needs_proxy_auth=True),
'registered_call': test_options(),
'request_with_flags': test_options(proxyable=False),
'request_with_payload': test_options(),
@@ -165,6 +169,9 @@ def compatible(fopt, topt):
if topt.needs_http2:
if not fopt.is_http2:
return False
if topt.needs_proxy_auth:
if not fopt.supports_proxy_auth:
return False
return True

@@ -0,0 +1,235 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
 * Verifies that proxy authentication works with HTTP CONNECT.
 */
#include "test/core/end2end/end2end_tests.h"
#include "test/core/end2end/fixtures/http_proxy_fixture.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
#include "test/core/end2end/cq_verifier.h"
static void *tag(intptr_t t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_from_now(int n) {
return grpc_timeout_seconds_to_deadline(n);
}
static gpr_timespec five_seconds_from_now(void) {
return n_seconds_from_now(5);
}
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_from_now(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),
NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
grpc_completion_queue_destroy(f->shutdown_cq);
}
static void simple_request_body(grpc_end2end_test_config config,
grpc_end2end_test_fixture f) {
grpc_call *c;
grpc_call *s;
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
int was_cancelled = 2;
char *peer;
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
grpc_slice_from_static_string("/foo"),
get_host_override_slice("foo.test.google.fr:1234", config), deadline,
NULL);
GPR_ASSERT(c);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "client_peer_before_call=%s", peer);
gpr_free(peer);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
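/* Client batch: send initial metadata and half-close, then receive the
   server's initial metadata and the final status. */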
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(s);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "server_peer=%s", peer);
gpr_free(peer);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "client_peer=%s", peer);
gpr_free(peer);
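/* Server batch: send initial metadata and status UNIMPLEMENTED, and detect
   whether the client closed or cancelled. */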
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
grpc_slice status_details = grpc_slice_from_static_string("xyz");
op->data.send_status_from_server.status_details = &status_details;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo"));
validate_host_override_string("foo.test.google.fr:1234", call_details.host,
config);
GPR_ASSERT(0 == call_details.flags);
GPR_ASSERT(was_cancelled == 1);
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}
static void test_invoke_proxy_auth(grpc_end2end_test_config config) {
/* Indicate that the proxy requires user auth */
grpc_arg client_arg = {.type = GRPC_ARG_STRING,
.key = GRPC_ARG_HTTP_PROXY_AUTH_CREDS,
.value.string = GRPC_TEST_HTTP_PROXY_AUTH_CREDS};
grpc_channel_args client_args = {.num_args = 1, .args = &client_arg};
grpc_end2end_test_fixture f =
begin_test(config, "test_invoke_proxy_auth", &client_args, NULL);
simple_request_body(config, f);
end_test(&f);
config.tear_down_data(&f);
}
void proxy_auth(grpc_end2end_test_config config) {
test_invoke_proxy_auth(config);
}
void proxy_auth_pre_init(void) {}
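The client-side half that actually attaches the header during HTTP CONNECT lives in the http_proxy.c change summarized in the merge, not in this excerpt. A hedged sketch of forming the header value, assuming the standard Basic scheme and gRPC's own base64 helper from src/core/lib/slice/b64.h:

/* Sketch (assumed wiring): base64-encode "user:pass" and prepend the Basic
 * scheme. The returned string is gpr_malloc()ed; the caller frees it. */
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/slice/b64.h"

static char *basic_proxy_auth_header_value(const char *user_creds) {
  char *encoded = grpc_base64_encode(user_creds, strlen(user_creds),
                                     0 /* url_safe */, 0 /* multiline */);
  char *header_value;
  gpr_asprintf(&header_value, "Basic %s", encoded);
  gpr_free(encoded);
  return header_value;
}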

@@ -190,7 +190,8 @@ static void consumer_thread(void *arg) {
gpr_log(GPR_INFO, "consumer %d phase 2", opt->id);
for (;;) {
ev = grpc_completion_queue_next(opt->cc, ten_seconds_time(), NULL);
ev = grpc_completion_queue_next(opt->cc,
gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
switch (ev.type) {
case GRPC_OP_COMPLETE:
GPR_ASSERT(ev.success);

@@ -1127,6 +1127,7 @@ src/core/lib/iomgr/iomgr_internal.h \
src/core/lib/iomgr/iomgr_posix.c \
src/core/lib/iomgr/iomgr_posix.h \
src/core/lib/iomgr/iomgr_uv.c \
src/core/lib/iomgr/iomgr_uv.h \
src/core/lib/iomgr/iomgr_windows.c \
src/core/lib/iomgr/is_epollexclusive_available.c \
src/core/lib/iomgr/is_epollexclusive_available.h \

@@ -164,7 +164,6 @@ def expand_directives(root, directives):
if intersect:
for f in sorted(files_add): # sorted to ensure merge stability
if f not in intersect:
print("X", root, glob_add, glob_have)
out_globs[os.path.relpath(f, start=root)] = who_add
for who in who_have:
if who not in out_globs[glob_add]:
@@ -185,7 +184,6 @@ def add_parent_to_globs(parent, globs, globs_dir):
if intersect:
for f in sorted(files_child): # sorted to ensure merge stability
if f not in intersect:
print("Y", full_dir(owners.dir, oglob), full_dir(globs_dir, gglob))
who = gglob_who_orig.copy()
globs[os.path.relpath(f, start=globs_dir)] = who
for who in oglob_who:

@@ -67,6 +67,12 @@ def _args():
default=20,
help='Number of times to loop the benchmarks. Must match what was passed to bm_run.py'
)
argp.add_argument(
'-r',
'--regex',
type=str,
default="",
help='Regex to filter benchmarks run')
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
@@ -144,7 +150,7 @@ def _read_json(filename, badjson_files, nonexistant_files):
def fmt_dict(d):
return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d])
def diff(bms, loops, track, old, new, counters):
def diff(bms, loops, regex, track, old, new, counters):
benchmarks = collections.defaultdict(Benchmark)
badjson_files = {}
@@ -153,7 +159,8 @@ def diff(bms, loops, track, old, new, counters):
for loop in range(0, loops):
for line in subprocess.check_output(
['bm_diff_%s/opt/%s' % (old, bm),
'--benchmark_list_tests']).splitlines():
'--benchmark_list_tests',
'--benchmark_filter=%s' % regex]).splitlines():
stripped_line = line.strip().replace("/", "_").replace(
"<", "_").replace(">", "_").replace(", ", "_")
js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
@@ -211,6 +218,6 @@ def diff(bms, loops, track, old, new, counters):
if __name__ == '__main__':
args = _args()
diff, note = diff(args.benchmarks, args.loops, args.track, args.old,
diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old,
args.new, args.counters)
print('%s\n%s' % (note, diff if diff else "No performance differences"))

@@ -63,10 +63,10 @@ def _args():
help='Name of baseline run to compare to. Usually just called "old"')
argp.add_argument(
'-r',
'--repetitions',
type=int,
default=1,
help='Number of repetitions to pass to the benchmarks')
'--regex',
type=str,
default="",
help='Regex to filter benchmarks run')
argp.add_argument(
'-l',
'--loops',
@@ -125,10 +125,10 @@ def main(args):
subprocess.check_call(['git', 'checkout', where_am_i])
subprocess.check_call(['git', 'submodule', 'update'])
bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)
diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
'new', args.counters)
if diff:
text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)

@@ -56,10 +56,10 @@ def _args():
)
argp.add_argument(
'-r',
'--repetitions',
type=int,
default=1,
help='Number of repetitions to pass to the benchmarks')
'--regex',
type=str,
default="",
help='Regex to filter benchmarks run')
argp.add_argument(
'-l',
'--loops',
@@ -77,18 +77,17 @@ def _args():
return args
def _collect_bm_data(bm, cfg, name, reps, idx, loops):
def _collect_bm_data(bm, cfg, name, regex, idx, loops):
jobs_list = []
for line in subprocess.check_output(
['bm_diff_%s/%s/%s' % (name, cfg, bm),
'--benchmark_list_tests']).splitlines():
'--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
stripped_line = line.strip().replace("/", "_").replace(
"<", "_").replace(">", "_").replace(", ", "_")
cmd = [
'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
(bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
'--benchmark_repetitions=%d' % (reps)
]
jobs_list.append(
jobset.JobSpec(
@@ -100,13 +99,13 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops):
return jobs_list
def run(name, benchmarks, jobs, loops, reps, counters):
def run(name, benchmarks, jobs, loops, regex, counters):
jobs_list = []
for loop in range(0, loops):
for bm in benchmarks:
jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
if counters:
jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
loops)
random.shuffle(jobs_list, random.SystemRandom().random)
jobset.run(jobs_list, maxjobs=jobs)
@@ -114,4 +113,4 @@ def run(name, benchmarks, jobs, loops, reps, counters):
if __name__ == '__main__':
args = _args()
run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
run(args.name, args.benchmarks, args.jobs, args.loops, args.regex, args.counters)

@@ -7339,6 +7339,7 @@
"test/core/end2end/tests/payload.c",
"test/core/end2end/tests/ping.c",
"test/core/end2end/tests/ping_pong_streaming.c",
"test/core/end2end/tests/proxy_auth.c",
"test/core/end2end/tests/registered_call.c",
"test/core/end2end/tests/request_with_flags.c",
"test/core/end2end/tests/request_with_payload.c",
@@ -7416,6 +7417,7 @@
"test/core/end2end/tests/payload.c",
"test/core/end2end/tests/ping.c",
"test/core/end2end/tests/ping_pong_streaming.c",
"test/core/end2end/tests/proxy_auth.c",
"test/core/end2end/tests/registered_call.c",
"test/core/end2end/tests/request_with_flags.c",
"test/core/end2end/tests/request_with_payload.c",
@@ -7740,6 +7742,7 @@
"src/core/lib/iomgr/iomgr.h",
"src/core/lib/iomgr/iomgr_internal.h",
"src/core/lib/iomgr/iomgr_posix.h",
"src/core/lib/iomgr/iomgr_uv.h",
"src/core/lib/iomgr/is_epollexclusive_available.h",
"src/core/lib/iomgr/load_file.h",
"src/core/lib/iomgr/lockfree_event.h",
@@ -7904,6 +7907,7 @@
"src/core/lib/iomgr/iomgr_posix.c",
"src/core/lib/iomgr/iomgr_posix.h",
"src/core/lib/iomgr/iomgr_uv.c",
"src/core/lib/iomgr/iomgr_uv.h",
"src/core/lib/iomgr/iomgr_windows.c",
"src/core/lib/iomgr/is_epollexclusive_available.c",
"src/core/lib/iomgr/is_epollexclusive_available.h",

@@ -1390,7 +1390,9 @@
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [],
"exclude_iomgrs": [
"uv"
],
"flaky": false,
"gtest": false,
"language": "c",
@@ -3991,7 +3993,8 @@
"mac",
"posix",
"windows"
]
],
"timeout_seconds": 1200
},
{
"args": [],
@@ -16349,6 +16352,30 @@
"posix"
]
},
{
"args": [
"proxy_auth"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"registered_call"
@@ -38862,6 +38889,30 @@
"posix"
]
},
{
"args": [
"proxy_auth"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"registered_call"

@@ -245,7 +245,7 @@ class CLanguage(object):
self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
self.args.compiler)
if args.iomgr_platform == "uv":
cflags = '-DGRPC_UV '
cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
try:
cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
except (subprocess.CalledProcessError, OSError):

@@ -336,6 +336,7 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\lockfree_event.h" />

@@ -971,6 +971,9 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>

@@ -231,6 +231,7 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\lockfree_event.h" />

@@ -695,6 +695,9 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>

@@ -326,6 +326,7 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_internal.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\load_file.h" />
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\lockfree_event.h" />

@@ -878,6 +878,9 @@
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_posix.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\iomgr_uv.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>
<ClInclude Include="$(SolutionDir)\..\src\core\lib\iomgr\is_epollexclusive_available.h">
<Filter>src\core\lib\iomgr</Filter>
</ClInclude>

@@ -231,6 +231,8 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c">

@@ -121,6 +121,9 @@
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>

@@ -233,6 +233,8 @@
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c">

@@ -124,6 +124,9 @@
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
