Merge github.com:grpc/grpc into grpc_millis

pull/11866/head
Craig Tiller, 8 years ago
commit 016ad385e7
  1. CMakeLists.txt (2)
  2. Makefile (2)
  3. README.md (2)
  4. binding.gyp (39)
  5. build.yaml (1)
  6. src/core/ext/filters/client_channel/http_proxy.c (64)
  7. src/core/ext/transport/chttp2/transport/parsing.c (4)
  8. src/core/lib/iomgr/ev_epoll1_linux.c (270)
  9. src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c (8)
  10. src/core/lib/iomgr/ev_epoll_thread_pool_linux.c (8)
  11. src/core/lib/iomgr/ev_epollex_linux.c (12)
  12. src/core/lib/iomgr/ev_epollsig_linux.c (10)
  13. src/core/lib/iomgr/lockfree_event.c (14)
  14. src/core/lib/iomgr/lockfree_event.h (5)
  15. templates/binding.gyp.template (28)
  16. test/core/end2end/end2end_nosec_tests.c (8)
  17. test/core/end2end/end2end_tests.c (8)
  18. test/core/end2end/fixtures/h2_http_proxy.c (15)
  19. test/core/end2end/fixtures/http_proxy_fixture.c (52)
  20. test/core/end2end/fixtures/http_proxy_fixture.h (19)
  21. test/core/end2end/gen_build_yaml.py (14)
  22. test/core/end2end/generate_tests.bzl (17)
  23. test/core/end2end/tests/proxy_auth.c (235)
  24. test/core/surface/completion_queue_threading_test.c (3)
  25. tools/profiling/microbenchmarks/bm_diff/bm_diff.py (8)
  26. tools/run_tests/generated/sources_and_headers.json (2)
  27. tools/run_tests/generated/tests.json (51)
  28. vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj (2)
  29. vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters (3)
  30. vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj (2)
  31. vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters (3)

@@ -4358,6 +4358,7 @@ add_library(end2end_tests
   test/core/end2end/tests/payload.c
   test/core/end2end/tests/ping.c
   test/core/end2end/tests/ping_pong_streaming.c
+  test/core/end2end/tests/proxy_auth.c
   test/core/end2end/tests/registered_call.c
   test/core/end2end/tests/request_with_flags.c
   test/core/end2end/tests/request_with_payload.c
@@ -4457,6 +4458,7 @@ add_library(end2end_nosec_tests
   test/core/end2end/tests/payload.c
   test/core/end2end/tests/ping.c
   test/core/end2end/tests/ping_pong_streaming.c
+  test/core/end2end/tests/proxy_auth.c
   test/core/end2end/tests/registered_call.c
   test/core/end2end/tests/request_with_flags.c
   test/core/end2end/tests/request_with_payload.c

@@ -7956,6 +7956,7 @@ LIBEND2END_TESTS_SRC = \
     test/core/end2end/tests/payload.c \
     test/core/end2end/tests/ping.c \
     test/core/end2end/tests/ping_pong_streaming.c \
+    test/core/end2end/tests/proxy_auth.c \
     test/core/end2end/tests/registered_call.c \
     test/core/end2end/tests/request_with_flags.c \
     test/core/end2end/tests/request_with_payload.c \
@@ -8050,6 +8051,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \
     test/core/end2end/tests/payload.c \
     test/core/end2end/tests/ping.c \
     test/core/end2end/tests/ping_pong_streaming.c \
+    test/core/end2end/tests/proxy_auth.c \
     test/core/end2end/tests/registered_call.c \
     test/core/end2end/tests/request_with_flags.c \
     test/core/end2end/tests/request_with_payload.c \

@@ -17,7 +17,7 @@ See [INSTALL](INSTALL.md) for installation instructions for various platforms.
 See [tools/run_tests](tools/run_tests) for more guidance on how to run various test suites (e.g. unit tests, interop tests, benchmarks)
 
-See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5712453606309888) for the performance numbers for v1.0.x.
+See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5636470266134528) for the performance numbers for the latest released version.
 
 # Repository Structure & Status

@ -175,8 +175,6 @@
}], }],
['OS == "mac"', { ['OS == "mac"', {
'xcode_settings': { 'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
},
'OTHER_CFLAGS': [ 'OTHER_CFLAGS': [
'-g', '-g',
'-Wall', '-Wall',
@ -187,9 +185,18 @@
'-DOSATOMIC_USE_INLINED=1', '-DOSATOMIC_USE_INLINED=1',
], ],
'OTHER_CPLUSPLUSFLAGS': [ 'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-unused-parameter',
'-DOSATOMIC_USE_INLINED=1',
'-stdlib=libc++', '-stdlib=libc++',
'-std=c++11' '-std=c++11',
'-Wno-error=deprecated-declarations'
], ],
},
}] }]
] ]
}, },
@ -508,6 +515,13 @@
'third_party/boringssl/ssl/tls_method.c', 'third_party/boringssl/ssl/tls_method.c',
'third_party/boringssl/ssl/tls_record.c', 'third_party/boringssl/ssl/tls_record.c',
], ],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
}, },
], ],
}], }],
@ -625,6 +639,13 @@
'src/core/lib/support/tmpfile_windows.c', 'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c', 'src/core/lib/support/wrap_memcpy.c',
], ],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
}, },
{ {
'target_name': 'grpc', 'target_name': 'grpc',
@ -889,6 +910,13 @@
'src/core/ext/filters/workarounds/workaround_utils.c', 'src/core/ext/filters/workarounds/workaround_utils.c',
'src/core/plugin_registry/grpc_plugin_registry.c', 'src/core/plugin_registry/grpc_plugin_registry.c',
], ],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
}, },
{ {
'include_dirs': [ 'include_dirs': [
@ -914,6 +942,11 @@
'ldflags': [ 'ldflags': [
'-Wl,-wrap,memcpy' '-Wl,-wrap,memcpy'
] ]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}] }]
], ],
"target_name": "grpc_node", "target_name": "grpc_node",

@@ -4498,6 +4498,7 @@ targets:
   - grpc
   - gpr_test_util
   - gpr
+  timeout_seconds: 1200
 - name: writes_per_rpc_test
   gtest: true
   cpu_cost: 0.5

@@ -30,15 +30,23 @@
 #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h"
 #include "src/core/ext/filters/client_channel/uri_parser.h"
 #include "src/core/lib/channel/channel_args.h"
+#include "src/core/lib/slice/b64.h"
 #include "src/core/lib/support/env.h"
 #include "src/core/lib/support/string.h"
 
-static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) {
+/**
+ * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or
+ * NULL on error. Also sets 'user_cred' to user credentials if present in the
+ * 'http_proxy' env var, otherwise leaves it unchanged. It is caller's
+ * responsibility to gpr_free user_cred.
+ */
+static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
+  GPR_ASSERT(user_cred != NULL);
+  char* proxy_name = NULL;
   char* uri_str = gpr_getenv("http_proxy");
   if (uri_str == NULL) return NULL;
   grpc_uri* uri =
       grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
-  char* proxy_name = NULL;
   if (uri == NULL || uri->authority == NULL) {
     gpr_log(GPR_ERROR, "cannot parse value of 'http_proxy' env var");
     goto done;
@@ -47,11 +55,27 @@ static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) {
     gpr_log(GPR_ERROR, "'%s' scheme not supported in proxy URI", uri->scheme);
     goto done;
   }
-  if (strchr(uri->authority, '@') != NULL) {
-    gpr_log(GPR_ERROR, "userinfo not supported in proxy URI");
-    goto done;
+  /* Split on '@' to separate user credentials from host */
+  char** authority_strs = NULL;
+  size_t authority_nstrs;
+  gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs);
+  GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */
+  if (authority_nstrs == 1) {
+    /* User cred not present in authority */
+    proxy_name = authority_strs[0];
+  } else if (authority_nstrs == 2) {
+    /* User cred found */
+    *user_cred = authority_strs[0];
+    proxy_name = authority_strs[1];
+    gpr_log(GPR_DEBUG, "userinfo found in proxy URI");
+  } else {
+    /* Bad authority */
+    for (size_t i = 0; i < authority_nstrs; i++) {
+      gpr_free(authority_strs[i]);
+    }
   }
-  proxy_name = gpr_strdup(uri->authority);
+    proxy_name = NULL;
+  }
+  gpr_free(authority_strs);
 done:
   gpr_free(uri_str);
   grpc_uri_destroy(uri);
@@ -64,7 +88,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
                                   const grpc_channel_args* args,
                                   char** name_to_resolve,
                                   grpc_channel_args** new_args) {
-  *name_to_resolve = grpc_get_http_proxy_server(exec_ctx);
+  char* user_cred = NULL;
+  *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
   if (*name_to_resolve == NULL) return false;
   grpc_uri* uri =
       grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
@@ -73,12 +98,16 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
             "'http_proxy' environment variable set, but cannot "
             "parse server URI '%s' -- not using proxy",
             server_uri);
-    if (uri != NULL) grpc_uri_destroy(uri);
+    if (uri != NULL) {
+      gpr_free(user_cred);
+      grpc_uri_destroy(uri);
+    }
     return false;
   }
   if (strcmp(uri->scheme, "unix") == 0) {
     gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'",
             server_uri);
+    gpr_free(user_cred);
     grpc_uri_destroy(uri);
     return false;
   }
@@ -126,10 +155,25 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
       }
     }
   }
-  grpc_arg new_arg = grpc_channel_arg_string_create(
+  grpc_arg args_to_add[2];
+  args_to_add[0] = grpc_channel_arg_string_create(
       GRPC_ARG_HTTP_CONNECT_SERVER,
       uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1);
+  if (user_cred != NULL) {
+    /* Use base64 encoding for user credentials as stated in RFC 7617 */
+    char* encoded_user_cred =
+        grpc_base64_encode(user_cred, strlen(user_cred), 0, 0);
+    char* header;
+    gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
+    gpr_free(encoded_user_cred);
+    args_to_add[1] =
+        grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
+    *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2);
+    gpr_free(header);
+  } else {
+    *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1);
+  }
+  gpr_free(user_cred);
   grpc_uri_destroy(uri);
   return true;
 }

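Note (illustration only, not part of this commit): the hunks above make get_http_proxy_server() split the http_proxy authority on '@' and, when user credentials are present, attach a "Proxy-Authorization:Basic <base64(cred)>" header via a channel arg. A minimal standalone sketch of that split, using plain libc in place of gpr_string_split() and a made-up authority value:

/* Standalone sketch of the userinfo split performed above; the authority
 * string is hypothetical and plain libc replaces the gpr_* helpers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  const char *authority = "bob:secret@proxy.example.com:3128"; /* hypothetical */
  const char *at = strchr(authority, '@');
  char *user_cred = NULL;
  const char *proxy_name;
  if (at == NULL) {
    proxy_name = authority; /* no userinfo: resolve the whole authority */
  } else {
    size_t len = (size_t)(at - authority);
    user_cred = (char *)malloc(len + 1);
    memcpy(user_cred, authority, len);
    user_cred[len] = '\0';
    proxy_name = at + 1; /* host[:port] of the proxy to resolve */
  }
  /* proxy_mapper_map_name() then adds a channel arg whose value is
   * "Proxy-Authorization:Basic <base64(user_cred)>" per RFC 7617. */
  printf("proxy_name=%s user_cred=%s\n", proxy_name,
         user_cred != NULL ? user_cred : "(none)");
  free(user_cred);
  return 0;
}

Unlike this sketch, gpr_string_split() also exposes the malformed case of more than one '@'; the diff above treats that as a bad authority and returns NULL.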
@@ -656,6 +656,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
         "ignoring grpc_chttp2_stream with non-client generated index %d",
         t->incoming_stream_id));
     return init_skip_frame_parser(exec_ctx, t, 1);
+  } else if (grpc_chttp2_stream_map_size(&t->stream_map) >=
+             t->settings[GRPC_ACKED_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) {
+    return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Max stream count exceeded");
   }
   t->last_new_stream_id = t->incoming_stream_id;
   s = t->incoming_stream =

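Note (toy model, not gRPC code): the parsing.c hunk above refuses to create a new incoming stream once the acked MAX_CONCURRENT_STREAMS limit has been reached. Stripped of the transport types, the guard reduces to a simple count check:

/* Toy illustration of the guard: a HEADERS frame that would open a stream
 * beyond the acked MAX_CONCURRENT_STREAMS value is rejected. */
#include <stdbool.h>
#include <stdio.h>

static bool accept_new_stream(size_t open_streams, size_t max_concurrent) {
  if (open_streams >= max_concurrent) {
    fprintf(stderr, "Max stream count exceeded\n");
    return false; /* the transport surfaces an error instead of a stream */
  }
  return true;
}

int main(void) {
  printf("%d\n", accept_new_stream(99, 100));  /* 1: stream allowed */
  printf("%d\n", accept_new_stream(100, 100)); /* 0: stream refused */
  return 0;
}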
@@ -46,6 +46,7 @@
 #include "src/core/lib/iomgr/lockfree_event.h"
 #include "src/core/lib/iomgr/wakeup_fd_posix.h"
 #include "src/core/lib/profiling/timers.h"
+#include "src/core/lib/support/string.h"
 
 static grpc_wakeup_fd global_wakeup_fd;
 static int g_epfd;
@@ -78,8 +79,21 @@ static void fd_global_shutdown(void);
 
 typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
 
+static const char *kick_state_string(kick_state st) {
+  switch (st) {
+    case UNKICKED:
+      return "UNKICKED";
+    case KICKED:
+      return "KICKED";
+    case DESIGNATED_POLLER:
+      return "DESIGNATED_POLLER";
+  }
+  GPR_UNREACHABLE_CODE(return "UNKNOWN");
+}
+
 struct grpc_pollset_worker {
   kick_state kick_state;
+  int kick_state_mutator;  // which line of code last changed kick state
   bool initialized_cv;
   grpc_pollset_worker *next;
   grpc_pollset_worker *prev;
@@ -87,6 +101,12 @@ struct grpc_pollset_worker {
   grpc_closure_list schedule_on_end_work;
 };
 
+#define SET_KICK_STATE(worker, state)        \
+  do {                                       \
+    (worker)->kick_state = (state);          \
+    (worker)->kick_state_mutator = __LINE__; \
+  } while (false)
+
 #define MAX_NEIGHBOURHOODS 1024
 
 typedef struct pollset_neighbourhood {
@@ -101,10 +121,15 @@ struct grpc_pollset {
   bool reassigning_neighbourhood;
   grpc_pollset_worker *root_worker;
   bool kicked_without_poller;
+  /* Set to true if the pollset is observed to have no workers available to
+   * poll */
   bool seen_inactive;
   bool shutting_down; /* Is the pollset shutting down ? */
+  bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
   grpc_closure *shutdown_closure; /* Called after after shutdown is complete */
+  /* Number of workers who are *about-to* attach themselves to the pollset
+   * worker list */
   int begin_refs;
 
   grpc_pollset *next;
@@ -264,29 +289,23 @@ static bool fd_is_shutdown(grpc_fd *fd) {
 static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
-  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
+  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
 }
 
 static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                                grpc_closure *closure) {
-  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
+  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
 }
 
 static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                                grpc_pollset *notifier) {
-  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);
-
-  /* Note, it is possible that fd_become_readable might be called twice with
-     different 'notifier's when an fd becomes readable and it is in two epoll
-     sets (This can happen briefly during polling island merges). In such cases
-     it does not really matter which notifer is set as the read_notifier_pollset
-     (They would both point to the same polling island anyway) */
+  grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
   /* Use release store to match with acquire load in fd_get_read_notifier */
   gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
 }
 
 static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
-  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
+  grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
 }
 
 /*******************************************************************************
@@ -411,18 +430,28 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
   if (pollset->root_worker != NULL) {
     grpc_pollset_worker *worker = pollset->root_worker;
     do {
-      if (worker->initialized_cv) {
-        worker->kick_state = KICKED;
-        gpr_cv_signal(&worker->cv);
-      } else {
-        worker->kick_state = KICKED;
-        append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
-                     "pollset_shutdown");
+      switch (worker->kick_state) {
+        case KICKED:
+          break;
+        case UNKICKED:
+          SET_KICK_STATE(worker, KICKED);
+          if (worker->initialized_cv) {
+            gpr_cv_signal(&worker->cv);
+          }
+          break;
+        case DESIGNATED_POLLER:
+          SET_KICK_STATE(worker, KICKED);
+          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
+                       "pollset_kick_all");
+          break;
       }
       worker = worker->next;
     } while (worker != pollset->root_worker);
   }
+  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
+  // in the else case
   return error;
 }
@@ -438,7 +467,9 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
 static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                              grpc_closure *closure) {
   GPR_ASSERT(pollset->shutdown_closure == NULL);
+  GPR_ASSERT(!pollset->shutting_down);
   pollset->shutdown_closure = closure;
+  pollset->shutting_down = true;
   GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
   pollset_maybe_finish_shutdown(exec_ctx, pollset);
 }
@ -506,10 +537,14 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_millis deadline) { grpc_millis deadline) {
if (worker_hdl != NULL) *worker_hdl = worker; if (worker_hdl != NULL) *worker_hdl = worker;
worker->initialized_cv = false; worker->initialized_cv = false;
worker->kick_state = UNKICKED; SET_KICK_STATE(worker, UNKICKED);
worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
pollset->begin_refs++; pollset->begin_refs++;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
}
if (pollset->seen_inactive) { if (pollset->seen_inactive) {
// pollset has been observed to be inactive, we need to move back to the // pollset has been observed to be inactive, we need to move back to the
// active list // active list
@ -525,6 +560,11 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
retry_lock_neighbourhood: retry_lock_neighbourhood:
gpr_mu_lock(&neighbourhood->mu); gpr_mu_lock(&neighbourhood->mu);
gpr_mu_lock(&pollset->mu); gpr_mu_lock(&pollset->mu);
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->kick_state),
is_reassigning);
}
if (pollset->seen_inactive) { if (pollset->seen_inactive) {
if (neighbourhood != pollset->neighbourhood) { if (neighbourhood != pollset->neighbourhood) {
gpr_mu_unlock(&neighbourhood->mu); gpr_mu_unlock(&neighbourhood->mu);
@ -535,8 +575,14 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->seen_inactive = false; pollset->seen_inactive = false;
if (neighbourhood->active_root == NULL) { if (neighbourhood->active_root == NULL) {
neighbourhood->active_root = pollset->next = pollset->prev = pollset; neighbourhood->active_root = pollset->next = pollset->prev = pollset;
if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { /* TODO: sreek. Why would this worker state be other than UNKICKED
worker->kick_state = DESIGNATED_POLLER; * here ? (since the worker isn't added to the pollset yet, there is no
* way it can be "found" by other threads to get kicked). */
/* If there is no designated poller, make this the designated poller */
if (worker->kick_state == UNKICKED &&
gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
SET_KICK_STATE(worker, DESIGNATED_POLLER);
} }
} else { } else {
pollset->next = neighbourhood->active_root; pollset->next = neighbourhood->active_root;
@ -550,25 +596,54 @@ static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} }
gpr_mu_unlock(&neighbourhood->mu); gpr_mu_unlock(&neighbourhood->mu);
} }
worker_insert(pollset, worker); worker_insert(pollset, worker);
pollset->begin_refs--; pollset->begin_refs--;
if (worker->kick_state == UNKICKED) { if (worker->kick_state == UNKICKED && !pollset->kicked_without_poller) {
GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker); GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
worker->initialized_cv = true; worker->initialized_cv = true;
gpr_cv_init(&worker->cv); gpr_cv_init(&worker->cv);
while (worker->kick_state == UNKICKED && while (worker->kick_state == UNKICKED && !pollset->shutting_down) {
pollset->shutdown_closure == NULL) { if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->kick_state),
pollset->shutting_down);
}
if (gpr_cv_wait(&worker->cv, &pollset->mu, if (gpr_cv_wait(&worker->cv, &pollset->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) && grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
worker->kick_state == UNKICKED) { worker->kick_state == UNKICKED) {
worker->kick_state = KICKED; /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
received a kick */
SET_KICK_STATE(worker, KICKED);
} }
} }
grpc_exec_ctx_invalidate_now(exec_ctx); grpc_exec_ctx_invalidate_now(exec_ctx);
} }
return worker->kick_state == DESIGNATED_POLLER && if (GRPC_TRACER_ON(grpc_polling_trace)) {
pollset->shutdown_closure == NULL; gpr_log(GPR_ERROR,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->kick_state),
pollset->shutting_down, pollset->kicked_without_poller);
}
/* We release pollset lock in this function at a couple of places:
* 1. Briefly when assigning pollset to a neighbourhood
* 2. When doing gpr_cv_wait()
* It is possible that 'kicked_without_poller' was set to true during (1) and
* 'shutting_down' is set to true during (1) or (2). If either of them is
* true, this worker cannot do polling */
/* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
* case; especially when the worker is the DESIGNATED_POLLER */
if (pollset->kicked_without_poller) {
pollset->kicked_without_poller = false;
return false;
}
return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down;
} }
static bool check_neighbourhood_for_available_poller( static bool check_neighbourhood_for_available_poller(
@ -588,10 +663,18 @@ static bool check_neighbourhood_for_available_poller(
case UNKICKED: case UNKICKED:
if (gpr_atm_no_barrier_cas(&g_active_poller, 0, if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) { (gpr_atm)inspect_worker)) {
inspect_worker->kick_state = DESIGNATED_POLLER; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
inspect_worker);
}
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) { if (inspect_worker->initialized_cv) {
gpr_cv_signal(&inspect_worker->cv); gpr_cv_signal(&inspect_worker->cv);
} }
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
}
} }
// even if we didn't win the cas, there's a worker, we can stop // even if we didn't win the cas, there's a worker, we can stop
found_worker = true; found_worker = true;
@ -604,9 +687,12 @@ static bool check_neighbourhood_for_available_poller(
break; break;
} }
inspect_worker = inspect_worker->next; inspect_worker = inspect_worker->next;
} while (inspect_worker != inspect->root_worker); } while (!found_worker && inspect_worker != inspect->root_worker);
} }
if (!found_worker) { if (!found_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
}
inspect->seen_inactive = true; inspect->seen_inactive = true;
if (inspect == neighbourhood->active_root) { if (inspect == neighbourhood->active_root) {
neighbourhood->active_root = neighbourhood->active_root =
@ -624,15 +710,22 @@ static bool check_neighbourhood_for_available_poller(
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
grpc_pollset_worker *worker, grpc_pollset_worker *worker,
grpc_pollset_worker **worker_hdl) { grpc_pollset_worker **worker_hdl) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
}
if (worker_hdl != NULL) *worker_hdl = NULL; if (worker_hdl != NULL) *worker_hdl = NULL;
worker->kick_state = KICKED; /* Make sure we appear kicked */
SET_KICK_STATE(worker, KICKED);
grpc_closure_list_move(&worker->schedule_on_end_work, grpc_closure_list_move(&worker->schedule_on_end_work,
&exec_ctx->closure_list); &exec_ctx->closure_list);
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) { if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->kick_state == UNKICKED) { if (worker->next != worker && worker->next->kick_state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
}
GPR_ASSERT(worker->next->initialized_cv); GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
worker->next->kick_state = DESIGNATED_POLLER; SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
gpr_cv_signal(&worker->next->cv); gpr_cv_signal(&worker->next->cv);
if (grpc_exec_ctx_has_work(exec_ctx)) { if (grpc_exec_ctx_has_work(exec_ctx)) {
gpr_mu_unlock(&pollset->mu); gpr_mu_unlock(&pollset->mu);
@ -641,9 +734,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
} }
} else { } else {
gpr_atm_no_barrier_store(&g_active_poller, 0); gpr_atm_no_barrier_store(&g_active_poller, 0);
gpr_mu_unlock(&pollset->mu);
size_t poller_neighbourhood_idx = size_t poller_neighbourhood_idx =
(size_t)(pollset->neighbourhood - g_neighbourhoods); (size_t)(pollset->neighbourhood - g_neighbourhoods);
gpr_mu_unlock(&pollset->mu);
bool found_worker = false; bool found_worker = false;
bool scan_state[MAX_NEIGHBOURHOODS]; bool scan_state[MAX_NEIGHBOURHOODS];
for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) { for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
@ -679,6 +772,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
if (worker->initialized_cv) { if (worker->initialized_cv) {
gpr_cv_destroy(&worker->cv); gpr_cv_destroy(&worker->cv);
} }
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_DEBUG, " .. remove worker");
}
if (EMPTIED == worker_remove(pollset, worker)) { if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(exec_ctx, pollset); pollset_maybe_finish_shutdown(exec_ctx, pollset);
} }
@ -699,15 +795,17 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
pollset->kicked_without_poller = false; pollset->kicked_without_poller = false;
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
if (begin_worker(exec_ctx, pollset, &worker, worker_hdl, deadline)) { if (begin_worker(exec_ctx, pollset, &worker, worker_hdl, deadline)) {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
GPR_ASSERT(!pollset->shutdown_closure); GPR_ASSERT(!pollset->shutting_down);
GPR_ASSERT(!pollset->seen_inactive); GPR_ASSERT(!pollset->seen_inactive);
gpr_mu_unlock(&pollset->mu); gpr_mu_unlock(&pollset->mu);
append_error(&error, pollset_epoll(exec_ctx, pollset, deadline), err_desc); append_error(&error, pollset_epoll(exec_ctx, pollset, deadline), err_desc);
gpr_mu_lock(&pollset->mu); gpr_mu_lock(&pollset->mu);
gpr_tls_set(&g_current_thread_worker, 0); gpr_tls_set(&g_current_thread_worker, 0);
} else {
gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
} }
end_worker(exec_ctx, pollset, &worker, worker_hdl); end_worker(exec_ctx, pollset, &worker, worker_hdl);
gpr_tls_set(&g_current_thread_pollset, 0); gpr_tls_set(&g_current_thread_pollset, 0);
@ -716,46 +814,136 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
static grpc_error *pollset_kick(grpc_pollset *pollset, static grpc_error *pollset_kick(grpc_pollset *pollset,
grpc_pollset_worker *specific_worker) { grpc_pollset_worker *specific_worker) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_strvec log;
gpr_strvec_init(&log);
char *tmp;
gpr_asprintf(
&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
specific_worker, (void *)gpr_tls_get(&g_current_thread_pollset),
(void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
kick_state_string(pollset->root_worker->kick_state),
pollset->root_worker->next,
kick_state_string(pollset->root_worker->next->kick_state));
gpr_strvec_add(&log, tmp);
}
if (specific_worker != NULL) {
gpr_asprintf(&tmp, " worker_kick_state=%s",
kick_state_string(specific_worker->kick_state));
gpr_strvec_add(&log, tmp);
}
tmp = gpr_strvec_flatten(&log, NULL);
gpr_strvec_destroy(&log);
gpr_log(GPR_ERROR, "%s", tmp);
gpr_free(tmp);
}
if (specific_worker == NULL) { if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
grpc_pollset_worker *root_worker = pollset->root_worker; grpc_pollset_worker *root_worker = pollset->root_worker;
if (root_worker == NULL) { if (root_worker == NULL) {
pollset->kicked_without_poller = true; pollset->kicked_without_poller = true;
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked_without_poller");
}
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
grpc_pollset_worker *next_worker = root_worker->next; grpc_pollset_worker *next_worker = root_worker->next;
if (root_worker == next_worker && if (root_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
return GRPC_ERROR_NONE;
} else if (next_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
}
SET_KICK_STATE(next_worker, KICKED);
return GRPC_ERROR_NONE;
} else if (root_worker ==
next_worker && // only try and wake up a poller if
// there is no next worker
root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
&g_active_poller)) { &g_active_poller)) {
root_worker->kick_state = KICKED; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd); return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (next_worker->kick_state == UNKICKED) { } else if (next_worker->kick_state == UNKICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
}
GPR_ASSERT(next_worker->initialized_cv); GPR_ASSERT(next_worker->initialized_cv);
next_worker->kick_state = KICKED; SET_KICK_STATE(next_worker, KICKED);
gpr_cv_signal(&next_worker->cv); gpr_cv_signal(&next_worker->cv);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} else if (next_worker->kick_state == DESIGNATED_POLLER) {
if (root_worker->kick_state != DESIGNATED_POLLER) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(
GPR_ERROR,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker);
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
gpr_cv_signal(&root_worker->cv);
}
return GRPC_ERROR_NONE;
} else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
}
SET_KICK_STATE(next_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
} else { } else {
GPR_ASSERT(next_worker->kick_state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
} else { } else {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kicked while waking up");
}
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
} else if (specific_worker->kick_state == KICKED) { } else if (specific_worker->kick_state == KICKED) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. specific worker already kicked");
}
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} else if (gpr_tls_get(&g_current_thread_worker) == } else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) { (intptr_t)specific_worker) {
specific_worker->kick_state = KICKED; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
}
SET_KICK_STATE(specific_worker, KICKED);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} else if (specific_worker == } else if (specific_worker ==
(grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
specific_worker->kick_state = KICKED; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick active poller");
}
SET_KICK_STATE(specific_worker, KICKED);
return grpc_wakeup_fd_wakeup(&global_wakeup_fd); return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
} else if (specific_worker->initialized_cv) { } else if (specific_worker->initialized_cv) {
specific_worker->kick_state = KICKED; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv); gpr_cv_signal(&specific_worker->cv);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} else { } else {
specific_worker->kick_state = KICKED; if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_log(GPR_ERROR, " .. kick non-waiting worker");
}
SET_KICK_STATE(specific_worker, KICKED);
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
} }
@@ -801,6 +989,7 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
 static void shutdown_engine(void) {
   fd_global_shutdown();
   pollset_global_shutdown();
+  close(g_epfd);
 }
 
 static const grpc_event_engine_vtable vtable = {
@@ -837,9 +1026,6 @@ static const grpc_event_engine_vtable vtable = {
 /* It is possible that GLIBC has epoll but the underlying kernel doesn't.
  * Create a dummy epoll_fd to make sure epoll support is available */
 const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
-  /* TODO(ctiller): temporary, until this stabilizes */
-  if (!explicit_request) return NULL;
-
   if (!grpc_has_wakeup_fd()) {
     return NULL;
   }
@@ -858,6 +1044,8 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
     return NULL;
   }
 
+  gpr_log(GPR_ERROR, "grpc epoll fd: %d", g_epfd);
+
   return &vtable;
 }

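Note (standalone illustration with stand-in types, not the gRPC structs): the SET_KICK_STATE macro introduced above pairs every kick-state mutation with __LINE__, so a stuck or misbehaving worker can report which line of code last changed its state when polling tracing is enabled. The idiom in isolation:

/* Minimal sketch of the SET_KICK_STATE / kick_state_mutator debugging idiom. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

typedef struct {
  kick_state kick_state;
  int kick_state_mutator; /* source line of the last state change */
} worker;

#define SET_KICK_STATE(w, state)        \
  do {                                  \
    (w)->kick_state = (state);          \
    (w)->kick_state_mutator = __LINE__; \
  } while (false)

static const char *kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED: return "UNKICKED";
    case KICKED: return "KICKED";
    case DESIGNATED_POLLER: return "DESIGNATED_POLLER";
  }
  return "UNKNOWN";
}

int main(void) {
  worker w;
  SET_KICK_STATE(&w, UNKICKED);
  SET_KICK_STATE(&w, KICKED);
  printf("state=%s last set at line %d\n", kick_state_string(w.kick_state),
         w.kick_state_mutator);
  return 0;
}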
@ -1007,12 +1007,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
} }
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
} }
/******************************************************************************* /*******************************************************************************
@ -1209,7 +1209,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) { grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure); grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with /* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll different 'notifier's when an fd becomes readable and it is in two epoll
@ -1221,7 +1221,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
} }
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure); grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
} }
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,

@ -560,12 +560,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
} }
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
} }
/******************************************************************************* /*******************************************************************************
@ -696,11 +696,11 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
} }
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure); grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
} }
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure); grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
} }
static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps,

@ -439,12 +439,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
} }
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
} }
/******************************************************************************* /*******************************************************************************
@ -698,7 +698,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) { grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure); grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with /* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll different 'notifier's when an fd becomes readable and it is in two epoll
@ -710,7 +710,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
} }
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure); grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
} }
static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { static grpc_error *fd_become_pollable_locked(grpc_fd *fd) {
@ -1037,8 +1037,8 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
/* Introduce a spurious completion. /* Introduce a spurious completion.
If we do not, then it may be that the fd-specific epoll set consumed If we do not, then it may be that the fd-specific epoll set consumed
a completion without being polled, leading to a missed edge going up. */ a completion without being polled, leading to a missed edge going up. */
grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure); grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read");
grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure); grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write");
pollset_kick_all(exec_ctx, pollset); pollset_kick_all(exec_ctx, pollset);
pollset->current_pollable = &pollset->pollable; pollset->current_pollable = &pollset->pollable;
if (append_error(&error, pollable_materialize(&pollset->pollable), if (append_error(&error, pollable_materialize(&pollset->pollable),

@ -934,12 +934,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
} }
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_closure *closure) { grpc_closure *closure) {
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
} }
/******************************************************************************* /*******************************************************************************
@ -1102,7 +1102,7 @@ static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
grpc_pollset *notifier) { grpc_pollset *notifier) {
grpc_lfev_set_ready(exec_ctx, &fd->read_closure); grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
/* Note, it is possible that fd_become_readable might be called twice with /* Note, it is possible that fd_become_readable might be called twice with
different 'notifier's when an fd becomes readable and it is in two epoll different 'notifier's when an fd becomes readable and it is in two epoll
@ -1114,7 +1114,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
} }
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
grpc_lfev_set_ready(exec_ctx, &fd->write_closure); grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
} }
static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
@@ -1718,7 +1718,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux(
   if (!is_grpc_wakeup_signal_initialized) {
     /* TODO(ctiller): when other epoll engines are ready, remove the true || to
      * force this to be explitly chosen if needed */
-    if (true || explicit_request) {
+    if (explicit_request) {
       grpc_use_signal(SIGRTMIN + 6);
     } else {
       return NULL;

@@ -79,12 +79,12 @@ bool grpc_lfev_is_shutdown(gpr_atm *state) {
 }
 
 void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
-                         grpc_closure *closure) {
+                         grpc_closure *closure, const char *variable) {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(state);
     if (GRPC_TRACER_ON(grpc_polling_trace)) {
-      gpr_log(GPR_DEBUG, "lfev_notify_on: %p curr=%p closure=%p", state,
-              (void *)curr, closure);
+      gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable,
+              state, (void *)curr, closure);
     }
     switch (curr) {
       case CLOSURE_NOT_READY: {
@@ -149,7 +149,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(state);
     if (GRPC_TRACER_ON(grpc_polling_trace)) {
-      gpr_log(GPR_DEBUG, "lfev_set_shutdown: %p curr=%p err=%s", state,
+      gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state,
              (void *)curr, grpc_error_string(shutdown_err));
     }
     switch (curr) {
@@ -193,12 +193,14 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
   GPR_UNREACHABLE_CODE(return false);
 }
 
-void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) {
+void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+                         const char *variable) {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(state);
     if (GRPC_TRACER_ON(grpc_polling_trace)) {
-      gpr_log(GPR_DEBUG, "lfev_set_ready: %p curr=%p", state, (void *)curr);
+      gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state,
+              (void *)curr);
     }
     switch (curr) {

@@ -30,10 +30,11 @@ void grpc_lfev_destroy(gpr_atm *state);
 bool grpc_lfev_is_shutdown(gpr_atm *state);
 
 void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state,
-                         grpc_closure *closure);
+                         grpc_closure *closure, const char *variable);
 /* Returns true on first successful shutdown */
 bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state,
                             grpc_error *shutdown_err);
-void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state);
+void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state,
+                         const char *variable);
 
 #endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */

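Note (sketch with illustrative types, not the real gpr_atm/grpc_closure API): the only change to the lockfree-event interface above is an extra const char *variable argument, threaded through purely so trace output can say whether the "read" or the "write" event fired. The pattern in isolation:

/* Standalone sketch of the debug-label pattern; event_state stands in for
 * gpr_atm and only the trace line of the real CAS loop is shown. */
#include <stdio.h>

typedef long event_state; /* stand-in for gpr_atm */

static void lfev_set_ready(event_state *state, const char *variable) {
  /* real code runs a compare-and-swap loop here */
  printf("lfev_set_ready[%s]: %p curr=%ld\n", variable, (void *)state, *state);
}

int main(void) {
  event_state read_closure = 0, write_closure = 0;
  lfev_set_ready(&read_closure, "read");
  lfev_set_ready(&write_closure, "write");
  return 0;
}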
@ -165,8 +165,6 @@
}], }],
['OS == "mac"', { ['OS == "mac"', {
'xcode_settings': { 'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
},
% if defaults['global'].get('CPPFLAGS', None) is not None: % if defaults['global'].get('CPPFLAGS', None) is not None:
'OTHER_CFLAGS': [ 'OTHER_CFLAGS': [
% for item in defaults['global'].get('CPPFLAGS').split(): % for item in defaults['global'].get('CPPFLAGS').split():
@ -174,10 +172,15 @@
% endfor % endfor
], ],
'OTHER_CPLUSPLUSFLAGS': [ 'OTHER_CPLUSPLUSFLAGS': [
% for item in defaults['global'].get('CPPFLAGS').split():
'${item}',
% endfor
'-stdlib=libc++', '-stdlib=libc++',
'-std=c++11' '-std=c++11',
'-Wno-error=deprecated-declarations'
], ],
% endif % endif
},
}] }]
] ]
}, },
@ -201,6 +204,13 @@
'${source}', '${source}',
% endfor % endfor
], ],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
}, },
% endif % endif
% endfor % endfor
@ -282,6 +292,13 @@
'${source}', '${source}',
% endfor % endfor
], ],
'conditions': [
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}]
]
}, },
% endif % endif
% endfor % endfor
@ -317,6 +334,11 @@
'ldflags': [ 'ldflags': [
'-Wl,-wrap,memcpy' '-Wl,-wrap,memcpy'
] ]
}],
['OS == "mac"', {
'xcode_settings': {
'MACOSX_DEPLOYMENT_TARGET': '10.9'
}
}] }]
], ],
"target_name": "${module.name}", "target_name": "${module.name}",

@ -106,6 +106,8 @@ extern void ping(grpc_end2end_test_config config);
extern void ping_pre_init(void); extern void ping_pre_init(void);
extern void ping_pong_streaming(grpc_end2end_test_config config); extern void ping_pong_streaming(grpc_end2end_test_config config);
extern void ping_pong_streaming_pre_init(void); extern void ping_pong_streaming_pre_init(void);
extern void proxy_auth(grpc_end2end_test_config config);
extern void proxy_auth_pre_init(void);
extern void registered_call(grpc_end2end_test_config config); extern void registered_call(grpc_end2end_test_config config);
extern void registered_call_pre_init(void); extern void registered_call_pre_init(void);
extern void request_with_flags(grpc_end2end_test_config config); extern void request_with_flags(grpc_end2end_test_config config);
@ -181,6 +183,7 @@ void grpc_end2end_tests_pre_init(void) {
payload_pre_init(); payload_pre_init();
ping_pre_init(); ping_pre_init();
ping_pong_streaming_pre_init(); ping_pong_streaming_pre_init();
proxy_auth_pre_init();
registered_call_pre_init(); registered_call_pre_init();
request_with_flags_pre_init(); request_with_flags_pre_init();
request_with_payload_pre_init(); request_with_payload_pre_init();
@ -244,6 +247,7 @@ void grpc_end2end_tests(int argc, char **argv,
payload(config); payload(config);
ping(config); ping(config);
ping_pong_streaming(config); ping_pong_streaming(config);
proxy_auth(config);
registered_call(config); registered_call(config);
request_with_flags(config); request_with_flags(config);
request_with_payload(config); request_with_payload(config);
@ -416,6 +420,10 @@ void grpc_end2end_tests(int argc, char **argv,
ping_pong_streaming(config); ping_pong_streaming(config);
continue; continue;
} }
if (0 == strcmp("proxy_auth", argv[i])) {
proxy_auth(config);
continue;
}
if (0 == strcmp("registered_call", argv[i])) { if (0 == strcmp("registered_call", argv[i])) {
registered_call(config); registered_call(config);
continue; continue;

@ -108,6 +108,8 @@ extern void ping(grpc_end2end_test_config config);
extern void ping_pre_init(void); extern void ping_pre_init(void);
extern void ping_pong_streaming(grpc_end2end_test_config config); extern void ping_pong_streaming(grpc_end2end_test_config config);
extern void ping_pong_streaming_pre_init(void); extern void ping_pong_streaming_pre_init(void);
extern void proxy_auth(grpc_end2end_test_config config);
extern void proxy_auth_pre_init(void);
extern void registered_call(grpc_end2end_test_config config); extern void registered_call(grpc_end2end_test_config config);
extern void registered_call_pre_init(void); extern void registered_call_pre_init(void);
extern void request_with_flags(grpc_end2end_test_config config); extern void request_with_flags(grpc_end2end_test_config config);
@ -184,6 +186,7 @@ void grpc_end2end_tests_pre_init(void) {
payload_pre_init(); payload_pre_init();
ping_pre_init(); ping_pre_init();
ping_pong_streaming_pre_init(); ping_pong_streaming_pre_init();
proxy_auth_pre_init();
registered_call_pre_init(); registered_call_pre_init();
request_with_flags_pre_init(); request_with_flags_pre_init();
request_with_payload_pre_init(); request_with_payload_pre_init();
@ -248,6 +251,7 @@ void grpc_end2end_tests(int argc, char **argv,
payload(config); payload(config);
ping(config); ping(config);
ping_pong_streaming(config); ping_pong_streaming(config);
proxy_auth(config);
registered_call(config); registered_call(config);
request_with_flags(config); request_with_flags(config);
request_with_payload(config); request_with_payload(config);
@ -424,6 +428,10 @@ void grpc_end2end_tests(int argc, char **argv,
ping_pong_streaming(config); ping_pong_streaming(config);
continue; continue;
} }
if (0 == strcmp("proxy_auth", argv[i])) {
proxy_auth(config);
continue;
}
if (0 == strcmp("registered_call", argv[i])) { if (0 == strcmp("registered_call", argv[i])) {
registered_call(config); registered_call(config);
continue; continue;

@ -47,11 +47,13 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_channel_args *client_args, grpc_channel_args *server_args) {
grpc_end2end_test_fixture f; grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f)); memset(&f, 0, sizeof(f));
fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data)); fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data));
const int server_port = grpc_pick_unused_port_or_die(); const int server_port = grpc_pick_unused_port_or_die();
gpr_join_host_port(&ffd->server_addr, "localhost", server_port); gpr_join_host_port(&ffd->server_addr, "localhost", server_port);
ffd->proxy = grpc_end2end_http_proxy_create();
/* Passing client_args to proxy_create for the case of checking for proxy auth
*/
ffd->proxy = grpc_end2end_http_proxy_create(client_args);
f.fixture_data = ffd; f.fixture_data = ffd;
f.cq = grpc_completion_queue_create_for_next(NULL); f.cq = grpc_completion_queue_create_for_next(NULL);
@ -64,8 +66,17 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args) { grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data; fullstack_fixture_data *ffd = f->fixture_data;
char *proxy_uri; char *proxy_uri;
/* If testing for proxy auth, add credentials to proxy uri */
const grpc_arg *proxy_auth_arg =
grpc_channel_args_find(client_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS);
if (proxy_auth_arg == NULL || proxy_auth_arg->type != GRPC_ARG_STRING) {
gpr_asprintf(&proxy_uri, "http://%s", gpr_asprintf(&proxy_uri, "http://%s",
grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); grpc_end2end_http_proxy_get_proxy_name(ffd->proxy));
} else {
gpr_asprintf(&proxy_uri, "http://%s@%s", proxy_auth_arg->value.string,
grpc_end2end_http_proxy_get_proxy_name(ffd->proxy));
}
gpr_setenv("http_proxy", proxy_uri); gpr_setenv("http_proxy", proxy_uri);
gpr_free(proxy_uri); gpr_free(proxy_uri);
f->client = grpc_insecure_channel_create(ffd->server_addr, client_args, NULL); f->client = grpc_insecure_channel_create(ffd->server_addr, client_args, NULL);
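For reference, the fixture change above exports one of two http_proxy values, depending on whether GRPC_ARG_HTTP_PROXY_AUTH_CREDS is present in the client args; the client-side HTTP proxy code is then expected to turn the userinfo portion of that URI into the Proxy-Authorization header the test proxy checks below. A minimal standalone sketch (plain C, not fixture code; "localhost:12345" is a hypothetical proxy address, since the real port comes from grpc_pick_unused_port_or_die()):

/* Sketch of the two http_proxy values chttp2_init_client_fullstack can now
 * export. "localhost:12345" stands in for the address returned by
 * grpc_end2end_http_proxy_get_proxy_name(); the real port is picked at
 * random by the fixture. */
#include <stdio.h>

int main(void) {
  const char *proxy_name = "localhost:12345"; /* hypothetical proxy address */
  const char *creds = "aladdin:opensesame";   /* GRPC_TEST_HTTP_PROXY_AUTH_CREDS */
  char uri[256];
  snprintf(uri, sizeof(uri), "http://%s", proxy_name);
  printf("%s\n", uri); /* without proxy auth: http://localhost:12345 */
  snprintf(uri, sizeof(uri), "http://%s@%s", creds, proxy_name);
  printf("%s\n", uri); /* with proxy auth: http://aladdin:opensesame@localhost:12345 */
  return 0;
}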

@ -22,6 +22,7 @@
#include <string.h> #include <string.h>
#include <grpc/grpc.h>
#include <grpc/slice_buffer.h> #include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h> #include <grpc/support/alloc.h>
#include <grpc/support/atm.h> #include <grpc/support/atm.h>
@ -46,7 +47,9 @@
#include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/iomgr/tcp_client.h"
#include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/iomgr/tcp_server.h"
#include "src/core/lib/iomgr/timer.h" #include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/support/string.h"
#include "test/core/util/port.h" #include "test/core/util/port.h"
struct grpc_end2end_http_proxy { struct grpc_end2end_http_proxy {
@ -304,6 +307,28 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg,
&conn->on_write_response_done); &conn->on_write_response_done);
} }
/**
* Parses the proxy auth header value to check if it matches :-
* Basic <base64_encoded_expected_cred>
* Returns true if it matches, false otherwise
*/
static bool proxy_auth_header_matches(grpc_exec_ctx* exec_ctx,
char* proxy_auth_header_val,
char* expected_cred) {
GPR_ASSERT(proxy_auth_header_val != NULL);
GPR_ASSERT(expected_cred != NULL);
if (strncmp(proxy_auth_header_val, "Basic ", 6) != 0) {
return false;
}
proxy_auth_header_val += 6;
grpc_slice decoded_slice =
grpc_base64_decode(exec_ctx, proxy_auth_header_val, 0);
const bool header_matches =
grpc_slice_str_cmp(decoded_slice, expected_cred) == 0;
grpc_slice_unref_internal(exec_ctx, decoded_slice);
return header_matches;
}
// Callback to read the HTTP CONNECT request.
// TODO(roth): Technically, for any of the failure modes handled by this
// function, we should handle the error by returning an HTTP response to
@ -352,6 +377,28 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
GRPC_ERROR_UNREF(error);
return;
}
// If proxy auth is being used, check if the header is present and as expected
const grpc_arg* proxy_auth_arg = grpc_channel_args_find(
conn->proxy->channel_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS);
if (proxy_auth_arg != NULL && proxy_auth_arg->type == GRPC_ARG_STRING) {
bool client_authenticated = false;
for (size_t i = 0; i < conn->http_request.hdr_count; i++) {
if (strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) {
client_authenticated = proxy_auth_header_matches(
exec_ctx, conn->http_request.hdrs[i].value,
proxy_auth_arg->value.string);
break;
}
}
if (!client_authenticated) {
const char* msg = "HTTP Connect could not verify authentication";
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(msg);
proxy_connection_failed(exec_ctx, conn, true /* is_client */,
"HTTP proxy read request", error);
GRPC_ERROR_UNREF(error);
return;
}
}
// Resolve address.
grpc_resolved_addresses* resolved_addresses = NULL;
error = grpc_blocking_resolve_address(conn->http_request.path, "80",
@ -434,7 +481,8 @@ static void thread_main(void* arg) {
grpc_exec_ctx_finish(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx);
} }
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) { grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
grpc_channel_args* args) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_end2end_http_proxy* proxy =
(grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy));
@ -446,7 +494,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) {
gpr_join_host_port(&proxy->proxy_name, "localhost", proxy_port); gpr_join_host_port(&proxy->proxy_name, "localhost", proxy_port);
gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name); gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name);
// Create TCP server. // Create TCP server.
proxy->channel_args = grpc_channel_args_copy(NULL); proxy->channel_args = grpc_channel_args_copy(args);
grpc_error* error = grpc_tcp_server_create( grpc_error* error = grpc_tcp_server_create(
&exec_ctx, NULL, proxy->channel_args, &proxy->server); &exec_ctx, NULL, proxy->channel_args, &proxy->server);
GPR_ASSERT(error == GRPC_ERROR_NONE); GPR_ASSERT(error == GRPC_ERROR_NONE);
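To make the check in proxy_auth_header_matches above concrete: for the test credentials "aladdin:opensesame" the proxy accepts exactly the header value "Basic YWxhZGRpbjpvcGVuc2VzYW1l". The following standalone sketch computes and prints that value; it is plain C on purpose, so it does not depend on gRPC's b64.h helpers, and the b64_encode helper is illustrative only:

/* Prints the Proxy-Authorization header the test proxy expects for the
 * test credentials "aladdin:opensesame". */
#include <stdio.h>
#include <string.h>

static const char b64_chars[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/* Base64-encode the NUL-terminated string `in` into `out` (caller supplies
 * a large-enough buffer). */
static void b64_encode(const char *in, char *out) {
  size_t len = strlen(in), i, j = 0;
  for (i = 0; i + 2 < len; i += 3) {
    unsigned v = (unsigned char)in[i] << 16 | (unsigned char)in[i + 1] << 8 |
                 (unsigned char)in[i + 2];
    out[j++] = b64_chars[v >> 18];
    out[j++] = b64_chars[(v >> 12) & 63];
    out[j++] = b64_chars[(v >> 6) & 63];
    out[j++] = b64_chars[v & 63];
  }
  if (i < len) { /* 1 or 2 trailing bytes: pad with '=' */
    unsigned v = (unsigned char)in[i] << 16;
    if (i + 1 < len) v |= (unsigned char)in[i + 1] << 8;
    out[j++] = b64_chars[v >> 18];
    out[j++] = b64_chars[(v >> 12) & 63];
    out[j++] = (i + 1 < len) ? b64_chars[(v >> 6) & 63] : '=';
    out[j++] = '=';
  }
  out[j] = '\0';
}

int main(void) {
  char encoded[64];
  b64_encode("aladdin:opensesame", encoded);
  /* Prints: Proxy-Authorization: Basic YWxhZGRpbjpvcGVuc2VzYW1l */
  printf("Proxy-Authorization: Basic %s\n", encoded);
  return 0;
}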

@ -16,11 +16,28 @@
*
*/
#ifndef GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H
#define GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H
#include <grpc/grpc.h>
/* The test credentials being used for HTTP Proxy Authorization */
#define GRPC_TEST_HTTP_PROXY_AUTH_CREDS "aladdin:opensesame"
/* A channel arg key used to indicate that the channel uses proxy authorization.
* The value (string) should be the proxy auth credentials that should be
* checked.
*/
#define GRPC_ARG_HTTP_PROXY_AUTH_CREDS "grpc.test.proxy_auth"
typedef struct grpc_end2end_http_proxy grpc_end2end_http_proxy;
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create();
grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
grpc_channel_args* args);
void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy);
const char* grpc_end2end_http_proxy_get_proxy_name(
grpc_end2end_http_proxy* proxy);
#endif /* GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H */
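These two macros are the entire contract between a test and the proxy fixture. A minimal sketch of how a test might pass them as a client channel arg (this mirrors what the new proxy_auth test further below does; proxy_auth_client_args is an illustrative helper name, not part of the fixture):

/* Sketch: build client channel args that ask the h2_http_proxy fixture and
 * the test proxy to require the test credentials. Mirrors the proxy_auth
 * end2end test below. */
#include <grpc/grpc.h>
#include "test/core/end2end/fixtures/http_proxy_fixture.h"

static grpc_channel_args proxy_auth_client_args(grpc_arg *storage) {
  storage->type = GRPC_ARG_STRING;
  storage->key = GRPC_ARG_HTTP_PROXY_AUTH_CREDS;
  storage->value.string = GRPC_TEST_HTTP_PROXY_AUTH_CREDS;
  grpc_channel_args args = {1 /* num_args */, storage};
  return args;
}

int main(void) {
  grpc_arg arg;
  grpc_channel_args client_args = proxy_auth_client_args(&arg);
  (void)client_args; /* would be handed to the fixture's create/init hooks */
  return 0;
}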

@ -24,9 +24,9 @@ import hashlib
FixtureOptions = collections.namedtuple(
'FixtureOptions',
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2')
'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth')
default_unsecure_fixture_options = FixtureOptions(
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True)
True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False)
socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False)
default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True)
uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv'])
@ -47,7 +47,7 @@ END2END_FIXTURES = {
'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True),
'h2_full+workarounds': default_unsecure_fixture_options,
'h2_http_proxy': default_unsecure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True),
'h2_oauth2': default_secure_fixture_options._replace(
ci_mac=False, exclude_iomgrs=['uv']),
'h2_proxy': default_unsecure_fixture_options._replace(
@ -69,8 +69,8 @@ END2END_FIXTURES = {
TestOptions = collections.namedtuple(
'TestOptions',
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2')
'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth')
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False)
default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False)
connectivity_test_options = default_test_options._replace(needs_fullstack=True)
LOWCPU = 0.1
@ -128,6 +128,7 @@ END2END_TESTS = {
'load_reporting_hook': default_test_options,
'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU),
'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU),
'proxy_auth': default_test_options._replace(needs_proxy_auth=True),
'registered_call': default_test_options,
'request_with_flags': default_test_options._replace(
proxyable=False, cpu_cost=LOWCPU),
@ -178,6 +179,9 @@ def compatible(f, t):
if END2END_TESTS[t].needs_http2:
if not END2END_FIXTURES[f].is_http2:
return False
if END2END_TESTS[t].needs_proxy_auth:
if not END2END_FIXTURES[f].supports_proxy_auth:
return False
return True

@ -21,7 +21,7 @@ load("//bazel:grpc_build_system.bzl", "grpc_sh_test", "grpc_cc_binary", "grpc_cc
def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True,
name_resolution=True, secure=True, tracing=False,
platforms=['windows', 'linux', 'mac', 'posix'],
is_inproc=False, is_http2=True):
is_inproc=False, is_http2=True, supports_proxy_auth=False):
return struct(
fullstack=fullstack,
includes_proxy=includes_proxy,
@ -30,7 +30,8 @@ def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True,
secure=secure,
tracing=tracing,
is_inproc=is_inproc,
is_http2=is_http2
is_http2=is_http2,
supports_proxy_auth=supports_proxy_auth
#platforms=platforms
)
@ -47,7 +48,7 @@ END2END_FIXTURES = {
'h2_full+pipe': fixture_options(platforms=['linux']),
'h2_full+trace': fixture_options(tracing=True),
'h2_full+workarounds': fixture_options(),
'h2_http_proxy': fixture_options(),
'h2_http_proxy': fixture_options(supports_proxy_auth=True),
'h2_oauth2': fixture_options(),
'h2_proxy': fixture_options(includes_proxy=True),
'h2_sockpair_1byte': fixture_options(fullstack=False, dns_resolver=False),
@ -67,7 +68,8 @@ END2END_FIXTURES = {
def test_options(needs_fullstack=False, needs_dns=False, needs_names=False,
proxyable=True, secure=False, traceable=False,
exclude_inproc=False, needs_http2=False):
exclude_inproc=False, needs_http2=False,
needs_proxy_auth=False):
return struct(
needs_fullstack=needs_fullstack,
needs_dns=needs_dns,
@ -76,7 +78,8 @@ def test_options(needs_fullstack=False, needs_dns=False, needs_names=False,
secure=secure,
traceable=traceable,
exclude_inproc=exclude_inproc,
needs_http2=needs_http2
needs_http2=needs_http2,
needs_proxy_auth=needs_proxy_auth
)
@ -123,6 +126,7 @@ END2END_TESTS = {
'load_reporting_hook': test_options(),
'ping_pong_streaming': test_options(),
'ping': test_options(needs_fullstack=True, proxyable=False),
'proxy_auth': test_options(needs_proxy_auth=True),
'registered_call': test_options(),
'request_with_flags': test_options(proxyable=False),
'request_with_payload': test_options(),
@ -165,6 +169,9 @@ def compatible(fopt, topt):
if topt.needs_http2:
if not fopt.is_http2:
return False
if topt.needs_proxy_auth:
if not fopt.supports_proxy_auth:
return False
return True

@ -0,0 +1,235 @@
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* This test is for checking whether proxy authentication is working with HTTP
* Connect.
*/
#include "test/core/end2end/end2end_tests.h"
#include "test/core/end2end/fixtures/http_proxy_fixture.h"
#include <stdio.h>
#include <string.h>
#include <grpc/byte_buffer.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/lib/support/string.h"
#include "test/core/end2end/cq_verifier.h"
static void *tag(intptr_t t) { return (void *)t; }
static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
const char *test_name,
grpc_channel_args *client_args,
grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
f = config.create_fixture(client_args, server_args);
config.init_server(&f, server_args);
config.init_client(&f, client_args);
return f;
}
static gpr_timespec n_seconds_from_now(int n) {
return grpc_timeout_seconds_to_deadline(n);
}
static gpr_timespec five_seconds_from_now(void) {
return n_seconds_from_now(5);
}
static void drain_cq(grpc_completion_queue *cq) {
grpc_event ev;
do {
ev = grpc_completion_queue_next(cq, five_seconds_from_now(), NULL);
} while (ev.type != GRPC_QUEUE_SHUTDOWN);
}
static void shutdown_server(grpc_end2end_test_fixture *f) {
if (!f->server) return;
grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
grpc_timeout_seconds_to_deadline(5),
NULL)
.type == GRPC_OP_COMPLETE);
grpc_server_destroy(f->server);
f->server = NULL;
}
static void shutdown_client(grpc_end2end_test_fixture *f) {
if (!f->client) return;
grpc_channel_destroy(f->client);
f->client = NULL;
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->cq);
drain_cq(f->cq);
grpc_completion_queue_destroy(f->cq);
grpc_completion_queue_destroy(f->shutdown_cq);
}
static void simple_request_body(grpc_end2end_test_config config,
grpc_end2end_test_fixture f) {
grpc_call *c;
grpc_call *s;
cq_verifier *cqv = cq_verifier_create(f.cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
int was_cancelled = 2;
char *peer;
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
grpc_slice_from_static_string("/foo"),
get_host_override_slice("foo.test.google.fr:1234", config), deadline,
NULL);
GPR_ASSERT(c);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "client_peer_before_call=%s", peer);
gpr_free(peer);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
error =
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(101));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
cq_verify(cqv);
peer = grpc_call_get_peer(s);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "server_peer=%s", peer);
gpr_free(peer);
peer = grpc_call_get_peer(c);
GPR_ASSERT(peer != NULL);
gpr_log(GPR_DEBUG, "client_peer=%s", peer);
gpr_free(peer);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
grpc_slice status_details = grpc_slice_from_static_string("xyz");
op->data.send_status_from_server.status_details = &status_details;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
cq_verify(cqv);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo"));
validate_host_override_string("foo.test.google.fr:1234", call_details.host,
config);
GPR_ASSERT(0 == call_details.flags);
GPR_ASSERT(was_cancelled == 1);
grpc_slice_unref(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_unref(c);
grpc_call_unref(s);
cq_verifier_destroy(cqv);
}
static void test_invoke_proxy_auth(grpc_end2end_test_config config) {
/* Indicate that the proxy requires user auth */
grpc_arg client_arg = {.type = GRPC_ARG_STRING,
.key = GRPC_ARG_HTTP_PROXY_AUTH_CREDS,
.value.string = GRPC_TEST_HTTP_PROXY_AUTH_CREDS};
grpc_channel_args client_args = {.num_args = 1, .args = &client_arg};
grpc_end2end_test_fixture f =
begin_test(config, "test_invoke_proxy_auth", &client_args, NULL);
simple_request_body(config, f);
end_test(&f);
config.tear_down_data(&f);
}
void proxy_auth(grpc_end2end_test_config config) {
test_invoke_proxy_auth(config);
}
void proxy_auth_pre_init(void) {}

@ -190,7 +190,8 @@ static void consumer_thread(void *arg) {
gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); gpr_log(GPR_INFO, "consumer %d phase 2", opt->id);
for (;;) { for (;;) {
ev = grpc_completion_queue_next(opt->cc, ten_seconds_time(), NULL); ev = grpc_completion_queue_next(opt->cc,
gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
switch (ev.type) {
case GRPC_OP_COMPLETE:
GPR_ASSERT(ev.success);

@ -67,6 +67,12 @@ def _args():
default=20,
help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
)
argp.add_argument(
'-r',
'--regex',
type=str,
default="",
help='Regex to filter benchmarks run')
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
@ -212,6 +218,6 @@ def diff(bms, loops, regex, track, old, new, counters):
if __name__ == '__main__':
args = _args()
diff, note = diff(args.benchmarks, args.loops, args.track, args.old,
diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old,
args.new, args.counters)
print('%s\n%s' % (note, diff if diff else "No performance differences"))

@ -7341,6 +7341,7 @@
"test/core/end2end/tests/payload.c", "test/core/end2end/tests/payload.c",
"test/core/end2end/tests/ping.c", "test/core/end2end/tests/ping.c",
"test/core/end2end/tests/ping_pong_streaming.c", "test/core/end2end/tests/ping_pong_streaming.c",
"test/core/end2end/tests/proxy_auth.c",
"test/core/end2end/tests/registered_call.c", "test/core/end2end/tests/registered_call.c",
"test/core/end2end/tests/request_with_flags.c", "test/core/end2end/tests/request_with_flags.c",
"test/core/end2end/tests/request_with_payload.c", "test/core/end2end/tests/request_with_payload.c",
@ -7418,6 +7419,7 @@
"test/core/end2end/tests/payload.c", "test/core/end2end/tests/payload.c",
"test/core/end2end/tests/ping.c", "test/core/end2end/tests/ping.c",
"test/core/end2end/tests/ping_pong_streaming.c", "test/core/end2end/tests/ping_pong_streaming.c",
"test/core/end2end/tests/proxy_auth.c",
"test/core/end2end/tests/registered_call.c", "test/core/end2end/tests/registered_call.c",
"test/core/end2end/tests/request_with_flags.c", "test/core/end2end/tests/request_with_flags.c",
"test/core/end2end/tests/request_with_payload.c", "test/core/end2end/tests/request_with_payload.c",

@ -3993,7 +3993,8 @@
"mac", "mac",
"posix", "posix",
"windows" "windows"
] ],
"timeout_seconds": 1200
},
{
"args": [],
@ -16351,6 +16352,30 @@
"posix" "posix"
] ]
}, },
{
"args": [
"proxy_auth"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"registered_call"
@ -38864,6 +38889,30 @@
"posix" "posix"
] ]
}, },
{
"args": [
"proxy_auth"
],
"ci_platforms": [
"windows",
"linux",
"posix"
],
"cpu_cost": 1.0,
"exclude_configs": [],
"exclude_iomgrs": [
"uv"
],
"flaky": false,
"language": "c",
"name": "h2_http_proxy_nosec_test",
"platforms": [
"windows",
"linux",
"mac",
"posix"
]
},
{
"args": [
"registered_call"

@ -231,6 +231,8 @@
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c">

@ -121,6 +121,9 @@
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
<Filter>test\core\end2end\tests</Filter> <Filter>test\core\end2end\tests</Filter>
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
<Filter>test\core\end2end\tests</Filter> <Filter>test\core\end2end\tests</Filter>
</ClCompile> </ClCompile>

@ -233,6 +233,8 @@
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\request_with_flags.c">

@ -124,6 +124,9 @@
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\ping_pong_streaming.c">
<Filter>test\core\end2end\tests</Filter> <Filter>test\core\end2end\tests</Filter>
</ClCompile> </ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\proxy_auth.c">
<Filter>test\core\end2end\tests</Filter>
</ClCompile>
<ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c"> <ClCompile Include="$(SolutionDir)\..\test\core\end2end\tests\registered_call.c">
<Filter>test\core\end2end\tests</Filter> <Filter>test\core\end2end\tests</Filter>
</ClCompile> </ClCompile>
