From ef9ccd167c1cae38a172698109075a89af61f36c Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 22 Feb 2017 16:17:46 -0800 Subject: [PATCH 01/47] Check max streams --- src/core/ext/transport/chttp2/transport/parsing.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 7ed00522c3e..035c4ef28d0 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -634,6 +634,10 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, "ignoring grpc_chttp2_stream with non-client generated index %d", t->incoming_stream_id)); return init_skip_frame_parser(exec_ctx, t, 1); + } else if (grpc_chttp2_stream_map_size(&t->stream_map) >= + t->settings[GRPC_ACKED_SETTINGS] + [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) { + return GRPC_ERROR_CREATE("Max stream count exceeded"); } t->last_new_stream_id = t->incoming_stream_id; s = t->incoming_stream = From bb742675a80925f4701a3ecce99d3b104e23c9a1 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 17 May 2017 22:19:05 +0000 Subject: [PATCH 02/47] Enable epoll1 poller --- src/core/lib/iomgr/ev_epoll1_linux.c | 5 +---- src/core/lib/iomgr/ev_epollsig_linux.c | 2 +- tools/run_tests/run_tests.py | 4 ++-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index ad69f808cdb..0f067b52a3a 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -668,9 +668,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } } else { gpr_atm_no_barrier_store(&g_active_poller, 0); - gpr_mu_unlock(&pollset->mu); size_t poller_neighbourhood_idx = (size_t)(pollset->neighbourhood - g_neighbourhoods); + gpr_mu_unlock(&pollset->mu); bool found_worker = false; bool scan_state[MAX_NEIGHBOURHOODS]; for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) { @@ -948,9 +948,6 @@ static const grpc_event_engine_vtable vtable = { /* It is possible that GLIBC has epoll but the underlying kernel doesn't. 
* Create a dummy epoll_fd to make sure epoll support is available */ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { - /* TODO(ctiller): temporary, until this stabilizes */ - if (!explicit_request) return NULL; - if (!grpc_has_wakeup_fd()) { return NULL; } diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c index 92c555b7eae..25a4d32cfc5 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.c +++ b/src/core/lib/iomgr/ev_epollsig_linux.c @@ -1937,7 +1937,7 @@ const grpc_event_engine_vtable *grpc_init_epollsig_linux( if (!is_grpc_wakeup_signal_initialized) { /* TODO(ctiller): when other epoll engines are ready, remove the true || to * force this to be explitly chosen if needed */ - if (true || explicit_request) { + if (explicit_request) { grpc_use_signal(SIGRTMIN + 6); } else { return NULL; diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 1a16b093259..d1a9e73aa13 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -75,8 +75,8 @@ _FORCE_ENVIRON_FOR_WRAPPERS = { _POLLING_STRATEGIES = { - 'linux': ['epollsig', 'poll', 'poll-cv'], -# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool + 'linux': ['epoll1', 'epollsig', 'poll', 'poll-cv'], +# TODO(ctiller, sreecha): enable epollex, epoll-thread-pool 'mac': ['poll'], } From 2b73c4434f92e0efd8722da722124950338f84f5 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 18 May 2017 16:25:34 -0700 Subject: [PATCH 03/47] Extend test timeout --- build.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/build.yaml b/build.yaml index ecd9ebe5b7b..98f0dd739d9 100644 --- a/build.yaml +++ b/build.yaml @@ -4389,6 +4389,7 @@ targets: cpu_cost: 100 build: test language: c++ + timeout_seconds: 1200 src: - test/cpp/end2end/thread_stress_test.cc deps: From 058bd64b00ea7bdceaa83063466a83a1c4fac8c3 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 18 May 2017 16:26:15 -0700 Subject: [PATCH 04/47] Generate projects --- build.yaml | 2 +- tools/run_tests/generated/tests.json | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/build.yaml b/build.yaml index 98f0dd739d9..14cbe954691 100644 --- a/build.yaml +++ b/build.yaml @@ -4389,7 +4389,6 @@ targets: cpu_cost: 100 build: test language: c++ - timeout_seconds: 1200 src: - test/cpp/end2end/thread_stress_test.cc deps: @@ -4399,6 +4398,7 @@ targets: - grpc - gpr_test_util - gpr + timeout_seconds: 1200 - name: writes_per_rpc_test gtest: true cpu_cost: 0.5 diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index d1e3a99a084..f91913c69f6 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -3889,7 +3889,8 @@ "mac", "posix", "windows" - ] + ], + "timeout_seconds": 1200 }, { "args": [], From 55624a3a99f47a528b19c40f16901f560ccd51a5 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 26 May 2017 08:14:44 -0700 Subject: [PATCH 05/47] Fix some kicking bugs --- src/core/lib/iomgr/ev_epoll1_linux.c | 74 +++++++++++++++++++--------- 1 file changed, 52 insertions(+), 22 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 0f067b52a3a..64eef092fab 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -95,6 +95,7 @@ typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state; struct grpc_pollset_worker { kick_state kick_state; + int kick_state_mutator; // which line of code last changed kick 
state bool initialized_cv; grpc_pollset_worker *next; grpc_pollset_worker *prev; @@ -102,6 +103,12 @@ struct grpc_pollset_worker { grpc_closure_list schedule_on_end_work; }; +#define SET_KICK_STATE(worker, state) \ + do { \ + (worker)->kick_state = (state); \ + (worker)->kick_state_mutator = __LINE__; \ + } while (false) + #define MAX_NEIGHBOURHOODS 1024 typedef struct pollset_neighbourhood { @@ -431,13 +438,20 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) { if (pollset->root_worker != NULL) { grpc_pollset_worker *worker = pollset->root_worker; do { - if (worker->initialized_cv) { - worker->kick_state = KICKED; - gpr_cv_signal(&worker->cv); - } else { - worker->kick_state = KICKED; - append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd), - "pollset_shutdown"); + switch (worker->kick_state) { + case KICKED: + break; + case UNKICKED: + SET_KICK_STATE(worker, KICKED); + if (worker->initialized_cv) { + gpr_cv_signal(&worker->cv); + } + break; + case DESIGNATED_POLLER: + SET_KICK_STATE(worker, KICKED); + append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd), + "pollset_shutdown"); + break; } worker = worker->next; @@ -534,7 +548,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline) { if (worker_hdl != NULL) *worker_hdl = worker; worker->initialized_cv = false; - worker->kick_state = UNKICKED; + SET_KICK_STATE(worker, UNKICKED); worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; pollset->begin_refs++; @@ -563,8 +577,9 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->seen_inactive = false; if (neighbourhood->active_root == NULL) { neighbourhood->active_root = pollset->next = pollset->prev = pollset; - if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { - worker->kick_state = DESIGNATED_POLLER; + if (worker->kick_state == UNKICKED && + gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { + SET_KICK_STATE(worker, DESIGNATED_POLLER); } } else { pollset->next = neighbourhood->active_root; @@ -588,7 +603,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->shutdown_closure == NULL) { if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && worker->kick_state == UNKICKED) { - worker->kick_state = KICKED; + SET_KICK_STATE(worker, KICKED); } } *now = gpr_now(now->clock_type); @@ -615,7 +630,7 @@ static bool check_neighbourhood_for_available_poller( case UNKICKED: if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)inspect_worker)) { - inspect_worker->kick_state = DESIGNATED_POLLER; + SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER); if (inspect_worker->initialized_cv) { gpr_cv_signal(&inspect_worker->cv); } @@ -652,14 +667,14 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl) { if (worker_hdl != NULL) *worker_hdl = NULL; - worker->kick_state = KICKED; + SET_KICK_STATE(worker, KICKED); grpc_closure_list_move(&worker->schedule_on_end_work, &exec_ctx->closure_list); if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) { if (worker->next != worker && worker->next->kick_state == UNKICKED) { GPR_ASSERT(worker->next->initialized_cv); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); - worker->next->kick_state = DESIGNATED_POLLER; + SET_KICK_STATE(worker->next, DESIGNATED_POLLER); gpr_cv_signal(&worker->next->cv); if (grpc_exec_ctx_has_work(exec_ctx)) { gpr_mu_unlock(&pollset->mu); @@ 
-752,38 +767,53 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, return GRPC_ERROR_NONE; } grpc_pollset_worker *next_worker = root_worker->next; - if (root_worker == next_worker && + if (root_worker == next_worker && // only try and wake up a poller if + // there is no next worker root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( &g_active_poller)) { - root_worker->kick_state = KICKED; + SET_KICK_STATE(root_worker, KICKED); return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (next_worker->kick_state == UNKICKED) { GPR_ASSERT(next_worker->initialized_cv); - next_worker->kick_state = KICKED; + SET_KICK_STATE(next_worker, KICKED); gpr_cv_signal(&next_worker->cv); return GRPC_ERROR_NONE; + } else if (next_worker->kick_state == DESIGNATED_POLLER) { + if (root_worker->kick_state != DESIGNATED_POLLER) { + SET_KICK_STATE(root_worker, KICKED); + if (root_worker->initialized_cv) { + gpr_cv_signal(&root_worker->cv); + } + return GRPC_ERROR_NONE; + } else { + SET_KICK_STATE(next_worker, KICKED); + return grpc_wakeup_fd_wakeup(&global_wakeup_fd); + } } else { + GPR_ASSERT(next_worker->kick_state == KICKED); + SET_KICK_STATE(next_worker, KICKED); return GRPC_ERROR_NONE; } } else { + GPR_ASSERT(false); return GRPC_ERROR_NONE; } } else if (specific_worker->kick_state == KICKED) { return GRPC_ERROR_NONE; } else if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { - specific_worker->kick_state = KICKED; + SET_KICK_STATE(specific_worker, KICKED); return GRPC_ERROR_NONE; } else if (specific_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { - specific_worker->kick_state = KICKED; + SET_KICK_STATE(specific_worker, KICKED); return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (specific_worker->initialized_cv) { - specific_worker->kick_state = KICKED; + SET_KICK_STATE(specific_worker, KICKED); gpr_cv_signal(&specific_worker->cv); return GRPC_ERROR_NONE; } else { - specific_worker->kick_state = KICKED; + SET_KICK_STATE(specific_worker, KICKED); return GRPC_ERROR_NONE; } } @@ -830,7 +860,7 @@ static void wq_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_pollset_worker *inspect_worker = inspect->root_worker; do { if (inspect_worker->kick_state == UNKICKED) { - inspect_worker->kick_state = KICKED; + SET_KICK_STATE(inspect_worker, KICKED); grpc_closure_list_append( &inspect_worker->schedule_on_end_work, closure, error); if (inspect_worker->initialized_cv) { From b89bac0752c5c5fbd8e54db684062976c3a3365f Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 26 May 2017 15:20:32 +0000 Subject: [PATCH 06/47] Add tracing for epoll1 --- src/core/lib/iomgr/ev_epoll1_linux.c | 30 ++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 0f067b52a3a..ea755544c0b 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -61,6 +61,7 @@ #include "src/core/lib/iomgr/workqueue.h" #include "src/core/lib/profiling/timers.h" #include "src/core/lib/support/block_annotate.h" +#include "src/core/lib/support/string.h" static grpc_wakeup_fd global_wakeup_fd; static int g_epfd; @@ -744,45 +745,74 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, static grpc_error *pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_strvec log; + gpr_strvec_init(&log); + char *tmp; + gpr_asprintf(&tmp, "PS:%p KICK:%p 
curps=%p curworker=%p root=%p", pollset, specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset), (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker); + gpr_strvec_add(&log, tmp); + if (pollset->root_worker != NULL) { + gpr_asprintf(&tmp, " {kicked=%d next=%p {kicked=%d}}", pollset->root_worker->kick_state, pollset->root_worker->next, pollset->root_worker->next->kick_state); + gpr_strvec_add(&log, tmp); + } + if (specific_worker != NULL) { + gpr_asprintf(&tmp, " worker_kicked=%d", specific_worker->kick_state); + gpr_strvec_add(&log, tmp); + } + tmp = gpr_strvec_flatten(&log, NULL); + gpr_strvec_destroy(&log); + gpr_log(GPR_DEBUG, "%s", tmp); + gpr_free(tmp); + } if (specific_worker == NULL) { if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { grpc_pollset_worker *root_worker = pollset->root_worker; if (root_worker == NULL) { pollset->kicked_without_poller = true; + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. kicked_without_poller"); } return GRPC_ERROR_NONE; } grpc_pollset_worker *next_worker = root_worker->next; if (root_worker == next_worker && root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( &g_active_poller)) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. kicked %p", root_worker); } root_worker->kick_state = KICKED; return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (next_worker->kick_state == UNKICKED) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. kicked %p", next_worker); } GPR_ASSERT(next_worker->initialized_cv); next_worker->kick_state = KICKED; gpr_cv_signal(&next_worker->cv); return GRPC_ERROR_NONE; } else { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. do nothing??"); } return GRPC_ERROR_NONE; } } else { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. do nothing??"); } return GRPC_ERROR_NONE; } } else if (specific_worker->kick_state == KICKED) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. specific worker already kicked"); } return GRPC_ERROR_NONE; } else if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker); } specific_worker->kick_state = KICKED; return GRPC_ERROR_NONE; } else if (specific_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. kick active poller"); } specific_worker->kick_state = KICKED; return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (specific_worker->initialized_cv) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. kick waiting worker"); } specific_worker->kick_state = KICKED; gpr_cv_signal(&specific_worker->cv); return GRPC_ERROR_NONE; } else { + if (GRPC_TRACER_ON(grpc_polling_trace)) { gpr_log(GPR_DEBUG, " .. 
kick non-waiting worker"); } specific_worker->kick_state = KICKED; return GRPC_ERROR_NONE; } From dcb6262019ba22e36eaafcea089ef70e42325991 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 26 May 2017 09:47:38 -0700 Subject: [PATCH 07/47] Make race conditions more prevalent --- test/core/surface/completion_queue_threading_test.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/core/surface/completion_queue_threading_test.c b/test/core/surface/completion_queue_threading_test.c index bff69ec74fd..4ede629139f 100644 --- a/test/core/surface/completion_queue_threading_test.c +++ b/test/core/surface/completion_queue_threading_test.c @@ -205,7 +205,8 @@ static void consumer_thread(void *arg) { gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); for (;;) { - ev = grpc_completion_queue_next(opt->cc, ten_seconds_time(), NULL); + ev = grpc_completion_queue_next(opt->cc, + gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL); switch (ev.type) { case GRPC_OP_COMPLETE: GPR_ASSERT(ev.success); From c81512a2c548813a4db063f0ee6ab73edb84cfc0 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 26 May 2017 09:53:58 -0700 Subject: [PATCH 08/47] Fix race on shutdown --- src/core/lib/iomgr/ev_epoll1_linux.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index effa9bde8a3..24f40ec040a 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -473,7 +473,9 @@ static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx, static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(pollset->shutdown_closure == NULL); + GPR_ASSERT(!pollset->shutting_down); pollset->shutdown_closure = closure; + pollset->shutting_down = true; GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset)); pollset_maybe_finish_shutdown(exec_ctx, pollset); } @@ -600,8 +602,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker); worker->initialized_cv = true; gpr_cv_init(&worker->cv); - while (worker->kick_state == UNKICKED && - pollset->shutdown_closure == NULL) { + while (worker->kick_state == UNKICKED && !pollset->shutting_down) { if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && worker->kick_state == UNKICKED) { SET_KICK_STATE(worker, KICKED); @@ -610,8 +611,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, *now = gpr_now(now->clock_type); } - return worker->kick_state == DESIGNATED_POLLER && - pollset->shutdown_closure == NULL; + return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down; } static bool check_neighbourhood_for_available_poller( @@ -745,7 +745,7 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) { gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); - GPR_ASSERT(!pollset->shutdown_closure); + GPR_ASSERT(!pollset->shutting_down); GPR_ASSERT(!pollset->seen_inactive); gpr_mu_unlock(&pollset->mu); append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline), From 3e7059bb6b90ebdb422daf6a1c94ada6c7821052 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 26 May 2017 13:57:54 -0700 Subject: [PATCH 09/47] Remove accidental debug --- src/core/lib/iomgr/ev_epoll1_linux.c | 1 - 1 file 
changed, 1 deletion(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 24f40ec040a..5ac2d4d453f 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -836,7 +836,6 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, return GRPC_ERROR_NONE; } } else { - GPR_ASSERT(false); return GRPC_ERROR_NONE; } } else if (specific_worker->kick_state == KICKED) { From ac0d6ca68aa0a860a4e86575907d7d0e42cf8406 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 31 May 2017 16:22:30 +0000 Subject: [PATCH 10/47] Fix compile --- src/core/lib/surface/completion_queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c index 651afe685c3..b31ee2cd268 100644 --- a/src/core/lib/surface/completion_queue.c +++ b/src/core/lib/surface/completion_queue.c @@ -587,7 +587,7 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, /* Only kick if this is the first item queued */ if (is_first) { gpr_mu_lock(cqd->mu); - grpc_error *kick_error = cq->poller_vtable->kick(POLLSET_FROM_CQ(cq); + grpc_error *kick_error = cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), NULL); gpr_mu_unlock(cqd->mu); if (kick_error != GRPC_ERROR_NONE) { From 830e82ad122fbdbe8bcb5cd09b333fcb4515d452 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 31 May 2017 16:26:27 -0700 Subject: [PATCH 11/47] Fixes, debug --- src/core/lib/iomgr/ev_epoll1_linux.c | 129 ++++++++++++++---- .../iomgr/ev_epoll_limited_pollers_linux.c | 8 +- .../lib/iomgr/ev_epoll_thread_pool_linux.c | 8 +- src/core/lib/iomgr/ev_epollex_linux.c | 8 +- src/core/lib/iomgr/ev_epollsig_linux.c | 8 +- src/core/lib/iomgr/lockfree_event.c | 14 +- src/core/lib/iomgr/lockfree_event.h | 5 +- 7 files changed, 130 insertions(+), 50 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 5ac2d4d453f..379b8750145 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -94,6 +94,18 @@ static void fd_global_shutdown(void); typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state; +static const char *kick_state_string(kick_state st) { + switch (st) { + case UNKICKED: + return "UNKICKED"; + case KICKED: + return "KICKED"; + case DESIGNATED_POLLER: + return "DESIGNATED_POLLER"; + } + GPR_UNREACHABLE_CODE(return "UNKNOWN"); +} + struct grpc_pollset_worker { kick_state kick_state; int kick_state_mutator; // which line of code last changed kick state @@ -217,7 +229,7 @@ static grpc_fd *fd_create(int fd, const char *name) { gpr_asprintf(&fd_name, "%s fd=%d", name, fd); grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name); #ifdef GRPC_FD_REF_COUNT_DEBUG - gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name); + gpr_log(GPR_ERROR, "FD %d %p create %s", fd, (void *)new_fd, fd_name); #endif gpr_free(fd_name); @@ -283,12 +295,12 @@ static bool fd_is_shutdown(grpc_fd *fd) { static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { @@ -297,7 +309,7 @@ static 
grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure); + grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -309,7 +321,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); } /******************************************************************************* @@ -555,6 +567,10 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; pollset->begin_refs++; + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker); + } + if (pollset->seen_inactive) { // pollset has been observed to be inactive, we need to move back to the // active list @@ -570,6 +586,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, retry_lock_neighbourhood: gpr_mu_lock(&neighbourhood->mu); gpr_mu_lock(&pollset->mu); + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d", + pollset, worker, kick_state_string(worker->kick_state), + is_reassigning); + } if (pollset->seen_inactive) { if (neighbourhood != pollset->neighbourhood) { gpr_mu_unlock(&neighbourhood->mu); @@ -603,6 +624,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, worker->initialized_cv = true; gpr_cv_init(&worker->cv); while (worker->kick_state == UNKICKED && !pollset->shutting_down) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d", + pollset, worker, kick_state_string(worker->kick_state), + pollset->shutting_down); + } if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && worker->kick_state == UNKICKED) { SET_KICK_STATE(worker, KICKED); @@ -610,6 +636,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, } *now = gpr_now(now->clock_type); } + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d", pollset, + worker, kick_state_string(worker->kick_state), + pollset->shutting_down); + } return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down; } @@ -631,10 +662,18 @@ static bool check_neighbourhood_for_available_poller( case UNKICKED: if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)inspect_worker)) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, " .. choose next poller to be %p", + inspect_worker); + } SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER); if (inspect_worker->initialized_cv) { gpr_cv_signal(&inspect_worker->cv); } + } else { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, " .. 
beaten to choose next poller"); + } } // even if we didn't win the cas, there's a worker, we can stop found_worker = true; @@ -647,9 +686,12 @@ static bool check_neighbourhood_for_available_poller( break; } inspect_worker = inspect_worker->next; - } while (inspect_worker != inspect->root_worker); + } while (!found_worker && inspect_worker != inspect->root_worker); } if (!found_worker) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect); + } inspect->seen_inactive = true; if (inspect == neighbourhood->active_root) { neighbourhood->active_root = @@ -667,12 +709,19 @@ static bool check_neighbourhood_for_available_poller( static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker, grpc_pollset_worker **worker_hdl) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker); + } if (worker_hdl != NULL) *worker_hdl = NULL; + /* Make sure we appear kicked */ SET_KICK_STATE(worker, KICKED); grpc_closure_list_move(&worker->schedule_on_end_work, &exec_ctx->closure_list); if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) { if (worker->next != worker && worker->next->kick_state == UNKICKED) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker); + } GPR_ASSERT(worker->next->initialized_cv); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); SET_KICK_STATE(worker->next, DESIGNATED_POLLER); @@ -722,6 +771,9 @@ static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, if (worker->initialized_cv) { gpr_cv_destroy(&worker->cv); } + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_DEBUG, " .. remove worker"); + } if (EMPTIED == worker_remove(pollset, worker)) { pollset_maybe_finish_shutdown(exec_ctx, pollset); } @@ -742,8 +794,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, pollset->kicked_without_poller = false; return GRPC_ERROR_NONE; } - gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) { + gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker); GPR_ASSERT(!pollset->shutting_down); GPR_ASSERT(!pollset->seen_inactive); @@ -752,6 +804,8 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, err_desc); gpr_mu_lock(&pollset->mu); gpr_tls_set(&g_current_thread_worker, 0); + } else { + gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset); } end_worker(exec_ctx, pollset, &worker, worker_hdl); gpr_tls_set(&g_current_thread_pollset, 0); @@ -770,18 +824,20 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker); gpr_strvec_add(&log, tmp); if (pollset->root_worker != NULL) { - gpr_asprintf(&tmp, " {kicked=%d next=%p {kicked=%d}}", - pollset->root_worker->kick_state, pollset->root_worker->next, - pollset->root_worker->next->kick_state); + gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}", + kick_state_string(pollset->root_worker->kick_state), + pollset->root_worker->next, + kick_state_string(pollset->root_worker->next->kick_state)); gpr_strvec_add(&log, tmp); } if (specific_worker != NULL) { - gpr_asprintf(&tmp, " worker_kicked=%d", specific_worker->kick_state); + gpr_asprintf(&tmp, " worker_kick_state=%s", + kick_state_string(specific_worker->kick_state)); gpr_strvec_add(&log, tmp); 
} tmp = gpr_strvec_flatten(&log, NULL); gpr_strvec_destroy(&log); - gpr_log(GPR_DEBUG, "%s", tmp); + gpr_log(GPR_ERROR, "%s", tmp); gpr_free(tmp); } if (specific_worker == NULL) { @@ -790,23 +846,36 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, if (root_worker == NULL) { pollset->kicked_without_poller = true; if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kicked_without_poller"); + gpr_log(GPR_ERROR, " .. kicked_without_poller"); } return GRPC_ERROR_NONE; } grpc_pollset_worker *next_worker = root_worker->next; - if (root_worker == next_worker && // only try and wake up a poller if - // there is no next worker - root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( - &g_active_poller)) { + if (root_worker->kick_state == KICKED) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kicked %p", root_worker); + gpr_log(GPR_ERROR, " .. already kicked %p", root_worker); + } + SET_KICK_STATE(root_worker, KICKED); + return GRPC_ERROR_NONE; + } else if (next_worker->kick_state == KICKED) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, " .. already kicked %p", next_worker); + } + SET_KICK_STATE(next_worker, KICKED); + return GRPC_ERROR_NONE; + } else if (root_worker == + next_worker && // only try and wake up a poller if + // there is no next worker + root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load( + &g_active_poller)) { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, " .. kicked %p", root_worker); } SET_KICK_STATE(root_worker, KICKED); return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (next_worker->kick_state == UNKICKED) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kicked %p", next_worker); + gpr_log(GPR_ERROR, " .. kicked %p", next_worker); } GPR_ASSERT(next_worker->initialized_cv); SET_KICK_STATE(next_worker, KICKED); @@ -815,7 +884,10 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, } else if (next_worker->kick_state == DESIGNATED_POLLER) { if (root_worker->kick_state != DESIGNATED_POLLER) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kicked root non-poller %p", next_worker); + gpr_log( + GPR_ERROR, + " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)", + root_worker, root_worker->initialized_cv, next_worker); } SET_KICK_STATE(root_worker, KICKED); if (root_worker->initialized_cv) { @@ -824,7 +896,7 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, return GRPC_ERROR_NONE; } else { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker, + gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker, root_worker); } SET_KICK_STATE(next_worker, KICKED); @@ -836,37 +908,40 @@ static grpc_error *pollset_kick(grpc_pollset *pollset, return GRPC_ERROR_NONE; } } else { + if (GRPC_TRACER_ON(grpc_polling_trace)) { + gpr_log(GPR_ERROR, " .. kicked while waking up"); + } return GRPC_ERROR_NONE; } } else if (specific_worker->kick_state == KICKED) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. specific worker already kicked"); + gpr_log(GPR_ERROR, " .. specific worker already kicked"); } return GRPC_ERROR_NONE; } else if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker); + gpr_log(GPR_ERROR, " .. 
mark %p kicked", specific_worker); } SET_KICK_STATE(specific_worker, KICKED); return GRPC_ERROR_NONE; } else if (specific_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kick active poller"); + gpr_log(GPR_ERROR, " .. kick active poller"); } SET_KICK_STATE(specific_worker, KICKED); return grpc_wakeup_fd_wakeup(&global_wakeup_fd); } else if (specific_worker->initialized_cv) { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kick waiting worker"); + gpr_log(GPR_ERROR, " .. kick waiting worker"); } SET_KICK_STATE(specific_worker, KICKED); gpr_cv_signal(&specific_worker->cv); return GRPC_ERROR_NONE; } else { if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, " .. kick non-waiting worker"); + gpr_log(GPR_ERROR, " .. kick non-waiting worker"); } SET_KICK_STATE(specific_worker, KICKED); return GRPC_ERROR_NONE; @@ -1051,6 +1126,8 @@ const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) { return NULL; } + gpr_log(GPR_ERROR, "grpc epoll fd: %d", g_epfd); + return &vtable; } diff --git a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c index d23bf6c06cb..4a17f7d07c4 100644 --- a/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c +++ b/src/core/lib/iomgr/ev_epoll_limited_pollers_linux.c @@ -1145,12 +1145,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { @@ -1369,7 +1369,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure); + grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -1381,7 +1381,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); } static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c index bb44321922a..adc5e796440 100644 --- a/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c +++ b/src/core/lib/iomgr/ev_epoll_thread_pool_linux.c @@ -672,12 +672,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); + 
grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { return NULL; } @@ -810,11 +810,11 @@ static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) { } static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure); + grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); } static void pollset_release_epoll_set(grpc_exec_ctx *exec_ctx, grpc_pollset *ps, diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c index 7cb6085e255..b0d1a93886c 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.c +++ b/src/core/lib/iomgr/ev_epollex_linux.c @@ -438,12 +438,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { @@ -780,7 +780,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure); + grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it is in two epoll @@ -792,7 +792,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); } static grpc_error *fd_become_pollable_locked(grpc_fd *fd) { diff --git a/src/core/lib/iomgr/ev_epollsig_linux.c b/src/core/lib/iomgr/ev_epollsig_linux.c index 25a4d32cfc5..2f61cd5164c 100644 --- a/src/core/lib/iomgr/ev_epollsig_linux.c +++ b/src/core/lib/iomgr/ev_epollsig_linux.c @@ -1073,12 +1073,12 @@ static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) { static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read"); } static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *closure) { - grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure); + grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write"); } static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) { @@ -1263,7 +1263,7 @@ static int poll_deadline_to_millis_timeout(gpr_timespec deadline, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { - grpc_lfev_set_ready(exec_ctx, &fd->read_closure); + grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); /* Note, it is possible that fd_become_readable might be called twice with different 'notifier's when an fd becomes readable and it 
is in two epoll @@ -1275,7 +1275,7 @@ static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, } static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) { - grpc_lfev_set_ready(exec_ctx, &fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write"); } static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx, diff --git a/src/core/lib/iomgr/lockfree_event.c b/src/core/lib/iomgr/lockfree_event.c index 898ec1cb1bf..722031a5dab 100644 --- a/src/core/lib/iomgr/lockfree_event.c +++ b/src/core/lib/iomgr/lockfree_event.c @@ -94,12 +94,12 @@ bool grpc_lfev_is_shutdown(gpr_atm *state) { } void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure) { + grpc_closure *closure, const char *variable) { while (true) { gpr_atm curr = gpr_atm_no_barrier_load(state); if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "lfev_notify_on: %p curr=%p closure=%p", state, - (void *)curr, closure); + gpr_log(GPR_ERROR, "lfev_notify_on[%s]: %p curr=%p closure=%p", variable, + state, (void *)curr, closure); } switch (curr) { case CLOSURE_NOT_READY: { @@ -164,7 +164,7 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, while (true) { gpr_atm curr = gpr_atm_no_barrier_load(state); if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "lfev_set_shutdown: %p curr=%p err=%s", state, + gpr_log(GPR_ERROR, "lfev_set_shutdown: %p curr=%p err=%s", state, (void *)curr, grpc_error_string(shutdown_err)); } switch (curr) { @@ -208,12 +208,14 @@ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, GPR_UNREACHABLE_CODE(return false); } -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state) { +void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, + const char *variable) { while (true) { gpr_atm curr = gpr_atm_no_barrier_load(state); if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_DEBUG, "lfev_set_ready: %p curr=%p", state, (void *)curr); + gpr_log(GPR_ERROR, "lfev_set_ready[%s]: %p curr=%p", variable, state, + (void *)curr); } switch (curr) { diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h index 1d9119204ca..89b3d8cf229 100644 --- a/src/core/lib/iomgr/lockfree_event.h +++ b/src/core/lib/iomgr/lockfree_event.h @@ -45,10 +45,11 @@ void grpc_lfev_destroy(gpr_atm *state); bool grpc_lfev_is_shutdown(gpr_atm *state); void grpc_lfev_notify_on(grpc_exec_ctx *exec_ctx, gpr_atm *state, - grpc_closure *closure); + grpc_closure *closure, const char *variable); /* Returns true on first successful shutdown */ bool grpc_lfev_set_shutdown(grpc_exec_ctx *exec_ctx, gpr_atm *state, grpc_error *shutdown_err); -void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state); +void grpc_lfev_set_ready(grpc_exec_ctx *exec_ctx, gpr_atm *state, + const char *variable); #endif /* GRPC_CORE_LIB_IOMGR_LOCKFREE_EVENT_H */ From 148970b2aa0d2b86932b9332744b5d6c19d77c89 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 6 Jun 2017 15:39:39 +0000 Subject: [PATCH 12/47] Fix test --- test/core/surface/completion_queue_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/core/surface/completion_queue_test.c b/test/core/surface/completion_queue_test.c index 35bda5b6418..6fee4c55233 100644 --- a/test/core/surface/completion_queue_test.c +++ b/test/core/surface/completion_queue_test.c @@ -108,7 +108,7 @@ static void test_pollset_conversion(void) { attr.cq_polling_type = polling_types[j]; cq = grpc_completion_queue_create( 
grpc_completion_queue_factory_lookup(&attr), &attr, NULL); - GPR_ASSERT(grpc_cq_from_pollset(grpc_cq_pollset(cq)) == cq); + GPR_ASSERT(grpc_cq_pollset(cq) != NULL); shutdown_and_destroy(cq); } } From 4cc2b9accae8f505f05d79ebc91622757ac6ac92 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 6 Jul 2017 08:29:45 -0700 Subject: [PATCH 13/47] Fix accidental boringssl change --- third_party/boringssl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/third_party/boringssl b/third_party/boringssl index 78684e5b222..be2ee342d37 160000 --- a/third_party/boringssl +++ b/third_party/boringssl @@ -1 +1 @@ -Subproject commit 78684e5b222645828ca302e56b40b9daff2b2d27 +Subproject commit be2ee342d3781ddb954f91f8a7e660c6f59e87e5 From fb1e164cd81bd15b7d2aad595510e0c083fb3d5b Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 11 Jul 2017 17:49:21 -0700 Subject: [PATCH 14/47] dont wait for gc to destroy calls on ruby server --- src/ruby/lib/grpc/generic/active_call.rb | 141 +++++++++++++++++----- src/ruby/lib/grpc/generic/bidi_call.rb | 62 +++++----- src/ruby/lib/grpc/generic/rpc_desc.rb | 4 +- src/ruby/spec/generic/active_call_spec.rb | 4 +- src/ruby/spec/generic/client_stub_spec.rb | 136 +++++++++++++++------ src/ruby/spec/generic/rpc_desc_spec.rb | 10 +- 6 files changed, 249 insertions(+), 108 deletions(-) diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index cb407d236d1..96c773a995d 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -40,7 +40,7 @@ end module GRPC # The ActiveCall class provides simple methods for sending marshallable # data to a call - class ActiveCall + class ActiveCall # rubocop:disable Metrics/ClassLength include Core::TimeConsts include Core::CallOps extend Forwardable @@ -100,6 +100,11 @@ module GRPC fail(ArgumentError, 'Already sent md') if started && metadata_to_send @metadata_to_send = metadata_to_send || {} unless started @send_initial_md_mutex = Mutex.new + + @output_stream_done = false + @input_stream_done = false + @call_finished = false + @call_finished_mu = Mutex.new end # Sends the initial metadata that has yet to be sent. @@ -142,11 +147,9 @@ module GRPC Operation.new(self) end - # finished waits until a client call is completed. - # - # It blocks until the remote endpoint acknowledges by sending a status. - def finished + def receive_and_check_status batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil) + set_input_stream_done attach_status_results_and_complete_call(batch_result) end @@ -155,8 +158,6 @@ module GRPC @call.trailing_metadata = recv_status_batch_result.status.metadata end @call.status = recv_status_batch_result.status - @call.close - op_is_done # The RECV_STATUS in run_batch always succeeds # Check the status for a bad status or failed run batch @@ -193,9 +194,19 @@ module GRPC } ops[RECV_CLOSE_ON_SERVER] = nil if assert_finished @call.run_batch(ops) + set_output_stream_done + nil end + # Intended for use on server-side calls when a single request from + # the client is expected (i.e., unary and server-streaming RPC types). + def read_unary_request + req = remote_read + set_input_stream_done + req + end + def server_unary_response(req, trailing_metadata: {}, code: Core::StatusCodes::OK, details: 'OK') ops = {} @@ -211,6 +222,7 @@ module GRPC ops[RECV_CLOSE_ON_SERVER] = nil @call.run_batch(ops) + set_output_stream_done end # remote_read reads a response from the remote endpoint. 
@@ -241,6 +253,8 @@ module GRPC # each_remote_read passes each response to the given block or returns an # enumerator the responses if no block is given. + # Used to generate the request enumerable for + # server-side client-streaming RPC's. # # == Enumerator == # @@ -258,10 +272,14 @@ module GRPC # @return [Enumerator] if no block was given def each_remote_read return enum_for(:each_remote_read) unless block_given? - loop do - resp = remote_read - break if resp.nil? # the last response was received - yield resp + begin + loop do + resp = remote_read + break if resp.nil? # the last response was received + yield resp + end + ensure + set_input_stream_done end end @@ -287,13 +305,17 @@ module GRPC # @return [Enumerator] if no block was given def each_remote_read_then_finish return enum_for(:each_remote_read_then_finish) unless block_given? - loop do - resp = remote_read - if resp.nil? # the last response was received, but not finished yet - finished - break + begin + loop do + resp = remote_read + if resp.nil? # the last response was received + receive_and_check_status + break + end + yield resp end - yield resp + ensure + set_input_stream_done end end @@ -319,7 +341,15 @@ module GRPC end @metadata_sent = true end - batch_result = @call.run_batch(ops) + + begin + batch_result = @call.run_batch(ops) + # no need to check for cancellation after a CallError because this + # batch contains a RECV_STATUS op + ensure + set_input_stream_done + set_output_stream_done + end @call.metadata = batch_result.metadata attach_status_results_and_complete_call(batch_result) @@ -339,10 +369,19 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Object] the response received from the server def client_streamer(requests, metadata: {}) - # Metadata might have already been sent if this is an operation view - merge_metadata_and_send_if_not_already_sent(metadata) + begin + merge_metadata_and_send_if_not_already_sent(metadata) + requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) } + rescue GRPC::Core::CallError => e + receive_and_check_status # check for Cancelled + raise e + rescue => e + set_input_stream_done + raise e + ensure + set_output_stream_done + end - requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) } batch_result = @call.run_batch( SEND_CLOSE_FROM_CLIENT => nil, RECV_INITIAL_METADATA => nil, @@ -350,12 +389,11 @@ module GRPC RECV_STATUS_ON_CLIENT => nil ) + set_input_stream_done + @call.metadata = batch_result.metadata attach_status_results_and_complete_call(batch_result) get_message_from_batch_result(batch_result) - rescue GRPC::Core::CallError => e - finished # checks for Cancelled - raise e end # server_streamer sends one request to the GRPC server, which yields a @@ -384,13 +422,22 @@ module GRPC end @metadata_sent = true end - @call.run_batch(ops) + + begin + @call.run_batch(ops) + rescue GRPC::Core::CallError => e + receive_and_check_status # checks for Cancelled + raise e + rescue => e + set_input_stream_done + raise e + ensure + set_output_stream_done + end + replies = enum_for(:each_remote_read_then_finish) return replies unless block_given? 
replies.each { |r| yield r } - rescue GRPC::Core::CallError => e - finished # checks for Cancelled - raise e end # bidi_streamer sends a stream of requests to the GRPC server, and yields @@ -428,7 +475,10 @@ module GRPC @unmarshal, metadata_received: @metadata_received) - bd.run_on_client(requests, @op_notifier, &blk) + bd.run_on_client(requests, + proc { set_input_stream_done }, + proc { set_output_stream_done }, + &blk) end # run_server_bidi orchestrates a BiDi stream processing on a server. @@ -449,7 +499,7 @@ module GRPC metadata_received: @metadata_received, req_view: MultiReqView.new(self)) - bd.run_on_server(gen_each_reply) + bd.run_on_server(gen_each_reply, proc { set_input_stream_done }) end # Waits till an operation completes @@ -459,7 +509,8 @@ module GRPC @op_notifier.wait end - # Signals that an operation is done + # Signals that an operation is done. + # Only relevant on the client-side (this is a no-op on the server-side) def op_is_done return if @op_notifier.nil? @op_notifier.notify(self) @@ -486,6 +537,34 @@ module GRPC private + # To be called once the "input stream" has been completelly + # read through (i.e, done reading from client or received status) + # note this is idempotent + def set_input_stream_done + @call_finished_mu.synchronize do + @input_stream_done = true + maybe_finish_and_close_call_locked + end + end + + # To be called once the "output stream" has been completelly + # sent through (i.e, done sending from client or sent status) + # note this is idempotent + def set_output_stream_done + @call_finished_mu.synchronize do + @output_stream_done = true + maybe_finish_and_close_call_locked + end + end + + def maybe_finish_and_close_call_locked + return unless @output_stream_done && @input_stream_done + return if @call_finished + @call_finished = true + op_is_done + @call.close + end + # Starts the call if not already started # @param metadata [Hash] metadata to be sent to the server. If a value is # a list, multiple metadata for its key are sent diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index e54cf78969b..9e125cd986b 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -62,12 +62,19 @@ module GRPC # block that can be invoked with each response. # # @param requests the Enumerable of requests to send - # @param op_notifier a Notifier used to signal completion + # @param set_input_stream_done [Proc] called back when we're done + # reading the input stream + # @param set_input_stream_done [Proc] called back when we're done + # sending data on the output stream # @return an Enumerator of requests to yield - def run_on_client(requests, op_notifier, &blk) - @op_notifier = op_notifier - @enq_th = Thread.new { write_loop(requests) } - read_loop(&blk) + def run_on_client(requests, + set_input_stream_done, + set_output_stream_done, + &blk) + @enq_th = Thread.new do + write_loop(requests, set_output_stream_done: set_output_stream_done) + end + read_loop(set_input_stream_done, &blk) end # Begins orchestration of the Bidi stream for a server generating replies. @@ -81,12 +88,17 @@ module GRPC # produced by gen_each_reply could ignore the received_msgs # # @param gen_each_reply [Proc] generates the BiDi stream replies. - def run_on_server(gen_each_reply) + # @param set_input_steam_done [Proc] call back to call when + # the reads have been completely read through. 
+ def run_on_server(gen_each_reply, set_input_stream_done) # Pass in the optional call object parameter if possible if gen_each_reply.arity == 1 - replys = gen_each_reply.call(read_loop(is_client: false)) + replys = gen_each_reply.call( + read_loop(set_input_stream_done, is_client: false)) elsif gen_each_reply.arity == 2 - replys = gen_each_reply.call(read_loop(is_client: false), @req_view) + replys = gen_each_reply.call( + read_loop(set_input_stream_done, is_client: false), + @req_view) else fail 'Illegal arity of reply generator' end @@ -99,22 +111,6 @@ module GRPC END_OF_READS = :end_of_reads END_OF_WRITES = :end_of_writes - # signals that bidi operation is complete - def notify_done - return unless @op_notifier - GRPC.logger.debug("bidi-notify-done: notifying #{@op_notifier}") - @op_notifier.notify(self) - end - - # signals that a bidi operation is complete (read + write) - def finished - @done_mutex.synchronize do - return unless @reads_complete && @writes_complete && !@complete - @call.close - @complete = true - end - end - # performs a read using @call.run_batch, ensures metadata is set up def read_using_run_batch ops = { RECV_MESSAGE => nil } @@ -127,7 +123,8 @@ module GRPC batch_result end - def write_loop(requests, is_client: true) + # set_output_stream_done is relevant on client-side + def write_loop(requests, is_client: true, set_output_stream_done: nil) GRPC.logger.debug('bidi-write-loop: starting') count = 0 requests.each do |req| @@ -151,23 +148,20 @@ module GRPC GRPC.logger.debug("bidi-write-loop: client sent #{count}, waiting") @call.run_batch(SEND_CLOSE_FROM_CLIENT => nil) GRPC.logger.debug('bidi-write-loop: done') - notify_done - @writes_complete = true - finished end GRPC.logger.debug('bidi-write-loop: finished') rescue StandardError => e GRPC.logger.warn('bidi-write-loop: failed') GRPC.logger.warn(e) - notify_done - @writes_complete = true - finished raise e + ensure + set_output_stream_done.call if is_client end # Provides an enumerator that yields results of remote reads - def read_loop(is_client: true) + def read_loop(set_input_stream_done, is_client: true) return enum_for(:read_loop, + set_input_stream_done, is_client: is_client) unless block_given? GRPC.logger.debug('bidi-read-loop: starting') begin @@ -201,10 +195,10 @@ module GRPC GRPC.logger.warn('bidi: read-loop failed') GRPC.logger.warn(e) raise e + ensure + set_input_stream_done.call end GRPC.logger.debug('bidi-read-loop: finished') - @reads_complete = true - finished # Make sure that the write loop is done done before finishing the call. 
# Note that blocking is ok at this point because we've already received # a status diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb index ce0097573a6..89cf8ff6a0a 100644 --- a/src/ruby/lib/grpc/generic/rpc_desc.rb +++ b/src/ruby/lib/grpc/generic/rpc_desc.rb @@ -48,7 +48,7 @@ module GRPC end def handle_request_response(active_call, mth) - req = active_call.remote_read + req = active_call.read_unary_request resp = mth.call(req, active_call.single_req_view) active_call.server_unary_response( resp, trailing_metadata: active_call.output_metadata) @@ -61,7 +61,7 @@ module GRPC end def handle_server_streamer(active_call, mth) - req = active_call.remote_read + req = active_call.read_unary_request replys = mth.call(req, active_call.single_req_view) replys.each { |r| active_call.remote_send(r) } send_status(active_call, OK, 'OK', active_call.output_metadata) diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb index 72e55ebcce0..ec0c2941741 100644 --- a/src/ruby/spec/generic/active_call_spec.rb +++ b/src/ruby/spec/generic/active_call_spec.rb @@ -473,7 +473,7 @@ describe GRPC::ActiveCall do server_call.remote_send('server_response') expect(client_call.remote_read).to eq('server_response') server_call.send_status(OK, 'status code is OK') - expect { client_call.finished }.to_not raise_error + expect { client_call.receive_and_check_status }.to_not raise_error end it 'finishes ok if the server sends an early status response' do @@ -490,7 +490,7 @@ describe GRPC::ActiveCall do expect do call.run_batch(CallOps::SEND_CLOSE_FROM_CLIENT => nil) end.to_not raise_error - expect { client_call.finished }.to_not raise_error + expect { client_call.receive_and_check_status }.to_not raise_error end it 'finishes ok if SEND_CLOSE and RECV_STATUS has been sent' do diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 09b88c7cef4..3b8f72eda11 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -45,6 +45,7 @@ describe 'ClientStub' do @method = 'an_rpc_method' @pass = OK @fail = INTERNAL + @metadata = { k1: 'v1', k2: 'v2' } end after(:each) do @@ -107,7 +108,7 @@ describe 'ClientStub' do end end - describe '#request_response' do + describe '#request_response', request_response: true do before(:each) do @sent_msg, @resp = 'a_msg', 'a_reply' end @@ -187,13 +188,24 @@ describe 'ClientStub' do # Kill the server thread so tests can complete th.kill end + + it 'should raise ArgumentError if metadata contains invalid values' do + @metadata.merge!(k3: 3) + server_port = create_test_server + host = "localhost:#{server_port}" + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) + expect do + get_response(stub) + end.to raise_error(ArgumentError, + /Header values must be of type string or array/) + end end describe 'without a call operation' do def get_response(stub, credentials: nil) puts credentials.inspect stub.request_response(@method, @sent_msg, noop, noop, - metadata: { k1: 'v1', k2: 'v2' }, + metadata: @metadata, credentials: credentials) end @@ -201,16 +213,19 @@ describe 'ClientStub' do end describe 'via a call operation' do + after(:each) do + # make sure op.wait doesn't hang, even if there's a bad status + @op.wait + end def get_response(stub, run_start_call_first: false, credentials: nil) - op = stub.request_response(@method, @sent_msg, noop, noop, - return_op: true, - metadata: { k1: 'v1', k2: 'v2' }, - deadline: 
from_relative_time(2), - credentials: credentials) - expect(op).to be_a(GRPC::ActiveCall::Operation) - op.start_call if run_start_call_first - result = op.execute - op.wait # make sure wait doesn't hang + @op = stub.request_response(@method, @sent_msg, noop, noop, + return_op: true, + metadata: @metadata, + deadline: from_relative_time(2), + credentials: credentials) + expect(@op).to be_a(GRPC::ActiveCall::Operation) + @op.start_call if run_start_call_first + result = @op.execute result end @@ -228,13 +243,12 @@ describe 'ClientStub' do end end - describe '#client_streamer' do + describe '#client_streamer', client_streamer: true do before(:each) do Thread.abort_on_exception = true server_port = create_test_server host = "localhost:#{server_port}" @stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) - @metadata = { k1: 'v1', k2: 'v2' } @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s } @resp = 'a_reply' end @@ -278,13 +292,16 @@ describe 'ClientStub' do end describe 'via a call operation' do + after(:each) do + # make sure op.wait doesn't hang, even if there's a bad status + @op.wait + end def get_response(stub, run_start_call_first: false) - op = stub.client_streamer(@method, @sent_msgs, noop, noop, - return_op: true, metadata: @metadata) - expect(op).to be_a(GRPC::ActiveCall::Operation) - op.start_call if run_start_call_first - result = op.execute - op.wait # make sure wait doesn't hang + @op = stub.client_streamer(@method, @sent_msgs, noop, noop, + return_op: true, metadata: @metadata) + expect(@op).to be_a(GRPC::ActiveCall::Operation) + @op.start_call if run_start_call_first + result = @op.execute result end @@ -298,7 +315,7 @@ describe 'ClientStub' do end end - describe '#server_streamer' do + describe '#server_streamer', server_streamer: true do before(:each) do @sent_msg = 'a_msg' @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s } @@ -334,12 +351,36 @@ describe 'ClientStub' do expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) th.join end + + it 'should raise ArgumentError if metadata contains invalid values' do + @metadata.merge!(k3: 3) + server_port = create_test_server + host = "localhost:#{server_port}" + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) + expect do + get_responses(stub) + end.to raise_error(ArgumentError, + /Header values must be of type string or array/) + end + + it 'the call terminates when there is an unmarshalling error' do + server_port = create_test_server + host = "localhost:#{server_port}" + th = run_server_streamer(@sent_msg, @replys, @pass) + stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) + + unmarshal = proc { fail(ArgumentError, 'test unmarshalling error') } + expect do + get_responses(stub, unmarshal: unmarshal).collect { |r| r } + end.to raise_error(ArgumentError, 'test unmarshalling error') + th.join + end end - describe 'without a call operation' do - def get_responses(stub) - e = stub.server_streamer(@method, @sent_msg, noop, noop, - metadata: { k1: 'v1', k2: 'v2' }) + describe 'without a call operation', test2: true do + def get_responses(stub, unmarshal: noop) + e = stub.server_streamer(@method, @sent_msg, noop, unmarshal, + metadata: @metadata) expect(e).to be_a(Enumerator) e end @@ -351,10 +392,10 @@ describe 'ClientStub' do after(:each) do @op.wait # make sure wait doesn't hang end - def get_responses(stub, run_start_call_first: false) - @op = stub.server_streamer(@method, @sent_msg, noop, noop, + def get_responses(stub, run_start_call_first: false, unmarshal: noop) + @op = 
stub.server_streamer(@method, @sent_msg, noop, unmarshal, return_op: true, - metadata: { k1: 'v1', k2: 'v2' }) + metadata: @metadata) expect(@op).to be_a(GRPC::ActiveCall::Operation) @op.start_call if run_start_call_first e = @op.execute @@ -377,7 +418,7 @@ describe 'ClientStub' do end end - describe '#bidi_streamer' do + describe '#bidi_streamer', bidi: true do before(:each) do @sent_msgs = Array.new(3) { |i| 'msg_' + (i + 1).to_s } @replys = Array.new(3) { |i| 'reply_' + (i + 1).to_s } @@ -386,7 +427,7 @@ describe 'ClientStub' do end shared_examples 'bidi streaming' do - it 'supports sending all the requests first', bidi: true do + it 'supports sending all the requests first' do th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys, @pass) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) @@ -395,7 +436,7 @@ describe 'ClientStub' do th.join end - it 'supports client-initiated ping pong', bidi: true do + it 'supports client-initiated ping pong' do th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) @@ -403,18 +444,39 @@ describe 'ClientStub' do th.join end - it 'supports a server-initiated ping pong', bidi: true do + it 'supports a server-initiated ping pong' do th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, false) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) expect(e.collect { |r| r }).to eq(@sent_msgs) th.join end + + it 'should raise an error if the status is not ok' do + th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @fail, false) + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) + e = get_responses(stub) + expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) + th.join + end + + # TODO: add test for metadata-related ArgumentError in a bidi call once + # issue mentioned in https://github.com/grpc/grpc/issues/10526 is fixed + + it 'should send metadata to the server ok' do + th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true, + **@metadata) + stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) + e = get_responses(stub) + expect(e.collect { |r| r }).to eq(@sent_msgs) + th.join + end end describe 'without a call operation' do def get_responses(stub) - e = stub.bidi_streamer(@method, @sent_msgs, noop, noop) + e = stub.bidi_streamer(@method, @sent_msgs, noop, noop, + metadata: @metadata) expect(e).to be_a(Enumerator) e end @@ -428,7 +490,8 @@ describe 'ClientStub' do end def get_responses(stub, run_start_call_first: false) @op = stub.bidi_streamer(@method, @sent_msgs, noop, noop, - return_op: true) + return_op: true, + metadata: @metadata) expect(@op).to be_a(GRPC::ActiveCall::Operation) @op.start_call if run_start_call_first e = @op.execute @@ -472,9 +535,14 @@ describe 'ClientStub' do end end - def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts) + def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts, + **kw) + wanted_metadata = kw.clone wakey_thread do |notifier| c = expect_server_to_be_invoked(notifier) + wanted_metadata.each do |k, v| + expect(c.metadata[k.to_s]).to eq(v) + end expected_inputs.each do |i| if client_starts expect(c.remote_read).to eq(i) diff --git a/src/ruby/spec/generic/rpc_desc_spec.rb b/src/ruby/spec/generic/rpc_desc_spec.rb index 100e9e84876..be578c40d3f 100644 --- a/src/ruby/spec/generic/rpc_desc_spec.rb +++ b/src/ruby/spec/generic/rpc_desc_spec.rb @@ -38,14 +38,14 @@ describe GRPC::RpcDesc do 
shared_examples 'it handles errors' do it 'sends the specified status if BadStatus is raised' do - expect(@call).to receive(:remote_read).once.and_return(Object.new) + expect(@call).to receive(:read_unary_request).once.and_return(Object.new) expect(@call).to receive(:send_status).once.with(@bs_code, 'NOK', false, metadata: {}) this_desc.run_server_method(@call, method(:bad_status)) end it 'sends status UNKNOWN if other StandardErrors are raised' do - expect(@call).to receive(:remote_read).once.and_return(Object.new) + expect(@call).to receive(:read_unary_request).once.and_return(Object.new) expect(@call).to receive(:send_status).once.with(UNKNOWN, arg_error_msg, false, metadata: {}) @@ -53,7 +53,7 @@ describe GRPC::RpcDesc do end it 'absorbs CallError with no further action' do - expect(@call).to receive(:remote_read).once.and_raise(CallError) + expect(@call).to receive(:read_unary_request).once.and_raise(CallError) blk = proc do this_desc.run_server_method(@call, method(:fake_reqresp)) end @@ -75,7 +75,7 @@ describe GRPC::RpcDesc do it 'sends a response and closes the stream if there no errors' do req = Object.new - expect(@call).to receive(:remote_read).once.and_return(req) + expect(@call).to receive(:read_unary_request).once.and_return(req) expect(@call).to receive(:output_metadata).once.and_return(fake_md) expect(@call).to receive(:server_unary_response).once .with(@ok_response, trailing_metadata: fake_md) @@ -133,7 +133,7 @@ describe GRPC::RpcDesc do it 'sends a response and closes the stream if there no errors' do req = Object.new - expect(@call).to receive(:remote_read).once.and_return(req) + expect(@call).to receive(:read_unary_request).once.and_return(req) expect(@call).to receive(:remote_send).twice.with(@ok_response) expect(@call).to receive(:output_metadata).and_return(fake_md) expect(@call).to receive(:send_status).once.with(OK, 'OK', true, From df1c5d6826af5c6cb3435ed7d4918d3cb91bb7e7 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 13 Jul 2017 16:05:01 -0700 Subject: [PATCH 15/47] Update to use correct error constructor --- src/core/ext/transport/chttp2/transport/parsing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/transport/chttp2/transport/parsing.c b/src/core/ext/transport/chttp2/transport/parsing.c index 0c8a718482e..9d46cfa22e6 100644 --- a/src/core/ext/transport/chttp2/transport/parsing.c +++ b/src/core/ext/transport/chttp2/transport/parsing.c @@ -660,7 +660,7 @@ static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx, } else if (grpc_chttp2_stream_map_size(&t->stream_map) >= t->settings[GRPC_ACKED_SETTINGS] [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) { - return GRPC_ERROR_CREATE("Max stream count exceeded"); + return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Max stream count exceeded"); } t->last_new_stream_id = t->incoming_stream_id; s = t->incoming_stream = From 8a08400af22a9ebf7e699b130a65833ce272ed87 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 14 Jul 2017 14:49:09 -0700 Subject: [PATCH 16/47] Fix spam --- tools/mkowners/mkowners.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/mkowners/mkowners.py b/tools/mkowners/mkowners.py index 2ccedfcfb8c..e0ad998bdc7 100755 --- a/tools/mkowners/mkowners.py +++ b/tools/mkowners/mkowners.py @@ -164,7 +164,6 @@ def expand_directives(root, directives): if intersect: for f in sorted(files_add): # sorted to ensure merge stability if f not in intersect: - print("X", root, glob_add, glob_have) out_globs[os.path.relpath(f, start=root)] = who_add for who in 
who_have: if who not in out_globs[glob_add]: @@ -185,7 +184,6 @@ def add_parent_to_globs(parent, globs, globs_dir): if intersect: for f in sorted(files_child): # sorted to ensure merge stability if f not in intersect: - print("Y", full_dir(owners.dir, oglob), full_dir(globs_dir, gglob)) who = gglob_who_orig.copy() globs[os.path.relpath(f, start=globs_dir)] = who for who in oglob_who: From c9caedd95ae9568bd05e7655002487e9900cbc39 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Fri, 14 Jul 2017 15:46:51 -0700 Subject: [PATCH 17/47] fix compiler bug in epoll-ex --- src/core/lib/iomgr/ev_epollex_linux.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/lib/iomgr/ev_epollex_linux.c b/src/core/lib/iomgr/ev_epollex_linux.c index 24994173564..a2fa4ad9a5d 100644 --- a/src/core/lib/iomgr/ev_epollex_linux.c +++ b/src/core/lib/iomgr/ev_epollex_linux.c @@ -1049,8 +1049,8 @@ static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx, /* Introduce a spurious completion. If we do not, then it may be that the fd-specific epoll set consumed a completion without being polled, leading to a missed edge going up. */ - grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure); - grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure); + grpc_lfev_set_ready(exec_ctx, &had_fd->read_closure, "read"); + grpc_lfev_set_ready(exec_ctx, &had_fd->write_closure, "write"); pollset_kick_all(exec_ctx, pollset); pollset->current_pollable = &pollset->pollable; if (append_error(&error, pollable_materialize(&pollset->pollable), From 54c31c782f069270f9ba380f6959be22e50d745b Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Mon, 17 Jul 2017 14:57:27 -0700 Subject: [PATCH 18/47] release the g_epfd whent he engine is shutdown --- src/core/lib/iomgr/ev_epoll1_linux.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index fb54e9c9547..dfe2d143a50 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -965,6 +965,7 @@ static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx, static void shutdown_engine(void) { fd_global_shutdown(); pollset_global_shutdown(); + close(g_epfd); } static const grpc_event_engine_vtable vtable = { From 87116f449985cd49728326ecbdb7293cc522b8ff Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Wed, 12 Jul 2017 14:22:46 -0700 Subject: [PATCH 19/47] Fix failures in libuv portability tests --- src/core/lib/iomgr/iomgr_uv.c | 4 + src/core/lib/iomgr/resolve_address_uv.c | 17 ++-- src/core/lib/iomgr/tcp_server_uv.c | 109 ++++++++++++++++-------- src/core/lib/iomgr/tcp_uv.c | 4 + 4 files changed, 91 insertions(+), 43 deletions(-) diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.c index 49d1a03c322..7a99a6efdd3 100644 --- a/src/core/lib/iomgr/iomgr_uv.c +++ b/src/core/lib/iomgr/iomgr_uv.c @@ -21,12 +21,16 @@ #ifdef GRPC_UV #include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/executor.h" #include "src/core/lib/iomgr/pollset_uv.h" #include "src/core/lib/iomgr/tcp_uv.h" void grpc_iomgr_platform_init(void) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_global_init(); grpc_register_tracer("tcp", &grpc_tcp_trace); + grpc_executor_set_threading(&exec_ctx, false); + grpc_exec_ctx_finish(&exec_ctx); } void grpc_iomgr_platform_flush(void) {} void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); } diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index 
a98b8e62db3..5180357cb16 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -114,11 +114,14 @@ static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status, grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_error *error; int retry_status; + char *port = r->port; gpr_free(req); retry_status = retry_named_port_failure(status, r, getaddrinfo_callback); if (retry_status == 0) { - // The request is being retried. Nothing should be done here + /* The request is being retried. It is using its own port string, so we free + * the original one */ + gpr_free(port); return; } /* Either no retry was attempted, or the retry failed. Either way, the @@ -218,16 +221,18 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, grpc_pollset_set *interested_parties, grpc_closure *on_done, grpc_resolved_addresses **addrs) { - uv_getaddrinfo_t *req; - request *r; - struct addrinfo *hints; - char *host; - char *port; + uv_getaddrinfo_t *req = NULL; + request *r = NULL; + struct addrinfo *hints = NULL; + char *host = NULL; + char *port = NULL; grpc_error *err; int s; err = try_split_host_port(name, default_port, &host, &port); if (err != GRPC_ERROR_NONE) { GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); + gpr_free(host); + gpr_free(port); return; } r = gpr_malloc(sizeof(request)); diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index 2ab836cc34d..a63a36bf587 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -20,6 +20,7 @@ #ifdef GRPC_UV +#include #include #include @@ -43,6 +44,8 @@ struct grpc_tcp_listener { struct grpc_tcp_listener *next; bool closed; + + bool has_pending_connection; }; struct grpc_tcp_server { @@ -142,10 +145,12 @@ static void handle_close_callback(uv_handle_t *handle) { } static void close_listener(grpc_tcp_listener *sp) { + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; if (!sp->closed) { sp->closed = true; uv_close((uv_handle_t *)sp->handle, handle_close_callback); } + grpc_exec_ctx_finish(&exec_ctx); } static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { @@ -183,18 +188,49 @@ void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } } -static void accepted_connection_close_cb(uv_handle_t *handle) { - gpr_free(handle); -} - -static void on_connect(uv_stream_t *server, int status) { - grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; +static void finish_accept(grpc_exec_ctx *exec_ctx, grpc_tcp_listener *sp) { + grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); uv_tcp_t *client; grpc_endpoint *ep = NULL; - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_resolved_address peer_name; char *peer_name_string; int err; + uv_tcp_t *server = sp->handle; + + client = gpr_malloc(sizeof(uv_tcp_t)); + uv_tcp_init(uv_default_loop(), client); + // UV documentation says this is guaranteed to succeed + uv_accept((uv_stream_t *)server, (uv_stream_t *)client); + peer_name_string = NULL; + memset(&peer_name, 0, sizeof(grpc_resolved_address)); + peer_name.len = sizeof(struct sockaddr_storage); + err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr, + (int *)&peer_name.len); + if (err == 0) { + peer_name_string = grpc_sockaddr_to_uri(&peer_name); + } else { + gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(err)); + } + if (GRPC_TRACER_ON(grpc_tcp_trace)) { + if (peer_name_string) { + gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s", + sp->server, 
peer_name_string); + } else { + gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server); + } + } + ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); + acceptor->from_server = sp->server; + acceptor->port_index = sp->port_index; + acceptor->fd_index = 0; + sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, + acceptor); + gpr_free(peer_name_string); +} + +static void on_connect(uv_stream_t *server, int status) { + grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data; + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; if (status < 0) { switch (status) { @@ -207,35 +243,17 @@ static void on_connect(uv_stream_t *server, int status) { } } - client = gpr_malloc(sizeof(uv_tcp_t)); - uv_tcp_init(uv_default_loop(), client); - // UV documentation says this is guaranteed to succeed - uv_accept((uv_stream_t *)server, (uv_stream_t *)client); - // If the server has not been started, we discard incoming connections - if (sp->server->on_accept_cb == NULL) { - uv_close((uv_handle_t *)client, accepted_connection_close_cb); + GPR_ASSERT(!sp->has_pending_connection); + + gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server); + + // Create acceptor. + if (sp->server->on_accept_cb) { + finish_accept(&exec_ctx, sp); } else { - peer_name_string = NULL; - memset(&peer_name, 0, sizeof(grpc_resolved_address)); - peer_name.len = sizeof(struct sockaddr_storage); - err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr, - (int *)&peer_name.len); - if (err == 0) { - peer_name_string = grpc_sockaddr_to_uri(&peer_name); - } else { - gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status)); - } - ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string); - // Create acceptor. 
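The rewritten on_connect() above no longer closes connections that arrive before grpc_tcp_server_start(); it records that a connection is pending and lets the start path complete the accept through finish_accept(). A minimal, self-contained libuv sketch of that deferred-accept pattern follows; the listener type, callback names, and port are illustrative assumptions by the editor, not code from this patch.

#include <stdbool.h>
#include <stdlib.h>
#include <uv.h>

/* Hypothetical listener state (not gRPC's actual types). */
typedef struct {
  uv_tcp_t handle;
  bool has_pending_connection;         /* a connection arrived before start */
  void (*on_accept)(uv_tcp_t *client); /* NULL until the server is started */
} listener;

static void on_client_closed(uv_handle_t *h) { free(h); }

/* Placeholder accept callback used once the server has been "started". */
static void handle_client(uv_tcp_t *client) {
  uv_close((uv_handle_t *)client, on_client_closed);
}

/* Complete one accept and hand the client to the registered callback. */
static void finish_accept(listener *l) {
  uv_tcp_t *client = malloc(sizeof(*client));
  uv_tcp_init(uv_default_loop(), client);
  /* mirrors the patch: uv_accept() on a queued connection is expected to succeed */
  uv_accept((uv_stream_t *)&l->handle, (uv_stream_t *)client);
  l->on_accept(client);
}

static void on_connect(uv_stream_t *server, int status) {
  listener *l = (listener *)server->data;
  if (status < 0) return;
  if (l->on_accept != NULL) {
    finish_accept(l);                 /* normal path: server already started */
  } else {
    l->has_pending_connection = true; /* defer: nobody can take the client yet */
  }
}

/* "Starting" the server registers the callback and drains a deferred accept. */
static void server_start(listener *l, void (*cb)(uv_tcp_t *client)) {
  l->on_accept = cb;
  if (l->has_pending_connection) {
    l->has_pending_connection = false;
    finish_accept(l);
  }
}

int main(void) {
  listener l = {0};
  struct sockaddr_in addr;
  uv_tcp_init(uv_default_loop(), &l.handle);
  l.handle.data = &l;
  uv_ip4_addr("127.0.0.1", 50051, &addr);
  uv_tcp_bind(&l.handle, (const struct sockaddr *)&addr, 0);
  uv_listen((uv_stream_t *)&l.handle, 128, on_connect);
  server_start(&l, handle_client);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}

The key point is that uv_accept() is only paired with a user-visible accept callback once one has been registered, so an early connection is neither discarded nor delivered before the server is ready.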
- grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor)); - acceptor->from_server = sp->server; - acceptor->port_index = sp->port_index; - acceptor->fd_index = 0; - sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL, - acceptor); - grpc_exec_ctx_finish(&exec_ctx); - gpr_free(peer_name_string); + sp->has_pending_connection = true; } + grpc_exec_ctx_finish(&exec_ctx); } static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle, @@ -282,7 +300,7 @@ static grpc_error *add_socket_to_server(grpc_tcp_server *s, uv_tcp_t *handle, GPR_ASSERT(port >= 0); GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server"); - sp = gpr_malloc(sizeof(grpc_tcp_listener)); + sp = gpr_zalloc(sizeof(grpc_tcp_listener)); sp->next = NULL; if (s->head == NULL) { s->head = sp; @@ -366,6 +384,18 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, gpr_free(allocated_addr); + if (GRPC_TRACER_ON(grpc_tcp_trace)) { + char *port_string; + grpc_sockaddr_to_string(&port_string, addr, 0); + const char *str = grpc_error_string(error); + if (port_string) { + gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str); + gpr_free(port_string); + } else { + gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str); + } + } + if (error != GRPC_ERROR_NONE) { grpc_error *error_out = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( "Failed to add port to server", &error, 1); @@ -385,13 +415,18 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, grpc_tcp_listener *sp; (void)pollsets; (void)pollset_count; + if (GRPC_TRACER_ON(grpc_tcp_trace)) { + gpr_log(GPR_DEBUG, "SERVER_START %p", server); + } GPR_ASSERT(on_accept_cb); GPR_ASSERT(!server->on_accept_cb); server->on_accept_cb = on_accept_cb; server->on_accept_cb_arg = cb_arg; for (sp = server->head; sp; sp = sp->next) { - GPR_ASSERT(uv_listen((uv_stream_t *)sp->handle, SOMAXCONN, on_connect) == - 0); + if (sp->has_pending_connection) { + finish_accept(exec_ctx, sp); + sp->has_pending_connection = false; + } } } diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index ff5fd3edc85..0302af2a062 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -307,6 +307,10 @@ static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_error *why) { grpc_tcp *tcp = (grpc_tcp *)ep; if (!tcp->shutting_down) { + if (GRPC_TRACER_ON(grpc_tcp_trace)) { + const char *str = grpc_error_string(why); + gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->handle, str); + } tcp->shutting_down = true; uv_shutdown_t *req = &tcp->shutdown_req; uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback); From b8e07ad780ceb0bae7c431d0de1c6343bf275ceb Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Tue, 18 Jul 2017 13:20:55 -0700 Subject: [PATCH 20/47] Add asserts that uv calls all run on the same thread --- BUILD | 1 + build.yaml | 1 + gRPC-Core.podspec | 2 + grpc.gemspec | 1 + package.xml | 1 + src/core/lib/iomgr/iomgr_uv.c | 4 ++ src/core/lib/iomgr/iomgr_uv.h | 37 +++++++++++++++++++ src/core/lib/iomgr/pollset_uv.c | 7 ++++ src/core/lib/iomgr/resolve_address_uv.c | 4 ++ src/core/lib/iomgr/tcp_client_uv.c | 3 ++ src/core/lib/iomgr/tcp_server_uv.c | 6 +++ src/core/lib/iomgr/tcp_uv.c | 3 ++ src/core/lib/iomgr/timer_uv.c | 4 ++ tools/doxygen/Doxyfile.core.internal | 1 + .../generated/sources_and_headers.json | 2 + tools/run_tests/run_tests.py | 2 +- vsprojects/vcxproj/grpc/grpc.vcxproj | 1 + vsprojects/vcxproj/grpc/grpc.vcxproj.filters | 
3 ++ .../grpc_test_util/grpc_test_util.vcxproj | 1 + .../grpc_test_util.vcxproj.filters | 3 ++ .../grpc_unsecure/grpc_unsecure.vcxproj | 1 + .../grpc_unsecure.vcxproj.filters | 3 ++ 22 files changed, 90 insertions(+), 1 deletion(-) create mode 100644 src/core/lib/iomgr/iomgr_uv.h diff --git a/BUILD b/BUILD index 495bb668eb1..189a8276e52 100644 --- a/BUILD +++ b/BUILD @@ -597,6 +597,7 @@ grpc_cc_library( "src/core/lib/iomgr/iomgr.h", "src/core/lib/iomgr/iomgr_internal.h", "src/core/lib/iomgr/iomgr_posix.h", + "src/core/lib/iomgr/iomgr_uv.h", "src/core/lib/iomgr/is_epollexclusive_available.h", "src/core/lib/iomgr/load_file.h", "src/core/lib/iomgr/lockfree_event.h", diff --git a/build.yaml b/build.yaml index e55c4ca3012..f43a13e9bcd 100644 --- a/build.yaml +++ b/build.yaml @@ -213,6 +213,7 @@ filegroups: - src/core/lib/iomgr/iomgr.h - src/core/lib/iomgr/iomgr_internal.h - src/core/lib/iomgr/iomgr_posix.h + - src/core/lib/iomgr/iomgr_uv.h - src/core/lib/iomgr/is_epollexclusive_available.h - src/core/lib/iomgr/load_file.h - src/core/lib/iomgr/lockfree_event.h diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 3f841d51d8a..7a2c5a40f4f 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -276,6 +276,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/iomgr.h', 'src/core/lib/iomgr/iomgr_internal.h', 'src/core/lib/iomgr/iomgr_posix.h', + 'src/core/lib/iomgr/iomgr_uv.h', 'src/core/lib/iomgr/is_epollexclusive_available.h', 'src/core/lib/iomgr/load_file.h', 'src/core/lib/iomgr/lockfree_event.h', @@ -753,6 +754,7 @@ Pod::Spec.new do |s| 'src/core/lib/iomgr/iomgr.h', 'src/core/lib/iomgr/iomgr_internal.h', 'src/core/lib/iomgr/iomgr_posix.h', + 'src/core/lib/iomgr/iomgr_uv.h', 'src/core/lib/iomgr/is_epollexclusive_available.h', 'src/core/lib/iomgr/load_file.h', 'src/core/lib/iomgr/lockfree_event.h', diff --git a/grpc.gemspec b/grpc.gemspec index 663915b75ea..692977c7400 100755 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -208,6 +208,7 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/iomgr/iomgr.h ) s.files += %w( src/core/lib/iomgr/iomgr_internal.h ) s.files += %w( src/core/lib/iomgr/iomgr_posix.h ) + s.files += %w( src/core/lib/iomgr/iomgr_uv.h ) s.files += %w( src/core/lib/iomgr/is_epollexclusive_available.h ) s.files += %w( src/core/lib/iomgr/load_file.h ) s.files += %w( src/core/lib/iomgr/lockfree_event.h ) diff --git a/package.xml b/package.xml index cfa45f06d76..0714e614151 100644 --- a/package.xml +++ b/package.xml @@ -222,6 +222,7 @@ + diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.c index 7a99a6efdd3..c484a39c540 100644 --- a/src/core/lib/iomgr/iomgr_uv.c +++ b/src/core/lib/iomgr/iomgr_uv.c @@ -22,14 +22,18 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/iomgr/executor.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/pollset_uv.h" #include "src/core/lib/iomgr/tcp_uv.h" +gpr_thd_id grpc_init_thread; + void grpc_iomgr_platform_init(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_global_init(); grpc_register_tracer("tcp", &grpc_tcp_trace); grpc_executor_set_threading(&exec_ctx, false); + grpc_init_thread = gpr_thd_currentid(); grpc_exec_ctx_finish(&exec_ctx); } void grpc_iomgr_platform_flush(void) {} diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_uv.h new file mode 100644 index 00000000000..35c073a0330 --- /dev/null +++ b/src/core/lib/iomgr/iomgr_uv.h @@ -0,0 +1,37 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_IOMGR_IOMGR_UV_H +#define GRPC_CORE_LIB_IOMGR_IOMGR_UV_H + +#include "src/core/lib/iomgr/iomgr_internal.h" + +#include + +/* The thread ID of the thread on which grpc was initialized. Used to verify + * that all calls into libuv are made on that same thread */ +extern gpr_thd_id grpc_init_thread; + +#ifdef GRPC_UV_THREAD_CHECK +#define GRPC_ASSERT_SAME_THREAD() \ + GPR_ASSERT(gpr_thd_currentid() == grpc_init_thread) +#else +#define GRPC_ASSERT_SAME_THREAD() +#endif /* GRPC_UV_THREAD_CHECK */ + +#endif /* GRPC_CORE_LIB_IOMGR_IOMGR_UV_H */ diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index 1a54065a914..47c8c79cc2a 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -28,6 +28,7 @@ #include #include +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/pollset.h" #include "src/core/lib/iomgr/pollset_uv.h" @@ -69,6 +70,7 @@ void grpc_pollset_global_init(void) { } void grpc_pollset_global_shutdown(void) { + GRPC_ASSERT_SAME_THREAD(); gpr_mu_destroy(&grpc_polling_mu); uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb); } @@ -78,6 +80,7 @@ static void timer_run_cb(uv_timer_t *timer) {} static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { + GRPC_ASSERT_SAME_THREAD(); *mu = &grpc_polling_mu; uv_timer_init(uv_default_loop(), &pollset->timer); pollset->shutting_down = 0; @@ -86,6 +89,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(!pollset->shutting_down); + GRPC_ASSERT_SAME_THREAD(); pollset->shutting_down = 1; if (grpc_pollset_work_run_loop) { // Drain any pending UV callbacks without blocking @@ -98,6 +102,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { + GRPC_ASSERT_SAME_THREAD(); uv_close((uv_handle_t *)&pollset->timer, timer_close_cb); // timer.data is a boolean indicating that the timer has finished closing pollset->timer.data = (void *)0; @@ -112,6 +117,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, gpr_timespec now, gpr_timespec deadline) { uint64_t timeout; + GRPC_ASSERT_SAME_THREAD(); gpr_mu_unlock(&grpc_polling_mu); if (grpc_pollset_work_run_loop) { if (gpr_time_cmp(deadline, now) >= 0) { @@ -140,6 +146,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { + GRPC_ASSERT_SAME_THREAD(); uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); return GRPC_ERROR_NONE; } diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index 5180357cb16..f910ec26b26 100644 --- 
a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -30,6 +30,7 @@ #include "src/core/lib/iomgr/closure.h" #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/resolve_address.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" @@ -174,6 +175,8 @@ static grpc_error *blocking_resolve_address_impl( grpc_error *err; int retry_status; + GRPC_ASSERT_SAME_THREAD(); + req.addrinfo = NULL; err = try_split_host_port(name, default_port, &host, &port); @@ -228,6 +231,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, char *port = NULL; grpc_error *err; int s; + GRPC_ASSERT_SAME_THREAD(); err = try_split_host_port(name, default_port, &host, &port); if (err != GRPC_ERROR_NONE) { GRPC_CLOSURE_SCHED(exec_ctx, on_done, err); diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 2f1d237c07b..098ff7648de 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -26,6 +26,7 @@ #include #include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/iomgr/tcp_uv.h" @@ -124,6 +125,8 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, (void)channel_args; (void)interested_parties; + GRPC_ASSERT_SAME_THREAD(); + if (channel_args != NULL) { for (size_t i = 0; i < channel_args->num_args; i++) { if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) { diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index a63a36bf587..7b45e08c9b8 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -28,6 +28,7 @@ #include "src/core/lib/iomgr/error.h" #include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/sockaddr.h" #include "src/core/lib/iomgr/sockaddr_utils.h" #include "src/core/lib/iomgr/tcp_server.h" @@ -107,6 +108,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, } grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { + GRPC_ASSERT_SAME_THREAD(); gpr_ref(&s->refs); return s; } @@ -173,6 +175,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { + GRPC_ASSERT_SAME_THREAD(); if (gpr_unref(&s->refs)) { /* Complete shutdown_starting work before destroying. 
*/ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT; @@ -335,6 +338,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, int status; grpc_error *error = GRPC_ERROR_NONE; + GRPC_ASSERT_SAME_THREAD(); + if (s->tail != NULL) { port_index = s->tail->port_index + 1; } @@ -415,6 +420,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, grpc_tcp_listener *sp; (void)pollsets; (void)pollset_count; + GRPC_ASSERT_SAME_THREAD(); if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "SERVER_START %p", server); } diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index 0302af2a062..115297634b1 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -30,6 +30,7 @@ #include #include "src/core/lib/iomgr/error.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/network_status_tracker.h" #include "src/core/lib/iomgr/resource_quota.h" #include "src/core/lib/iomgr/tcp_uv.h" @@ -183,6 +184,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_tcp *tcp = (grpc_tcp *)ep; int status; grpc_error *error = GRPC_ERROR_NONE; + GRPC_ASSERT_SAME_THREAD(); GPR_ASSERT(tcp->read_cb == NULL); tcp->read_cb = cb; tcp->read_slices = read_slices; @@ -236,6 +238,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, unsigned int i; grpc_slice *slice; uv_write_t *write_req; + GRPC_ASSERT_SAME_THREAD(); if (GRPC_TRACER_ON(grpc_tcp_trace)) { size_t j; diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index 4f204cfbf8c..e004ff5a3a4 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -24,6 +24,7 @@ #include #include "src/core/lib/debug/trace.h" +#include "src/core/lib/iomgr/iomgr_uv.h" #include "src/core/lib/iomgr/timer.h" #include @@ -42,6 +43,7 @@ static void stop_uv_timer(uv_timer_t *handle) { void run_expired_timer(uv_timer_t *handle) { grpc_timer *timer = (grpc_timer *)handle->data; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + GRPC_ASSERT_SAME_THREAD(); GPR_ASSERT(timer->pending); timer->pending = 0; GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE); @@ -54,6 +56,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec now) { uint64_t timeout; uv_timer_t *uv_timer; + GRPC_ASSERT_SAME_THREAD(); timer->closure = closure; if (gpr_time_cmp(deadline, now) <= 0) { timer->pending = 0; @@ -74,6 +77,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { + GRPC_ASSERT_SAME_THREAD(); if (timer->pending) { timer->pending = 0; GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 766c20f59b7..1263bdbb743 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1118,6 +1118,7 @@ src/core/lib/iomgr/iomgr_internal.h \ src/core/lib/iomgr/iomgr_posix.c \ src/core/lib/iomgr/iomgr_posix.h \ src/core/lib/iomgr/iomgr_uv.c \ +src/core/lib/iomgr/iomgr_uv.h \ src/core/lib/iomgr/iomgr_windows.c \ src/core/lib/iomgr/is_epollexclusive_available.c \ src/core/lib/iomgr/is_epollexclusive_available.h \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 477a258daa6..93411a8c614 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7654,6 
+7654,7 @@ "src/core/lib/iomgr/iomgr.h", "src/core/lib/iomgr/iomgr_internal.h", "src/core/lib/iomgr/iomgr_posix.h", + "src/core/lib/iomgr/iomgr_uv.h", "src/core/lib/iomgr/is_epollexclusive_available.h", "src/core/lib/iomgr/load_file.h", "src/core/lib/iomgr/lockfree_event.h", @@ -7812,6 +7813,7 @@ "src/core/lib/iomgr/iomgr_posix.c", "src/core/lib/iomgr/iomgr_posix.h", "src/core/lib/iomgr/iomgr_uv.c", + "src/core/lib/iomgr/iomgr_uv.h", "src/core/lib/iomgr/iomgr_windows.c", "src/core/lib/iomgr/is_epollexclusive_available.c", "src/core/lib/iomgr/is_epollexclusive_available.h", diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 611868ce5a4..50eed6256c9 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -245,7 +245,7 @@ class CLanguage(object): self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker, self.args.compiler) if args.iomgr_platform == "uv": - cflags = '-DGRPC_UV ' + cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK' try: cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' ' except (subprocess.CalledProcessError, OSError): diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj b/vsprojects/vcxproj/grpc/grpc.vcxproj index f9badbbd353..12cb39a3df6 100644 --- a/vsprojects/vcxproj/grpc/grpc.vcxproj +++ b/vsprojects/vcxproj/grpc/grpc.vcxproj @@ -333,6 +333,7 @@ + diff --git a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters index f3e997e4e0e..c762200a008 100644 --- a/vsprojects/vcxproj/grpc/grpc.vcxproj.filters +++ b/vsprojects/vcxproj/grpc/grpc.vcxproj.filters @@ -944,6 +944,9 @@ src\core\lib\iomgr + + src\core\lib\iomgr + src\core\lib\iomgr diff --git a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj index 1899be91db3..98bf5aeb69a 100644 --- a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj +++ b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj @@ -228,6 +228,7 @@ + diff --git a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters index 9c81fc5bf51..145b51c206b 100644 --- a/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters +++ b/vsprojects/vcxproj/grpc_test_util/grpc_test_util.vcxproj.filters @@ -677,6 +677,9 @@ src\core\lib\iomgr + + src\core\lib\iomgr + src\core\lib\iomgr diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj index 3a865f3e812..12c352a3f81 100644 --- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj @@ -323,6 +323,7 @@ + diff --git a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters index 0310ebc012c..0b598363b8b 100644 --- a/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters +++ b/vsprojects/vcxproj/grpc_unsecure/grpc_unsecure.vcxproj.filters @@ -854,6 +854,9 @@ src\core\lib\iomgr + + src\core\lib\iomgr + src\core\lib\iomgr From 79e2d8e89f635e07312d07afcc1f26a49aa48f06 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Tue, 18 Jul 2017 13:23:55 -0700 Subject: [PATCH 21/47] libuv tests: skip a test that spawns a thread --- build.yaml | 2 ++ tools/run_tests/generated/tests.json | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/build.yaml b/build.yaml index f43a13e9bcd..c8556c4b455 100644 --- a/build.yaml 
+++ b/build.yaml @@ -2370,6 +2370,8 @@ targets: - grpc - gpr_test_util - gpr + exclude_iomgrs: + - uv platforms: - linux secure: true diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index dd812414b9f..5be3cf796bd 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -1390,7 +1390,9 @@ ], "cpu_cost": 1.0, "exclude_configs": [], - "exclude_iomgrs": [], + "exclude_iomgrs": [ + "uv" + ], "flaky": false, "gtest": false, "language": "c", From 269d3b4faae39ac4a6be66baa5b5fc9a2efb86df Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Tue, 18 Jul 2017 15:09:17 -0700 Subject: [PATCH 22/47] Clang format --- src/core/lib/iomgr/iomgr_uv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_uv.h index 35c073a0330..bd406c34f72 100644 --- a/src/core/lib/iomgr/iomgr_uv.h +++ b/src/core/lib/iomgr/iomgr_uv.h @@ -28,7 +28,7 @@ extern gpr_thd_id grpc_init_thread; #ifdef GRPC_UV_THREAD_CHECK -#define GRPC_ASSERT_SAME_THREAD() \ +#define GRPC_ASSERT_SAME_THREAD() \ GPR_ASSERT(gpr_thd_currentid() == grpc_init_thread) #else #define GRPC_ASSERT_SAME_THREAD() From 0d8431afe1d5cf5ede4bad037ba2031586a8fe6c Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 18 Jul 2017 16:21:54 -0700 Subject: [PATCH 23/47] Add/update comments and remove unused fields --- src/core/lib/iomgr/ev_epoll1_linux.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index dfe2d143a50..f579b16fa58 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -120,10 +120,15 @@ struct grpc_pollset { bool reassigning_neighbourhood; grpc_pollset_worker *root_worker; bool kicked_without_poller; + + /* Set to true if the pollset is observed to have no workers available to + * poll */ bool seen_inactive; - bool shutting_down; /* Is the pollset shutting down ? */ - bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */ + bool shutting_down; /* Is the pollset shutting down ? */ grpc_closure *shutdown_closure; /* Called after after shutdown is complete */ + + /* Number of workers who are *about-to* attach themselves to the pollset + * worker list */ int begin_refs; grpc_pollset *next; @@ -294,12 +299,6 @@ static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd, static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_pollset *notifier) { grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read"); - - /* Note, it is possible that fd_become_readable might be called twice with - different 'notifier's when an fd becomes readable and it is in two epoll - sets (This can happen briefly during polling island merges). In such cases - it does not really matter which notifer is set as the read_notifier_pollset - (They would both point to the same polling island anyway) */ /* Use release store to match with acquire load in fd_get_read_notifier */ gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier); } @@ -442,13 +441,16 @@ static grpc_error *pollset_kick_all(grpc_pollset *pollset) { case DESIGNATED_POLLER: SET_KICK_STATE(worker, KICKED); append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd), - "pollset_shutdown"); + "pollset_kick_all"); break; } worker = worker->next; } while (worker != pollset->root_worker); } + // TODO: sreek. 
Check if we need to set 'kicked_without_poller' to true here + // in the else case + return error; } @@ -577,6 +579,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->seen_inactive = false; if (neighbourhood->active_root == NULL) { neighbourhood->active_root = pollset->next = pollset->prev = pollset; + /* TODO: sreek. Why would this worker state be other than UNKICKED + * here ? (since the worker isn't added to the pollset yet, there is no + * way it can be "found" by other threads to get kicked). */ + + /* If there is no designated poller, make this the designated poller */ if (worker->kick_state == UNKICKED && gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) { SET_KICK_STATE(worker, DESIGNATED_POLLER); @@ -605,8 +612,11 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset, worker, kick_state_string(worker->kick_state), pollset->shutting_down); } + if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) && worker->kick_state == UNKICKED) { + /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker + received a kick */ SET_KICK_STATE(worker, KICKED); } } From 30939f88548a7318cac2642d6149f651c09f931a Mon Sep 17 00:00:00 2001 From: ncteisen Date: Wed, 5 Jul 2017 15:43:18 -0700 Subject: [PATCH 24/47] Make CreateThreadPool settable --- src/cpp/client/secure_credentials.cc | 2 +- src/cpp/server/create_default_thread_pool.cc | 12 +++++++++++- src/cpp/server/secure_server_credentials.h | 2 +- src/cpp/server/thread_pool_interface.h | 6 +++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 057a058a3fb..0eefd0645b4 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -207,7 +207,7 @@ void MetadataCredentialsPluginWrapper::InvokePlugin( MetadataCredentialsPluginWrapper::MetadataCredentialsPluginWrapper( std::unique_ptr plugin) - : thread_pool_(CreateDefaultThreadPool()), plugin_(std::move(plugin)) {} + : thread_pool_(CreateThreadPool()), plugin_(std::move(plugin)) {} std::shared_ptr MetadataCredentialsFromPlugin( std::unique_ptr plugin) { diff --git a/src/cpp/server/create_default_thread_pool.cc b/src/cpp/server/create_default_thread_pool.cc index 17ad331c9c0..bf76303b454 100644 --- a/src/cpp/server/create_default_thread_pool.cc +++ b/src/cpp/server/create_default_thread_pool.cc @@ -24,12 +24,22 @@ namespace grpc { -ThreadPoolInterface* CreateDefaultThreadPool() { +static ThreadPoolInterface* CreateDefaultThreadPool() { int cores = gpr_cpu_num_cores(); if (!cores) cores = 4; return new DynamicThreadPool(cores); } +static CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPool; + +ThreadPoolInterface* CreateThreadPool() { + return g_ctp_impl(); +} + +void SetCreateThreadPool(CreateThreadPoolFunc func) { + g_ctp_impl = func; +} + } // namespace grpc #endif // !GRPC_CUSTOM_DEFAULT_THREAD_POOL diff --git a/src/cpp/server/secure_server_credentials.h b/src/cpp/server/secure_server_credentials.h index 212f0d1df3b..30ee1c7a366 100644 --- a/src/cpp/server/secure_server_credentials.h +++ b/src/cpp/server/secure_server_credentials.h @@ -39,7 +39,7 @@ class AuthMetadataProcessorAyncWrapper final { AuthMetadataProcessorAyncWrapper( const std::shared_ptr& processor) - : thread_pool_(CreateDefaultThreadPool()), processor_(processor) {} + : thread_pool_(CreateThreadPool()), processor_(processor) {} private: void InvokeProcessor(grpc_auth_context* context, const grpc_metadata* 
md, diff --git a/src/cpp/server/thread_pool_interface.h b/src/cpp/server/thread_pool_interface.h index 4f4fc7eaaa2..5a182982968 100644 --- a/src/cpp/server/thread_pool_interface.h +++ b/src/cpp/server/thread_pool_interface.h @@ -32,7 +32,11 @@ class ThreadPoolInterface { virtual void Add(const std::function& callback) = 0; }; -ThreadPoolInterface* CreateDefaultThreadPool(); +// Allows different codebases to use their own thread pool impls +typedef ThreadPoolInterface* (*CreateThreadPoolFunc)(void); +void SetCreateThreadPool(CreateThreadPoolFunc func); + +ThreadPoolInterface* CreateThreadPool(); } // namespace grpc From a5d557b96d873873e5f17d96ffca6088e3743af0 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Wed, 5 Jul 2017 15:52:28 -0700 Subject: [PATCH 25/47] Rename function to avoud future clash --- src/cpp/client/secure_credentials.cc | 2 +- src/cpp/server/create_default_thread_pool.cc | 12 ++++-------- src/cpp/server/secure_server_credentials.h | 2 +- src/cpp/server/thread_pool_interface.h | 2 +- 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 0eefd0645b4..057a058a3fb 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -207,7 +207,7 @@ void MetadataCredentialsPluginWrapper::InvokePlugin( MetadataCredentialsPluginWrapper::MetadataCredentialsPluginWrapper( std::unique_ptr plugin) - : thread_pool_(CreateThreadPool()), plugin_(std::move(plugin)) {} + : thread_pool_(CreateDefaultThreadPool()), plugin_(std::move(plugin)) {} std::shared_ptr MetadataCredentialsFromPlugin( std::unique_ptr plugin) { diff --git a/src/cpp/server/create_default_thread_pool.cc b/src/cpp/server/create_default_thread_pool.cc index bf76303b454..57faa17f6b9 100644 --- a/src/cpp/server/create_default_thread_pool.cc +++ b/src/cpp/server/create_default_thread_pool.cc @@ -24,21 +24,17 @@ namespace grpc { -static ThreadPoolInterface* CreateDefaultThreadPool() { +static ThreadPoolInterface* CreateDefaultThreadPoolImpl() { int cores = gpr_cpu_num_cores(); if (!cores) cores = 4; return new DynamicThreadPool(cores); } -static CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPool; +static CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl; -ThreadPoolInterface* CreateThreadPool() { - return g_ctp_impl(); -} +ThreadPoolInterface* CreateDefaultThreadPool() { return g_ctp_impl(); } -void SetCreateThreadPool(CreateThreadPoolFunc func) { - g_ctp_impl = func; -} +void SetCreateThreadPool(CreateThreadPoolFunc func) { g_ctp_impl = func; } } // namespace grpc diff --git a/src/cpp/server/secure_server_credentials.h b/src/cpp/server/secure_server_credentials.h index 30ee1c7a366..212f0d1df3b 100644 --- a/src/cpp/server/secure_server_credentials.h +++ b/src/cpp/server/secure_server_credentials.h @@ -39,7 +39,7 @@ class AuthMetadataProcessorAyncWrapper final { AuthMetadataProcessorAyncWrapper( const std::shared_ptr& processor) - : thread_pool_(CreateThreadPool()), processor_(processor) {} + : thread_pool_(CreateDefaultThreadPool()), processor_(processor) {} private: void InvokeProcessor(grpc_auth_context* context, const grpc_metadata* md, diff --git a/src/cpp/server/thread_pool_interface.h b/src/cpp/server/thread_pool_interface.h index 5a182982968..028842a776f 100644 --- a/src/cpp/server/thread_pool_interface.h +++ b/src/cpp/server/thread_pool_interface.h @@ -36,7 +36,7 @@ class ThreadPoolInterface { typedef ThreadPoolInterface* (*CreateThreadPoolFunc)(void); void 
SetCreateThreadPool(CreateThreadPoolFunc func); -ThreadPoolInterface* CreateThreadPool(); +ThreadPoolInterface* CreateDefaultThreadPool(); } // namespace grpc From eb70b9e0df3b51db4e2d3466607e7713425dfac7 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Thu, 6 Jul 2017 09:38:26 -0700 Subject: [PATCH 26/47] Anon namespace over static --- src/cpp/server/create_default_thread_pool.cc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/cpp/server/create_default_thread_pool.cc b/src/cpp/server/create_default_thread_pool.cc index 57faa17f6b9..8ca3e32c2fb 100644 --- a/src/cpp/server/create_default_thread_pool.cc +++ b/src/cpp/server/create_default_thread_pool.cc @@ -23,14 +23,17 @@ #ifndef GRPC_CUSTOM_DEFAULT_THREAD_POOL namespace grpc { +namespace { -static ThreadPoolInterface* CreateDefaultThreadPoolImpl() { +ThreadPoolInterface* CreateDefaultThreadPoolImpl() { int cores = gpr_cpu_num_cores(); if (!cores) cores = 4; return new DynamicThreadPool(cores); } -static CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl; +CreateThreadPoolFunc g_ctp_impl = CreateDefaultThreadPoolImpl; + +} // namespace ThreadPoolInterface* CreateDefaultThreadPool() { return g_ctp_impl(); } From 59a19a9d5ecc34b60fd6c035a6cb261261dd48fe Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 18 Jul 2017 17:26:08 -0700 Subject: [PATCH 27/47] make sure that client-side view of calls is robust --- src/ruby/lib/grpc/generic/active_call.rb | 17 +++- src/ruby/spec/generic/client_stub_spec.rb | 97 ++++++++++++++++++++--- 2 files changed, 104 insertions(+), 10 deletions(-) diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index 96c773a995d..4a748a4ac29 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -46,7 +46,7 @@ module GRPC extend Forwardable attr_reader :deadline, :metadata_sent, :metadata_to_send def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=, - :peer, :peer_cert, :trailing_metadata + :peer, :peer_cert, :trailing_metadata, :status # client_invoke begins a client invocation. # @@ -105,6 +105,8 @@ module GRPC @input_stream_done = false @call_finished = false @call_finished_mu = Mutex.new + @client_call_executed = false + @client_call_executed_mu = Mutex.new end # Sends the initial metadata that has yet to be sent. 
@@ -327,6 +329,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Object] the response received from the server def request_response(req, metadata: {}) + raise_error_if_already_executed ops = { SEND_MESSAGE => @marshal.call(req), SEND_CLOSE_FROM_CLIENT => nil, @@ -369,6 +372,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Object] the response received from the server def client_streamer(requests, metadata: {}) + raise_error_if_already_executed begin merge_metadata_and_send_if_not_already_sent(metadata) requests.each { |r| @call.run_batch(SEND_MESSAGE => @marshal.call(r)) } @@ -411,6 +415,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Enumerator|nil] a response Enumerator def server_streamer(req, metadata: {}) + raise_error_if_already_executed ops = { SEND_MESSAGE => @marshal.call(req), SEND_CLOSE_FROM_CLIENT => nil @@ -468,6 +473,7 @@ module GRPC # a list, multiple metadata for its key are sent # @return [Enumerator, nil] a response Enumerator def bidi_streamer(requests, metadata: {}, &blk) + raise_error_if_already_executed # Metadata might have already been sent if this is an operation view merge_metadata_and_send_if_not_already_sent(metadata) bd = BidiCall.new(@call, @@ -572,6 +578,15 @@ module GRPC merge_metadata_to_send(metadata) && send_initial_metadata end + def raise_error_if_already_executed + @client_call_executed_mu.synchronize do + if @client_call_executed + fail GRPC::Core::CallError, 'attempting to re-run a call' + end + @client_call_executed = true + end + end + def self.view_class(*visible_methods) Class.new do extend ::Forwardable diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 3b8f72eda11..7b5e6a95a43 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -36,6 +36,33 @@ include GRPC::Core::StatusCodes include GRPC::Core::TimeConsts include GRPC::Core::CallOps +# check that methods on a finished/closed call t crash +def check_op_view_of_finished_client_call_is_robust(op_view) + # use read_response_stream to try to iterate through + # possible response stream + fail('need something to attempt reads') unless block_given? + expect do + resp = op_view.execute + yield resp + end.to raise_error(GRPC::Core::CallError) + + expect { op_view.start_call }.to raise_error(RuntimeError) + + expect do + op_view.wait + op_view.cancel + + op_view.metadata + op_view.trailing_metadata + op_view.status + + op_view.cancelled? 
+ op_view.deadline + op_view.write_flag + op_view.write_flag = 1 + end.to_not raise_error +end + describe 'ClientStub' do let(:noop) { proc { |x| x } } @@ -231,15 +258,27 @@ describe 'ClientStub' do it_behaves_like 'request response' - it 'sends metadata to the server ok when running start_call first' do + def run_op_view_metadata_test(run_start_call_first) server_port = create_test_server host = "localhost:#{server_port}" th = run_request_response(@sent_msg, @resp, @pass, k1: 'v1', k2: 'v2') stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) - expect(get_response(stub)).to eq(@resp) + expect( + get_response(stub, + run_start_call_first: run_start_call_first)).to eq(@resp) th.join end + + it 'sends metadata to the server ok when running start_call first' do + run_op_view_metadata_test(true) + check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + end + + it 'does not crash when used after the call has been finished' do + run_op_view_metadata_test(false) + check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + end end end @@ -307,11 +346,23 @@ describe 'ClientStub' do it_behaves_like 'client streaming' - it 'sends metadata to the server ok when running start_call first' do + def run_op_view_metadata_test(run_start_call_first) th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata) - expect(get_response(@stub, run_start_call_first: true)).to eq(@resp) + expect( + get_response(@stub, + run_start_call_first: run_start_call_first)).to eq(@resp) th.join end + + it 'sends metadata to the server ok when running start_call first' do + run_op_view_metadata_test(true) + check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + end + + it 'does not crash when used after the call has been finished' do + run_op_view_metadata_test(false) + check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + end end end @@ -377,7 +428,7 @@ describe 'ClientStub' do end end - describe 'without a call operation', test2: true do + describe 'without a call operation' do def get_responses(stub, unmarshal: noop) e = stub.server_streamer(@method, @sent_msg, noop, unmarshal, metadata: @metadata) @@ -405,16 +456,30 @@ describe 'ClientStub' do it_behaves_like 'server streaming' - it 'should send metadata to the server ok when start_call is run first' do + def run_op_view_metadata_test(run_start_call_first) server_port = create_test_server host = "localhost:#{server_port}" th = run_server_streamer(@sent_msg, @replys, @fail, k1: 'v1', k2: 'v2') stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) - e = get_responses(stub, run_start_call_first: true) + e = get_responses(stub, run_start_call_first: run_start_call_first) expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) th.join end + + it 'should send metadata to the server ok when start_call is run first' do + run_op_view_metadata_test(true) + check_op_view_of_finished_client_call_is_robust(@op) do |responses| + responses.each { |r| p r } + end + end + + it 'does not crash when used after the call has been finished' do + run_op_view_metadata_test(false) + check_op_view_of_finished_client_call_is_robust(@op) do |responses| + responses.each { |r| p r } + end + end end end @@ -501,14 +566,28 @@ describe 'ClientStub' do it_behaves_like 'bidi streaming' - it 'can run start_call before executing the call' do + def run_op_view_metadata_test(run_start_call_first) th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys, @pass) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) - e = 
get_responses(stub, run_start_call_first: true) + e = get_responses(stub, run_start_call_first: run_start_call_first) expect(e.collect { |r| r }).to eq(@replys) th.join end + + it 'can run start_call before executing the call' do + run_op_view_metadata_test(true) + check_op_view_of_finished_client_call_is_robust(@op) do |responses| + responses.each { |r| p r } + end + end + + it 'doesnt crash when op_view used after call has finished' do + run_op_view_metadata_test(false) + check_op_view_of_finished_client_call_is_robust(@op) do |responses| + responses.each { |r| p r } + end + end end end From e6506bc9b2cf32351587e90dca72aa18640615d6 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 18 Jul 2017 21:43:45 -0700 Subject: [PATCH 28/47] Fix race-condition in epoll1 poller's begin_worker() and pollset_kick() which caused the designated poller to miss a kick in some cases --- src/core/lib/iomgr/ev_epoll1_linux.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index f579b16fa58..bd7c955d035 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -600,6 +600,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, } gpr_mu_unlock(&neighbourhood->mu); } + worker_insert(pollset, worker); pollset->begin_refs--; if (worker->kick_state == UNKICKED) { @@ -628,7 +629,18 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, pollset->shutting_down); } - return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down; + /* We release pollset lock in this function at a couple of places: + * 1. Brielfly when assigning pollset to a neighbourhood + * 2. When doing gpr_cv_wait() + * It is possible that 'kicked_without_poller' was set to true during (1) and + * 'shutting_down' is set to true during (1) or (2). If either of them is + * true, this worker cannot do polling */ + + /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller + * case; especially when the worker is the DESIGNATED_POLLER */ + + return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down && + !pollset->kicked_without_poller; } static bool check_neighbourhood_for_available_poller( From a0616efadf830db1b861bd96afbac8a47dad26c5 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Tue, 18 Jul 2017 23:49:49 -0700 Subject: [PATCH 29/47] Consume kicked_without_poller --- src/core/lib/iomgr/ev_epoll1_linux.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index bd7c955d035..77d59059d43 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -630,17 +630,20 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, } /* We release pollset lock in this function at a couple of places: - * 1. Brielfly when assigning pollset to a neighbourhood + * 1. Briefly when assigning pollset to a neighbourhood * 2. When doing gpr_cv_wait() * It is possible that 'kicked_without_poller' was set to true during (1) and * 'shutting_down' is set to true during (1) or (2). 
If either of them is * true, this worker cannot do polling */ - /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller * case; especially when the worker is the DESIGNATED_POLLER */ - return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down && - !pollset->kicked_without_poller; + if (pollset->kicked_without_poller) { + pollset->kicked_without_poller = false; + return false; + } + + return worker->kick_state == DESIGNATED_POLLER && !pollset->shutting_down; } static bool check_neighbourhood_for_available_poller( From 6617790c18e4048671d64d03212cfe84b960f376 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Tue, 18 Jul 2017 18:00:38 -0700 Subject: [PATCH 30/47] Add SO_REUSEPORT support to uv iomgr code --- src/core/lib/iomgr/sockaddr_utils.c | 5 +++++ src/core/lib/iomgr/sockaddr_utils.h | 2 ++ src/core/lib/iomgr/tcp_server_uv.c | 14 +++++++++++++- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/sockaddr_utils.c b/src/core/lib/iomgr/sockaddr_utils.c index 99dc2f1c78a..3f4145d104e 100644 --- a/src/core/lib/iomgr/sockaddr_utils.c +++ b/src/core/lib/iomgr/sockaddr_utils.c @@ -220,6 +220,11 @@ const char *grpc_sockaddr_get_uri_scheme( return NULL; } +int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr) { + const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; + return addr->sa_family; +} + int grpc_sockaddr_get_port(const grpc_resolved_address *resolved_addr) { const struct sockaddr *addr = (const struct sockaddr *)resolved_addr->addr; switch (addr->sa_family) { diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h index 7692b969f2b..a589a197054 100644 --- a/src/core/lib/iomgr/sockaddr_utils.h +++ b/src/core/lib/iomgr/sockaddr_utils.h @@ -75,4 +75,6 @@ char *grpc_sockaddr_to_uri(const grpc_resolved_address *addr); /* Returns the URI scheme corresponding to \a addr */ const char *grpc_sockaddr_get_uri_scheme(const grpc_resolved_address *addr); +int grpc_sockaddr_get_family(const grpc_resolved_address *resolved_addr); + #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */ diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index 2ab836cc34d..079c9135796 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -316,6 +316,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, unsigned port_index = 0; int status; grpc_error *error = GRPC_ERROR_NONE; + int family; if (s->tail != NULL) { port_index = s->tail->port_index + 1; @@ -353,7 +354,18 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, } handle = gpr_malloc(sizeof(uv_tcp_t)); - status = uv_tcp_init(uv_default_loop(), handle); + + family = grpc_sockaddr_get_family(addr); + status = uv_tcp_init_ex(uv_default_loop(), handle, (unsigned int)family); +#if defined(GPR_LINUX) && defined(SO_REUSEPORT) + if (family == AF_INET || family == AF_INET6) { + int fd; + uv_fileno((uv_handle_t *)handle, &fd); + int enable = 1; + setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &enable, sizeof(enable)); + } +#endif /* GPR_LINUX && SO_REUSEPORT */ + if (status == 0) { error = add_socket_to_server(s, handle, addr, port_index, &sp); } else { From f7350ea6b7b58d632bf4a8aafaa0354e022d9c0b Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 19 Jul 2017 10:26:41 -0700 Subject: [PATCH 31/47] Adding connect auth feature. 
Proxy-Authorization header is being inserted when user creds are present in uri --- CMakeLists.txt | 2 + Makefile | 2 + .../ext/filters/client_channel/http_proxy.c | 53 +- test/core/end2end/end2end_nosec_tests.c | 8 + test/core/end2end/end2end_tests.c | 8 + test/core/end2end/fixtures/h2_http_proxy.c | 30 +- .../end2end/fixtures/http_proxy_fixture.c | 43 +- .../end2end/fixtures/http_proxy_fixture.h | 21 +- test/core/end2end/gen_build_yaml.py | 1 + test/core/end2end/generate_tests.bzl | 1 + .../end2end/tests/payload_with_proxy_auth.c | 302 ++++++ .../generated/sources_and_headers.json | 2 + tools/run_tests/generated/tests.json | 904 ++++++++++++++++-- .../end2end_nosec_tests.vcxproj | 2 + .../end2end_nosec_tests.vcxproj.filters | 3 + .../tests/end2end_tests/end2end_tests.vcxproj | 2 + .../end2end_tests.vcxproj.filters | 3 + 17 files changed, 1300 insertions(+), 87 deletions(-) create mode 100644 test/core/end2end/tests/payload_with_proxy_auth.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 3dd8ec45037..3b20c045943 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4337,6 +4337,7 @@ add_library(end2end_tests test/core/end2end/tests/no_logging.c test/core/end2end/tests/no_op.c test/core/end2end/tests/payload.c + test/core/end2end/tests/payload_with_proxy_auth.c test/core/end2end/tests/ping.c test/core/end2end/tests/ping_pong_streaming.c test/core/end2end/tests/registered_call.c @@ -4436,6 +4437,7 @@ add_library(end2end_nosec_tests test/core/end2end/tests/no_logging.c test/core/end2end/tests/no_op.c test/core/end2end/tests/payload.c + test/core/end2end/tests/payload_with_proxy_auth.c test/core/end2end/tests/ping.c test/core/end2end/tests/ping_pong_streaming.c test/core/end2end/tests/registered_call.c diff --git a/Makefile b/Makefile index f58a02df9ae..e68ccae3b07 100644 --- a/Makefile +++ b/Makefile @@ -7926,6 +7926,7 @@ LIBEND2END_TESTS_SRC = \ test/core/end2end/tests/no_logging.c \ test/core/end2end/tests/no_op.c \ test/core/end2end/tests/payload.c \ + test/core/end2end/tests/payload_with_proxy_auth.c \ test/core/end2end/tests/ping.c \ test/core/end2end/tests/ping_pong_streaming.c \ test/core/end2end/tests/registered_call.c \ @@ -8020,6 +8021,7 @@ LIBEND2END_NOSEC_TESTS_SRC = \ test/core/end2end/tests/no_logging.c \ test/core/end2end/tests/no_op.c \ test/core/end2end/tests/payload.c \ + test/core/end2end/tests/payload_with_proxy_auth.c \ test/core/end2end/tests/ping.c \ test/core/end2end/tests/ping_pong_streaming.c \ test/core/end2end/tests/registered_call.c \ diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index cfb5ec6f00e..faa4b3c319c 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -30,13 +30,17 @@ #include "src/core/ext/filters/client_channel/uri_parser.h" #include "src/core/lib/channel/channel_args.h" #include "src/core/lib/support/env.h" +#include "src/core/lib/support/string.h" +#include "src/core/lib/slice/b64.h" -static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) { +static void grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, + char **name_to_resolve, + char **user_cred) { + *name_to_resolve = NULL; char* uri_str = gpr_getenv("http_proxy"); - if (uri_str == NULL) return NULL; + if (uri_str == NULL) return; grpc_uri* uri = grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */); - char* proxy_name = NULL; if (uri == NULL || uri->authority == NULL) { gpr_log(GPR_ERROR, "cannot parse value of 'http_proxy' env var"); goto 
done; @@ -45,15 +49,18 @@ static char* grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx) { gpr_log(GPR_ERROR, "'%s' scheme not supported in proxy URI", uri->scheme); goto done; } - if (strchr(uri->authority, '@') != NULL) { - gpr_log(GPR_ERROR, "userinfo not supported in proxy URI"); - goto done; + char *user_cred_end = strchr(uri->authority, '@'); + if (user_cred_end != NULL) { + *name_to_resolve = gpr_strdup(user_cred_end + 1); + *user_cred_end = '\0'; + *user_cred = gpr_strdup(uri->authority); + gpr_log(GPR_INFO, "userinfo found in proxy URI"); + } else { + *name_to_resolve = gpr_strdup(uri->authority); } - proxy_name = gpr_strdup(uri->authority); done: gpr_free(uri_str); grpc_uri_destroy(uri); - return proxy_name; } static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, @@ -62,7 +69,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { - *name_to_resolve = grpc_get_http_proxy_server(exec_ctx); + char *user_cred = NULL; + grpc_get_http_proxy_server(exec_ctx, name_to_resolve, &user_cred); if (*name_to_resolve == NULL) return false; grpc_uri* uri = grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); @@ -71,19 +79,40 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, "'http_proxy' environment variable set, but cannot " "parse server URI '%s' -- not using proxy", server_uri); - if (uri != NULL) grpc_uri_destroy(uri); + if (uri != NULL) { + gpr_free(user_cred); + grpc_uri_destroy(uri); + } return false; } if (strcmp(uri->scheme, "unix") == 0) { gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'", server_uri); + gpr_free(user_cred); grpc_uri_destroy(uri); return false; } - grpc_arg new_arg = grpc_channel_arg_string_create( + + grpc_arg args_to_add[2]; + args_to_add[0] = grpc_channel_arg_string_create( GRPC_ARG_HTTP_CONNECT_SERVER, uri->path[0] == '/' ? 
uri->path + 1 : uri->path); - *new_args = grpc_channel_args_copy_and_add(args, &new_arg, 1); + + if(user_cred != NULL) { + /* Use base64 encoding for user credentials */ + char *encoded_user_cred = + grpc_base64_encode(user_auth, strlen(user_cred), 0, 0); + char *header; + gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred); + gpr_free(encoded_user_cred); + args_to_add[1] = grpc_channel_arg_string_create( + GRPC_ARG_HTTP_CONNECT_HEADERS, header); + *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2); + gpr_free(header); + } else { + *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1); + } + gpr_free(user_cred); grpc_uri_destroy(uri); return true; } diff --git a/test/core/end2end/end2end_nosec_tests.c b/test/core/end2end/end2end_nosec_tests.c index ae1db54f1a8..483f84e7837 100644 --- a/test/core/end2end/end2end_nosec_tests.c +++ b/test/core/end2end/end2end_nosec_tests.c @@ -102,6 +102,8 @@ extern void no_op(grpc_end2end_test_config config); extern void no_op_pre_init(void); extern void payload(grpc_end2end_test_config config); extern void payload_pre_init(void); +extern void payload_with_proxy_auth(grpc_end2end_test_config config); +extern void payload_with_proxy_auth_pre_init(void); extern void ping(grpc_end2end_test_config config); extern void ping_pre_init(void); extern void ping_pong_streaming(grpc_end2end_test_config config); @@ -179,6 +181,7 @@ void grpc_end2end_tests_pre_init(void) { no_logging_pre_init(); no_op_pre_init(); payload_pre_init(); + payload_with_proxy_auth_pre_init(); ping_pre_init(); ping_pong_streaming_pre_init(); registered_call_pre_init(); @@ -242,6 +245,7 @@ void grpc_end2end_tests(int argc, char **argv, no_logging(config); no_op(config); payload(config); + payload_with_proxy_auth(config); ping(config); ping_pong_streaming(config); registered_call(config); @@ -408,6 +412,10 @@ void grpc_end2end_tests(int argc, char **argv, payload(config); continue; } + if (0 == strcmp("payload_with_proxy_auth", argv[i])) { + payload_with_proxy_auth(config); + continue; + } if (0 == strcmp("ping", argv[i])) { ping(config); continue; diff --git a/test/core/end2end/end2end_tests.c b/test/core/end2end/end2end_tests.c index d18dd9c7b6b..745546dbb7f 100644 --- a/test/core/end2end/end2end_tests.c +++ b/test/core/end2end/end2end_tests.c @@ -104,6 +104,8 @@ extern void no_op(grpc_end2end_test_config config); extern void no_op_pre_init(void); extern void payload(grpc_end2end_test_config config); extern void payload_pre_init(void); +extern void payload_with_proxy_auth(grpc_end2end_test_config config); +extern void payload_with_proxy_auth_pre_init(void); extern void ping(grpc_end2end_test_config config); extern void ping_pre_init(void); extern void ping_pong_streaming(grpc_end2end_test_config config); @@ -182,6 +184,7 @@ void grpc_end2end_tests_pre_init(void) { no_logging_pre_init(); no_op_pre_init(); payload_pre_init(); + payload_with_proxy_auth_pre_init(); ping_pre_init(); ping_pong_streaming_pre_init(); registered_call_pre_init(); @@ -246,6 +249,7 @@ void grpc_end2end_tests(int argc, char **argv, no_logging(config); no_op(config); payload(config); + payload_with_proxy_auth(config); ping(config); ping_pong_streaming(config); registered_call(config); @@ -416,6 +420,10 @@ void grpc_end2end_tests(int argc, char **argv, payload(config); continue; } + if (0 == strcmp("payload_with_proxy_auth", argv[i])) { + payload_with_proxy_auth(config); + continue; + } if (0 == strcmp("ping", argv[i])) { ping(config); continue; diff --git 
a/test/core/end2end/fixtures/h2_http_proxy.c b/test/core/end2end/fixtures/h2_http_proxy.c index f8c88e59537..f87036d52e2 100644 --- a/test/core/end2end/fixtures/h2_http_proxy.c +++ b/test/core/end2end/fixtures/h2_http_proxy.c @@ -47,11 +47,26 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; memset(&f, 0, sizeof(f)); + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data)); const int server_port = grpc_pick_unused_port_or_die(); gpr_join_host_port(&ffd->server_addr, "localhost", server_port); - ffd->proxy = grpc_end2end_http_proxy_create(); + + /*const grpc_arg *proxy_auth_arg = + grpc_channel_args_find(client_args, "test_uses_proxy_auth"); + ffd->proxy = grpc_end2end_http_proxy_create(proxy_args);*/ + //If we are testing proxy auth, add the proxy auth arg to proxy channel args + grpc_channel_args *proxy_args = NULL; + const grpc_arg *proxy_auth_arg = grpc_channel_args_find( + client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT); + if(proxy_auth_arg) { + proxy_args = grpc_channel_args_copy_and_add(NULL, proxy_auth_arg, 1); + } + ffd->proxy = grpc_end2end_http_proxy_create(proxy_args); + grpc_channel_args_destroy(&exec_ctx, proxy_args); + + grpc_exec_ctx_finish(&exec_ctx); f.fixture_data = ffd; f.cq = grpc_completion_queue_create_for_next(NULL); @@ -64,8 +79,17 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f, grpc_channel_args *client_args) { fullstack_fixture_data *ffd = f->fixture_data; char *proxy_uri; - gpr_asprintf(&proxy_uri, "http://%s", - grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); + + // If testing for proxy auth, add credentials to proxy uri + if(grpc_channel_args_find( + client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) == NULL) { + gpr_asprintf(&proxy_uri, "http://%s", + grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); + } else { + gpr_asprintf(&proxy_uri, "http://%s@%s", + GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED, + grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); + } gpr_setenv("http_proxy", proxy_uri); gpr_free(proxy_uri); f->client = grpc_insecure_channel_create(ffd->server_addr, client_args, NULL); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c index 54693c49001..d69ed1a0866 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -22,6 +22,7 @@ #include +#include #include #include #include @@ -47,6 +48,7 @@ #include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/iomgr/timer.h" #include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/slice/b64.h" #include "test/core/util/port.h" struct grpc_end2end_http_proxy { @@ -352,6 +354,42 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, GRPC_ERROR_UNREF(error); return; } + // If proxy auth is being used, check if the header is present + if(grpc_channel_args_find( + conn->proxy->channel_args, + GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) != NULL) { + bool found = false, failed = false; + for(size_t i = 0; i < conn->http_request.hdr_count; i++) { + if(strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) { + found = true; + // Authentication type should be Basic + if(strncmp(conn->http_request.hdrs[i].value, "Basic", + strlen("Basic")) != 0) { + failed = true; + break; + } + // Check if encoded string is as 
expected + char *encoded_str_start = + strchr(conn->http_request.hdrs[i].value, ' ') + 1; + grpc_slice decoded_slice = + grpc_base64_decode(exec_ctx, encoded_str_start, 0); + if(grpc_slice_str_cmp( + decoded_slice, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED) != 0) { + failed = true; + break; + } + break; + } + } + if(!found || failed) { + const char *msg = "HTTP Connect could not verify authentication"; + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + proxy_connection_failed(exec_ctx, conn, true /* is_client */, + "HTTP proxy read request", error); + GRPC_ERROR_UNREF(error); + return; + } + } // Resolve address. grpc_resolved_addresses* resolved_addresses = NULL; error = grpc_blocking_resolve_address(conn->http_request.path, "80", @@ -436,7 +474,8 @@ static void thread_main(void* arg) { grpc_exec_ctx_finish(&exec_ctx); } -grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) { +grpc_end2end_http_proxy* grpc_end2end_http_proxy_create( + grpc_channel_args *args) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy)); @@ -448,7 +487,7 @@ grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(void) { gpr_join_host_port(&proxy->proxy_name, "localhost", proxy_port); gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name); // Create TCP server. - proxy->channel_args = grpc_channel_args_copy(NULL); + proxy->channel_args = grpc_channel_args_copy(args); grpc_error* error = grpc_tcp_server_create( &exec_ctx, NULL, proxy->channel_args, &proxy->server); GPR_ASSERT(error == GRPC_ERROR_NONE); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.h b/test/core/end2end/fixtures/http_proxy_fixture.h index a72162e846c..f3da0494ae0 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.h +++ b/test/core/end2end/fixtures/http_proxy_fixture.h @@ -16,11 +16,30 @@ * */ +#ifndef GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H +#define GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H + +#include + +/* The test credentials being used for HTTP Proxy Authorization */ +#define GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED "aladdin:opensesame" + +/* A channel arg key used to indicate that the channel uses proxy authorization. + * The value is of no consequence as just the presence of the argument is + * enough. It is currently kept as of type integer but can be changed as seen + * fit. 
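+ * Fixtures that see this arg add the test credentials to the proxy URI and
+ * configure the test proxy to insist on a matching Proxy-Authorization header.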
+ */ +#define GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT \ + "grpc.test.connect_auth" + typedef struct grpc_end2end_http_proxy grpc_end2end_http_proxy; -grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(); +grpc_end2end_http_proxy* grpc_end2end_http_proxy_create( + grpc_channel_args *args); void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy); const char* grpc_end2end_http_proxy_get_proxy_name( grpc_end2end_http_proxy* proxy); + +#endif /* GRPC_TEST_CORE_END2END_FIXTURES_HTTP_PROXY_FIXTURE_H */ diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index 6dffacf9d71..53ee9622226 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -122,6 +122,7 @@ END2END_TESTS = { 'no_logging': default_test_options._replace(traceable=False), 'no_op': default_test_options, 'payload': default_test_options, + 'payload_with_proxy_auth': default_test_options, 'load_reporting_hook': default_test_options, 'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU), 'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU), diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl index 3312f4e596c..57dffd4a3d0 100755 --- a/test/core/end2end/generate_tests.bzl +++ b/test/core/end2end/generate_tests.bzl @@ -106,6 +106,7 @@ END2END_TESTS = { 'no_logging': test_options(traceable=False), 'no_op': test_options(), 'payload': test_options(), + 'payload_with_proxy_auth': test_options(), 'load_reporting_hook': test_options(), 'ping_pong_streaming': test_options(), 'ping': test_options(needs_fullstack=True, proxyable=False), diff --git a/test/core/end2end/tests/payload_with_proxy_auth.c b/test/core/end2end/tests/payload_with_proxy_auth.c new file mode 100644 index 00000000000..4d176f9145b --- /dev/null +++ b/test/core/end2end/tests/payload_with_proxy_auth.c @@ -0,0 +1,302 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include "test/core/end2end/end2end_tests.h" +#include "test/core/end2end/fixtures/http_proxy_fixture.h" + +#include +#include + +#include +#include +#include +#include +#include +#include "test/core/end2end/cq_verifier.h" + +static void *tag(intptr_t t) { return (void *)t; } + +static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config, + const char *test_name, + grpc_channel_args *client_args, + grpc_channel_args *server_args) { + grpc_end2end_test_fixture f; + gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name); + f = config.create_fixture(client_args, server_args); + config.init_server(&f, server_args); + config.init_client(&f, client_args); + return f; +} + +static gpr_timespec n_seconds_from_now(int n) { + return grpc_timeout_seconds_to_deadline(n); +} + +static gpr_timespec five_seconds_from_now(void) { + return n_seconds_from_now(5); +} + +static void drain_cq(grpc_completion_queue *cq) { + grpc_event ev; + do { + ev = grpc_completion_queue_next(cq, five_seconds_from_now(), NULL); + } while (ev.type != GRPC_QUEUE_SHUTDOWN); +} + +static void shutdown_server(grpc_end2end_test_fixture *f) { + if (!f->server) return; + grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000)); + GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000), + grpc_timeout_seconds_to_deadline(5), + NULL) + .type == GRPC_OP_COMPLETE); + grpc_server_destroy(f->server); + f->server = NULL; +} + +static void shutdown_client(grpc_end2end_test_fixture *f) { + if (!f->client) return; + grpc_channel_destroy(f->client); + f->client = NULL; +} + +static void end_test(grpc_end2end_test_fixture *f) { + shutdown_server(f); + shutdown_client(f); + + grpc_completion_queue_shutdown(f->cq); + drain_cq(f->cq); + grpc_completion_queue_destroy(f->cq); + grpc_completion_queue_destroy(f->shutdown_cq); +} + +/* Creates and returns a grpc_slice containing random alphanumeric characters. + */ +static grpc_slice generate_random_slice() { + size_t i; + static const char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890"; + char *output; + const size_t output_size = 1024 * 1024; + output = gpr_malloc(output_size); + for (i = 0; i < output_size - 1; ++i) { + output[i] = chars[rand() % (int)(sizeof(chars) - 1)]; + } + output[output_size - 1] = '\0'; + grpc_slice out = grpc_slice_from_copied_string(output); + gpr_free(output); + return out; +} + +static void request_response_with_payload_and_proxy_auth + (grpc_end2end_test_config config, + grpc_end2end_test_fixture f) { + /* Create large request and response bodies. These are big enough to require + * multiple round trips to deliver to the peer, and their exact contents of + * will be verified on completion. 
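+ * The fixture is expected to have been created with the proxy-auth channel
+ * arg, so the HTTP CONNECT request to the test proxy carries credentials.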
*/ + grpc_slice request_payload_slice = generate_random_slice(); + grpc_slice response_payload_slice = generate_random_slice(); + + grpc_call *c; + grpc_call *s; + grpc_byte_buffer *request_payload = + grpc_raw_byte_buffer_create(&request_payload_slice, 1); + grpc_byte_buffer *response_payload = + grpc_raw_byte_buffer_create(&response_payload_slice, 1); + cq_verifier *cqv = cq_verifier_create(f.cq); + grpc_op ops[6]; + grpc_op *op; + grpc_metadata_array initial_metadata_recv; + grpc_metadata_array trailing_metadata_recv; + grpc_metadata_array request_metadata_recv; + grpc_byte_buffer *request_payload_recv = NULL; + grpc_byte_buffer *response_payload_recv = NULL; + grpc_call_details call_details; + grpc_status_code status; + grpc_call_error error; + grpc_slice details; + int was_cancelled = 2; + + gpr_timespec deadline = n_seconds_from_now(60); + c = grpc_channel_create_call( + f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, + grpc_slice_from_static_string("/foo"), + get_host_override_slice("foo.test.google.fr:1234", config), deadline, + NULL); + GPR_ASSERT(c); + + grpc_metadata_array_init(&initial_metadata_recv); + grpc_metadata_array_init(&trailing_metadata_recv); + grpc_metadata_array_init(&request_metadata_recv); + grpc_call_details_init(&call_details); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = request_payload; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_INITIAL_METADATA; + op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message.recv_message = &response_payload_recv; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; + op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; + op->data.recv_status_on_client.status = &status; + op->data.recv_status_on_client.status_details = &details; + op->flags = 0; + op->reserved = NULL; + op++; + error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); + GPR_ASSERT(GRPC_CALL_OK == error); + + error = + grpc_server_request_call(f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101)); + GPR_ASSERT(GRPC_CALL_OK == error); + CQ_EXPECT_COMPLETION(cqv, tag(101), 1); + cq_verify(cqv); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_SEND_INITIAL_METADATA; + op->data.send_initial_metadata.count = 0; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_RECV_MESSAGE; + op->data.recv_message.recv_message = &request_payload_recv; + op->flags = 0; + op->reserved = NULL; + op++; + error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL); + GPR_ASSERT(GRPC_CALL_OK == error); + + CQ_EXPECT_COMPLETION(cqv, tag(102), 1); + cq_verify(cqv); + + memset(ops, 0, sizeof(ops)); + op = ops; + op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; + op->data.recv_close_on_server.cancelled = &was_cancelled; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_MESSAGE; + op->data.send_message.send_message = response_payload; + op->flags = 0; + op->reserved = NULL; + op++; + op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; + op->data.send_status_from_server.trailing_metadata_count = 0; + 
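+  /* the OK status and "xyz" details sent here are what the client-side
+     assertions below check for */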
op->data.send_status_from_server.status = GRPC_STATUS_OK; + grpc_slice status_details = grpc_slice_from_static_string("xyz"); + op->data.send_status_from_server.status_details = &status_details; + op->flags = 0; + op->reserved = NULL; + op++; + error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL); + GPR_ASSERT(GRPC_CALL_OK == error); + + CQ_EXPECT_COMPLETION(cqv, tag(103), 1); + CQ_EXPECT_COMPLETION(cqv, tag(1), 1); + cq_verify(cqv); + + GPR_ASSERT(status == GRPC_STATUS_OK); + GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz")); + GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo")); + validate_host_override_string("foo.test.google.fr:1234", call_details.host, + config); + GPR_ASSERT(was_cancelled == 0); + GPR_ASSERT(byte_buffer_eq_slice(request_payload_recv, request_payload_slice)); + GPR_ASSERT( + byte_buffer_eq_slice(response_payload_recv, response_payload_slice)); + + grpc_slice_unref(details); + grpc_metadata_array_destroy(&initial_metadata_recv); + grpc_metadata_array_destroy(&trailing_metadata_recv); + grpc_metadata_array_destroy(&request_metadata_recv); + grpc_call_details_destroy(&call_details); + + grpc_call_unref(c); + grpc_call_unref(s); + + cq_verifier_destroy(cqv); + + grpc_byte_buffer_destroy(request_payload); + grpc_byte_buffer_destroy(response_payload); + grpc_byte_buffer_destroy(request_payload_recv); + grpc_byte_buffer_destroy(response_payload_recv); +} + +/* Client sends a request with payload, server reads then returns a response + payload and status. */ +static void test_invoke_request_response_with_payload_and_proxy_auth( + grpc_end2end_test_config config) { + /* Indicate that the proxy requires user auth */ + grpc_arg client_arg = {.type = GRPC_ARG_INTEGER, + .key = GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT, + .value.integer = 0}; + grpc_channel_args client_args = {.num_args = 1, .args = &client_arg}; + grpc_end2end_test_fixture f = begin_test( + config, "test_invoke_request_response_with_payload_and_proxy_auth", + &client_args, NULL); + request_response_with_payload_and_proxy_auth(config, f); + end_test(&f); + config.tear_down_data(&f); +} + +static void test_invoke_10_request_response_with_payload_and_proxy_auth( + grpc_end2end_test_config config) { + int i; + /* Indicate that the proxy requires user auth */ + grpc_arg client_arg = {.type = GRPC_ARG_INTEGER, + .key = GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT, + .value.integer = 0}; + grpc_channel_args client_args = {.num_args = 1, .args = &client_arg}; + grpc_end2end_test_fixture f = begin_test( + config, "test_invoke_10_request_response_with_payload_and_proxy_auth", + &client_args, NULL); + for (i = 0; i < 10; i++) { + request_response_with_payload_and_proxy_auth(config, f); + } + end_test(&f); + config.tear_down_data(&f); +} + +void payload_with_proxy_auth(grpc_end2end_test_config config) { + test_invoke_request_response_with_payload_and_proxy_auth(config); + test_invoke_10_request_response_with_payload_and_proxy_auth(config); +} + +void payload_with_proxy_auth_pre_init(void) {} diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 02562bfae4d..61eab0bac9d 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7256,6 +7256,7 @@ "test/core/end2end/tests/no_logging.c", "test/core/end2end/tests/no_op.c", "test/core/end2end/tests/payload.c", + "test/core/end2end/tests/payload_with_proxy_auth.c", "test/core/end2end/tests/ping.c", 
"test/core/end2end/tests/ping_pong_streaming.c", "test/core/end2end/tests/registered_call.c", @@ -7333,6 +7334,7 @@ "test/core/end2end/tests/no_logging.c", "test/core/end2end/tests/no_op.c", "test/core/end2end/tests/payload.c", + "test/core/end2end/tests/payload_with_proxy_auth.c", "test/core/end2end/tests/ping.c", "test/core/end2end/tests/ping_pong_streaming.c", "test/core/end2end/tests/registered_call.c", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index dd812414b9f..f71da435163 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -6585,6 +6585,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_census_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -7854,6 +7877,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -9063,6 +9109,28 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_fakesec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -10195,6 +10263,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_fd_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -11418,6 +11509,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -12535,6 +12649,25 @@ "linux" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_test", + "platforms": [ + "linux" + ] + }, { "args": [ "ping" @@ -13686,6 +13819,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -14955,6 +15111,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+workarounds_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, 
{ "args": [ "ping" @@ -16257,6 +16436,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -17544,6 +17747,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_load_reporting_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -18848,14 +19074,14 @@ }, { "args": [ - "ping" + "payload_with_proxy_auth" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -18872,7 +19098,7 @@ }, { "args": [ - "ping_pong_streaming" + "ping" ], "ci_platforms": [ "windows", @@ -18896,31 +19122,7 @@ }, { "args": [ - "registered_call" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "request_with_flags" + "ping_pong_streaming" ], "ci_platforms": [ "windows", @@ -18944,14 +19146,14 @@ }, { "args": [ - "request_with_payload" + "registered_call" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -18968,14 +19170,14 @@ }, { "args": [ - "resource_quota_server" + "request_with_flags" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -18992,7 +19194,55 @@ }, { "args": [ - "server_finishes_request" + "request_with_payload" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "resource_quota_server" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_oauth2_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "server_finishes_request" ], "ci_platforms": [ "windows", @@ -19998,6 +20248,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -21126,6 +21400,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -22230,6 
+22528,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -23422,6 +23744,32 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [ + "msan" + ], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -24667,6 +25015,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -25936,6 +26307,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_ssl_cert_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -27070,6 +27464,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_ssl_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -28260,14 +28678,14 @@ }, { "args": [ - "ping" + "payload_with_proxy_auth" ], "ci_platforms": [ "linux", "mac", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -28283,7 +28701,7 @@ }, { "args": [ - "ping_pong_streaming" + "ping" ], "ci_platforms": [ "linux", @@ -28306,30 +28724,7 @@ }, { "args": [ - "registered_call" - ], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_uds_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "request_with_flags" + "ping_pong_streaming" ], "ci_platforms": [ "linux", @@ -28352,14 +28747,14 @@ }, { "args": [ - "request_with_payload" + "registered_call" ], "ci_platforms": [ "linux", "mac", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -28375,14 +28770,14 @@ }, { "args": [ - "resource_quota_server" + "request_with_flags" ], "ci_platforms": [ "linux", "mac", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -28398,7 +28793,7 @@ }, { "args": [ - "server_finishes_request" + "request_with_payload" ], "ci_platforms": [ "linux", @@ -28421,14 +28816,14 @@ }, { "args": [ - "shutdown_finishes_calls" + "resource_quota_server" ], "ci_platforms": [ "linux", "mac", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -28444,7 +28839,7 @@ }, { "args": [ - "shutdown_finishes_tags" + 
"server_finishes_request" ], "ci_platforms": [ "linux", @@ -28467,7 +28862,53 @@ }, { "args": [ - "simple_cacheable_request" + "shutdown_finishes_calls" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_uds_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "shutdown_finishes_tags" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_uds_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "simple_cacheable_request" ], "ci_platforms": [ "linux", @@ -29504,6 +29945,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_census_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -30750,6 +31214,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_compress_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -31854,6 +32341,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_fd_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -33054,6 +33564,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -34152,6 +34685,25 @@ "linux" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "linux" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_full+pipe_nosec_test", + "platforms": [ + "linux" + ] + }, { "args": [ "ping" @@ -35280,6 +35832,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -36526,6 +37101,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_full+workarounds_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -37804,6 +38402,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" 
+ ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_http_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -39068,6 +39690,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [], + "flaky": false, + "language": "c", + "name": "h2_load_reporting_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" @@ -40178,6 +40823,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_proxy_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -41282,6 +41951,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -42362,6 +43055,30 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair+trace_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -43528,6 +44245,32 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [ + "msan" + ], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_sockpair_1byte_nosec_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping_pong_streaming" @@ -44723,6 +45466,29 @@ "posix" ] }, + { + "args": [ + "payload_with_proxy_auth" + ], + "ci_platforms": [ + "linux", + "mac", + "posix" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_uds_nosec_test", + "platforms": [ + "linux", + "mac", + "posix" + ] + }, { "args": [ "ping" diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj index 4d02c880825..3a45ed619c0 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj @@ -227,6 +227,8 @@ + + diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters index 89316bc535f..69bcf02b43d 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters @@ -115,6 +115,9 @@ test\core\end2end\tests + + 
test\core\end2end\tests + test\core\end2end\tests diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj index 95731114523..fe1f6279805 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj @@ -229,6 +229,8 @@ + + diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters index 7d02fc3fa07..255a76e107e 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters @@ -118,6 +118,9 @@ test\core\end2end\tests + + test\core\end2end\tests + test\core\end2end\tests From ad11680be7e33bb813073b79286e5ada0c5830f2 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 19 Jul 2017 11:00:22 -0700 Subject: [PATCH 32/47] remove unnecessary code --- test/core/end2end/fixtures/h2_http_proxy.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/core/end2end/fixtures/h2_http_proxy.c b/test/core/end2end/fixtures/h2_http_proxy.c index f87036d52e2..817a9a0a094 100644 --- a/test/core/end2end/fixtures/h2_http_proxy.c +++ b/test/core/end2end/fixtures/h2_http_proxy.c @@ -53,9 +53,6 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( const int server_port = grpc_pick_unused_port_or_die(); gpr_join_host_port(&ffd->server_addr, "localhost", server_port); - /*const grpc_arg *proxy_auth_arg = - grpc_channel_args_find(client_args, "test_uses_proxy_auth"); - ffd->proxy = grpc_end2end_http_proxy_create(proxy_args);*/ //If we are testing proxy auth, add the proxy auth arg to proxy channel args grpc_channel_args *proxy_args = NULL; const grpc_arg *proxy_auth_arg = grpc_channel_args_find( From e929547d48d645cb5fbf3cf0db1663435c899800 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Wed, 19 Jul 2017 15:58:40 -0700 Subject: [PATCH 33/47] Fix issues with Node Mac build --- binding.gyp | 60 ++++++++++++++++++++++++++-------- templates/binding.gyp.template | 45 ++++++++++++++++++------- 2 files changed, 79 insertions(+), 26 deletions(-) diff --git a/binding.gyp b/binding.gyp index d11a60a68af..705b8f22c0c 100644 --- a/binding.gyp +++ b/binding.gyp @@ -175,21 +175,27 @@ }], ['OS == "mac"', { 'xcode_settings': { - 'MACOSX_DEPLOYMENT_TARGET': '10.9' + 'OTHER_CFLAGS': [ + '-g', + '-Wall', + '-Wextra', + '-Werror', + '-Wno-long-long', + '-Wno-unused-parameter', + '-DOSATOMIC_USE_INLINED=1', + ], + 'OTHER_CPLUSPLUSFLAGS': [ + '-g', + '-Wall', + '-Wextra', + '-Werror', + '-Wno-long-long', + '-Wno-unused-parameter', + '-DOSATOMIC_USE_INLINED=1', + '-stdlib=libc++', + '-std=c++11' + ], }, - 'OTHER_CFLAGS': [ - '-g', - '-Wall', - '-Wextra', - '-Werror', - '-Wno-long-long', - '-Wno-unused-parameter', - '-DOSATOMIC_USE_INLINED=1', - ], - 'OTHER_CPLUSPLUSFLAGS': [ - '-stdlib=libc++', - '-std=c++11' - ], }] ] }, @@ -508,6 +514,13 @@ 'third_party/boringssl/ssl/tls_method.c', 'third_party/boringssl/ssl/tls_record.c', ], + 'conditions': [ + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } + }] + ] }, ], }], @@ -626,6 +639,13 @@ 'src/core/lib/support/tmpfile_windows.c', 'src/core/lib/support/wrap_memcpy.c', ], + 'conditions': [ + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } + }] + ] }, { 'target_name': 
'grpc', @@ -889,6 +909,13 @@ 'src/core/ext/filters/workarounds/workaround_utils.c', 'src/core/plugin_registry/grpc_plugin_registry.c', ], + 'conditions': [ + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } + }] + ] }, { 'include_dirs': [ @@ -914,6 +941,11 @@ 'ldflags': [ '-Wl,-wrap,memcpy' ] + }], + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } }] ], "target_name": "grpc_node", diff --git a/templates/binding.gyp.template b/templates/binding.gyp.template index b7560fe7df5..b1e1374249d 100644 --- a/templates/binding.gyp.template +++ b/templates/binding.gyp.template @@ -165,19 +165,21 @@ }], ['OS == "mac"', { 'xcode_settings': { - 'MACOSX_DEPLOYMENT_TARGET': '10.9' + % if defaults['global'].get('CPPFLAGS', None) is not None: + 'OTHER_CFLAGS': [ + % for item in defaults['global'].get('CPPFLAGS').split(): + '${item}', + % endfor + ], + 'OTHER_CPLUSPLUSFLAGS': [ + % for item in defaults['global'].get('CPPFLAGS').split(): + '${item}', + % endfor + '-stdlib=libc++', + '-std=c++11' + ], + % endif }, - % if defaults['global'].get('CPPFLAGS', None) is not None: - 'OTHER_CFLAGS': [ - % for item in defaults['global'].get('CPPFLAGS').split(): - '${item}', - % endfor - ], - 'OTHER_CPLUSPLUSFLAGS': [ - '-stdlib=libc++', - '-std=c++11' - ], - % endif }] ] }, @@ -201,6 +203,13 @@ '${source}', % endfor ], + 'conditions': [ + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } + }] + ] }, % endif % endfor @@ -282,6 +291,13 @@ '${source}', % endfor ], + 'conditions': [ + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } + }] + ] }, % endif % endfor @@ -317,6 +333,11 @@ 'ldflags': [ '-Wl,-wrap,memcpy' ] + }], + ['OS == "mac"', { + 'xcode_settings': { + 'MACOSX_DEPLOYMENT_TARGET': '10.9' + } }] ], "target_name": "${module.name}", From 78d7125017951d17b4e9ee2b8d184c5beed65d7c Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 19 Jul 2017 16:33:16 -0700 Subject: [PATCH 34/47] Removing a few style issues --- .../ext/filters/client_channel/http_proxy.c | 44 +++++++++---- test/core/end2end/fixtures/h2_http_proxy.c | 7 +- .../end2end/fixtures/http_proxy_fixture.c | 66 +++++++++++++------ 3 files changed, 84 insertions(+), 33 deletions(-) diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index 1bd847b0dbc..a8a23ceb9ed 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -34,12 +34,19 @@ #include "src/core/lib/support/string.h" #include "src/core/lib/slice/b64.h" -static void grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, - char **name_to_resolve, +/** + * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or + * NULL on error. Also sets 'user_cred' if it is not NULL to user credentials + * if present in the 'http_proxy' env var. 
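+ * The caller takes ownership of the returned string and, when present, of
+ * *user_cred; both are allocated with gpr_strdup().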
+ */ +static char *grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, char **user_cred) { - *name_to_resolve = NULL; + char *proxy_name = NULL; + if(user_cred != NULL) { + *user_cred = NULL; + } char* uri_str = gpr_getenv("http_proxy"); - if (uri_str == NULL) return; + if (uri_str == NULL) return NULL; grpc_uri* uri = grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */); if (uri == NULL || uri->authority == NULL) { @@ -50,18 +57,33 @@ static void grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, gpr_log(GPR_ERROR, "'%s' scheme not supported in proxy URI", uri->scheme); goto done; } - char *user_cred_end = strchr(uri->authority, '@'); - if (user_cred_end != NULL) { - *name_to_resolve = gpr_strdup(user_cred_end + 1); - *user_cred_end = '\0'; - *user_cred = gpr_strdup(uri->authority); + /* Split on '@' to separate user credentials from host */ + char **authority_strs = NULL; + size_t authority_nstrs; + gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs); + GPR_ASSERT(authority_nstrs != 0); /* should have atleast 1 string */ + if(authority_nstrs == 1) { + /* User cred not present in authority */ + proxy_name = gpr_strdup(authority_strs[0]); + } else if(authority_nstrs == 2) { + /* User cred found */ + if(user_cred != NULL) { + *user_cred = gpr_strdup(authority_strs[0]); + } + proxy_name = gpr_strdup(authority_strs[1]); gpr_log(GPR_INFO, "userinfo found in proxy URI"); } else { - *name_to_resolve = gpr_strdup(uri->authority); + /* Bad authority */ + proxy_name = NULL; + } + for(size_t i = 0; i < authority_nstrs; i++) { + gpr_free(authority_strs[i]); } + gpr_free(authority_strs); done: gpr_free(uri_str); grpc_uri_destroy(uri); + return proxy_name; } static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, @@ -71,7 +93,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, char** name_to_resolve, grpc_channel_args** new_args) { char *user_cred = NULL; - grpc_get_http_proxy_server(exec_ctx, name_to_resolve, &user_cred); + *name_to_resolve = grpc_get_http_proxy_server(exec_ctx, &user_cred); if (*name_to_resolve == NULL) return false; grpc_uri* uri = grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); diff --git a/test/core/end2end/fixtures/h2_http_proxy.c b/test/core/end2end/fixtures/h2_http_proxy.c index 817a9a0a094..ce93c614dfa 100644 --- a/test/core/end2end/fixtures/h2_http_proxy.c +++ b/test/core/end2end/fixtures/h2_http_proxy.c @@ -53,11 +53,12 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( const int server_port = grpc_pick_unused_port_or_die(); gpr_join_host_port(&ffd->server_addr, "localhost", server_port); - //If we are testing proxy auth, add the proxy auth arg to proxy channel args + /* If we are testing proxy auth, add the proxy auth arg to proxy channel args + */ grpc_channel_args *proxy_args = NULL; const grpc_arg *proxy_auth_arg = grpc_channel_args_find( client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT); - if(proxy_auth_arg) { + if(proxy_auth_arg != NULL) { proxy_args = grpc_channel_args_copy_and_add(NULL, proxy_auth_arg, 1); } ffd->proxy = grpc_end2end_http_proxy_create(proxy_args); @@ -77,7 +78,7 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f, fullstack_fixture_data *ffd = f->fixture_data; char *proxy_uri; - // If testing for proxy auth, add credentials to proxy uri + /* If testing for proxy auth, add credentials to proxy uri */ if(grpc_channel_args_find( client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) == NULL) { gpr_asprintf(&proxy_uri, "http://%s", 
diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c index d69ed1a0866..239444e75fa 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -47,8 +47,9 @@ #include "src/core/lib/iomgr/tcp_client.h" #include "src/core/lib/iomgr/tcp_server.h" #include "src/core/lib/iomgr/timer.h" -#include "src/core/lib/slice/slice_internal.h" #include "src/core/lib/slice/b64.h" +#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/support/string.h" #include "test/core/util/port.h" struct grpc_end2end_http_proxy { @@ -306,6 +307,45 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg, &conn->on_write_response_done); } +/** + * Parses the proxy auth header value to check if it matches :- + * Basic + * Returns true if it matches, false otherwise + */ +static bool proxy_auth_header_matches(grpc_exec_ctx *exec_ctx, + char *proxy_auth_header_val) { + if(proxy_auth_header_val == NULL) { + return false; + } + char **auth_header_strs; + size_t auth_header_nstrs; + bool auth_header_matches = false; + // Split the auth header value on space + gpr_string_split(proxy_auth_header_val, " ", &auth_header_strs, + &auth_header_nstrs); + if(auth_header_nstrs != 2) { + goto done; + } + // Authentication type should be Basic + if(strcmp(auth_header_strs[0], "Basic") != 0) { + goto done; + } + // should match GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED after decoding + grpc_slice decoded_slice = + grpc_base64_decode(exec_ctx, auth_header_strs[1], 0); + if(grpc_slice_str_cmp( + decoded_slice, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED) != 0) { + goto done; + } + auth_header_matches = true; +done: + for(size_t i = 0; i < auth_header_nstrs; i++) { + gpr_free(auth_header_strs[i]); + } + gpr_free(auth_header_strs); + return auth_header_matches; +} + // Callback to read the HTTP CONNECT request. 
// TODO(roth): Technically, for any of the failure modes handled by this // function, we should handle the error by returning an HTTP response to @@ -354,34 +394,22 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, GRPC_ERROR_UNREF(error); return; } - // If proxy auth is being used, check if the header is present + // If proxy auth is being used, check if the header is present and as expected if(grpc_channel_args_find( conn->proxy->channel_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) != NULL) { - bool found = false, failed = false; + bool auth_header_found = false; for(size_t i = 0; i < conn->http_request.hdr_count; i++) { if(strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) { - found = true; - // Authentication type should be Basic - if(strncmp(conn->http_request.hdrs[i].value, "Basic", - strlen("Basic")) != 0) { - failed = true; - break; - } - // Check if encoded string is as expected - char *encoded_str_start = - strchr(conn->http_request.hdrs[i].value, ' ') + 1; - grpc_slice decoded_slice = - grpc_base64_decode(exec_ctx, encoded_str_start, 0); - if(grpc_slice_str_cmp( - decoded_slice, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED) != 0) { - failed = true; + if(!proxy_auth_header_matches( + exec_ctx, conn->http_request.hdrs[i].value)) { break; } + auth_header_found = true; break; } } - if(!found || failed) { + if(!auth_header_found) { const char *msg = "HTTP Connect could not verify authentication"; error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); proxy_connection_failed(exec_ctx, conn, true /* is_client */, From 7cc30c1155805537e18affd13cc4b7e44c3927be Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 18 Jul 2017 23:19:24 -0700 Subject: [PATCH 35/47] add missing fields on server call context and improve robustness of finished calls --- src/ruby/lib/grpc/generic/active_call.rb | 14 +- src/ruby/lib/grpc/generic/rpc_server.rb | 1 + src/ruby/spec/generic/client_stub_spec.rb | 166 +++++++++++++++------- src/ruby/spec/generic/rpc_server_spec.rb | 145 +++++++++++++++++++ 4 files changed, 276 insertions(+), 50 deletions(-) diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index 4a748a4ac29..84c88871680 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -44,9 +44,9 @@ module GRPC include Core::TimeConsts include Core::CallOps extend Forwardable - attr_reader :deadline, :metadata_sent, :metadata_to_send + attr_reader :deadline, :metadata_sent, :metadata_to_send, :peer, :peer_cert def_delegators :@call, :cancel, :metadata, :write_flag, :write_flag=, - :peer, :peer_cert, :trailing_metadata, :status + :trailing_metadata, :status # client_invoke begins a client invocation. # @@ -105,8 +105,13 @@ module GRPC @input_stream_done = false @call_finished = false @call_finished_mu = Mutex.new + @client_call_executed = false @client_call_executed_mu = Mutex.new + + # set the peer now so that the accessor can still function + # after the server closes the call + @peer = call.peer end # Sends the initial metadata that has yet to be sent. @@ -541,6 +546,10 @@ module GRPC end end + def attach_peer_cert(peer_cert) + @peer_cert = peer_cert + end + private # To be called once the "input stream" has been completelly @@ -612,6 +621,7 @@ module GRPC # server client_streamer handlers. 
MultiReqView = view_class(:cancelled?, :deadline, :each_queued_msg, :each_remote_read, :metadata, :output_metadata, + :peer, :peer_cert, :send_initial_metadata, :metadata_to_send, :merge_metadata_to_send, diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index ef2cc0ce918..33b3cea1fc3 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -418,6 +418,7 @@ module GRPC metadata_received: true, started: false, metadata_to_send: connect_md) + c.attach_peer_cert(an_rpc.call.peer_cert) mth = an_rpc.method.to_sym [c, mth] end diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index 7b5e6a95a43..a8653e73cfd 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -37,7 +37,9 @@ include GRPC::Core::TimeConsts include GRPC::Core::CallOps # check that methods on a finished/closed call t crash -def check_op_view_of_finished_client_call_is_robust(op_view) +def check_op_view_of_finished_client_call(op_view, + expected_metadata, + expected_trailing_metadata) # use read_response_stream to try to iterate through # possible response stream fail('need something to attempt reads') unless block_given? @@ -48,21 +50,39 @@ def check_op_view_of_finished_client_call_is_robust(op_view) expect { op_view.start_call }.to raise_error(RuntimeError) + sanity_check_values_of_accessors(op_view, + expected_metadata, + expected_trailing_metadata) + expect do op_view.wait op_view.cancel - - op_view.metadata - op_view.trailing_metadata - op_view.status - - op_view.cancelled? - op_view.deadline - op_view.write_flag op_view.write_flag = 1 end.to_not raise_error end +def sanity_check_values_of_accessors(op_view, + expected_metadata, + expected_trailing_metadata) + expected_status = Struct::Status.new + expected_status.code = 0 + expected_status.details = 'OK' + expected_status.metadata = expected_trailing_metadata + + expect(op_view.status).to eq(expected_status) + expect(op_view.metadata).to eq(expected_metadata) + expect(op_view.trailing_metadata).to eq(expected_trailing_metadata) + + expect(op_view.cancelled?).to be(false) + expect(op_view.write_flag).to be(nil) + + # The deadline attribute of a call can be either + # a GRPC::Core::TimeSpec or a Time, which are mutually exclusive. + # TODO: fix so that the accessor always returns the same type. 
+ expect(op_view.deadline.is_a?(GRPC::Core::TimeSpec) || + op_view.deadline.is_a?(Time)).to be(true) +end + describe 'ClientStub' do let(:noop) { proc { |x| x } } @@ -154,7 +174,7 @@ describe 'ClientStub' do server_port = create_test_server host = "localhost:#{server_port}" th = run_request_response(@sent_msg, @resp, @pass, - k1: 'v1', k2: 'v2') + expected_metadata: { k1: 'v1', k2: 'v2' }) stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) expect(get_response(stub)).to eq(@resp) th.join @@ -261,8 +281,14 @@ describe 'ClientStub' do def run_op_view_metadata_test(run_start_call_first) server_port = create_test_server host = "localhost:#{server_port}" - th = run_request_response(@sent_msg, @resp, @pass, - k1: 'v1', k2: 'v2') + + @server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' } + @server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' } + th = run_request_response( + @sent_msg, @resp, @pass, + expected_metadata: @metadata, + server_initial_md: @server_initial_md, + server_trailing_md: @server_trailing_md) stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) expect( get_response(stub, @@ -272,12 +298,14 @@ describe 'ClientStub' do it 'sends metadata to the server ok when running start_call first' do run_op_view_metadata_test(true) - check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) { |r| p r } end it 'does not crash when used after the call has been finished' do run_op_view_metadata_test(false) - check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) { |r| p r } end end end @@ -300,7 +328,8 @@ describe 'ClientStub' do end it 'should send metadata to the server ok' do - th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata) + th = run_client_streamer(@sent_msgs, @resp, @pass, + expected_metadata: @metadata) expect(get_response(@stub)).to eq(@resp) th.join end @@ -347,7 +376,13 @@ describe 'ClientStub' do it_behaves_like 'client streaming' def run_op_view_metadata_test(run_start_call_first) - th = run_client_streamer(@sent_msgs, @resp, @pass, **@metadata) + @server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' } + @server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' } + th = run_client_streamer( + @sent_msgs, @resp, @pass, + expected_metadata: @metadata, + server_initial_md: @server_initial_md, + server_trailing_md: @server_trailing_md) expect( get_response(@stub, run_start_call_first: run_start_call_first)).to eq(@resp) @@ -356,12 +391,14 @@ describe 'ClientStub' do it 'sends metadata to the server ok when running start_call first' do run_op_view_metadata_test(true) - check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) { |r| p r } end it 'does not crash when used after the call has been finished' do run_op_view_metadata_test(false) - check_op_view_of_finished_client_call_is_robust(@op) { |r| p r } + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) { |r| p r } end end end @@ -396,7 +433,7 @@ describe 'ClientStub' do server_port = create_test_server host = "localhost:#{server_port}" th = run_server_streamer(@sent_msg, @replys, @fail, - k1: 'v1', k2: 'v2') + expected_metadata: { k1: 'v1', k2: 'v2' }) stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) e = get_responses(stub) expect { e.collect { |r| r } }.to 
raise_error(GRPC::BadStatus) @@ -459,24 +496,31 @@ describe 'ClientStub' do def run_op_view_metadata_test(run_start_call_first) server_port = create_test_server host = "localhost:#{server_port}" - th = run_server_streamer(@sent_msg, @replys, @fail, - k1: 'v1', k2: 'v2') + @server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' } + @server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' } + th = run_server_streamer( + @sent_msg, @replys, @pass, + expected_metadata: @metadata, + server_initial_md: @server_initial_md, + server_trailing_md: @server_trailing_md) stub = GRPC::ClientStub.new(host, :this_channel_is_insecure) e = get_responses(stub, run_start_call_first: run_start_call_first) - expect { e.collect { |r| r } }.to raise_error(GRPC::BadStatus) + expect(e.collect { |r| r }).to eq(@replys) th.join end it 'should send metadata to the server ok when start_call is run first' do run_op_view_metadata_test(true) - check_op_view_of_finished_client_call_is_robust(@op) do |responses| + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) do |responses| responses.each { |r| p r } end end it 'does not crash when used after the call has been finished' do run_op_view_metadata_test(false) - check_op_view_of_finished_client_call_is_robust(@op) do |responses| + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) do |responses| responses.each { |r| p r } end end @@ -530,7 +574,7 @@ describe 'ClientStub' do it 'should send metadata to the server ok' do th = run_bidi_streamer_echo_ping_pong(@sent_msgs, @pass, true, - **@metadata) + expected_metadata: @metadata) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub) expect(e.collect { |r| r }).to eq(@sent_msgs) @@ -567,40 +611,52 @@ describe 'ClientStub' do it_behaves_like 'bidi streaming' def run_op_view_metadata_test(run_start_call_first) - th = run_bidi_streamer_handle_inputs_first(@sent_msgs, @replys, - @pass) + @server_initial_md = { 'sk1' => 'sv1', 'sk2' => 'sv2' } + @server_trailing_md = { 'tk1' => 'tv1', 'tk2' => 'tv2' } + th = run_bidi_streamer_echo_ping_pong( + @sent_msgs, @pass, true, + expected_metadata: @metadata, + server_initial_md: @server_initial_md, + server_trailing_md: @server_trailing_md) stub = GRPC::ClientStub.new(@host, :this_channel_is_insecure) e = get_responses(stub, run_start_call_first: run_start_call_first) - expect(e.collect { |r| r }).to eq(@replys) + expect(e.collect { |r| r }).to eq(@sent_msgs) th.join end it 'can run start_call before executing the call' do run_op_view_metadata_test(true) - check_op_view_of_finished_client_call_is_robust(@op) do |responses| + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) do |responses| responses.each { |r| p r } end end it 'doesnt crash when op_view used after call has finished' do run_op_view_metadata_test(false) - check_op_view_of_finished_client_call_is_robust(@op) do |responses| + check_op_view_of_finished_client_call( + @op, @server_initial_md, @server_trailing_md) do |responses| responses.each { |r| p r } end end end end - def run_server_streamer(expected_input, replys, status, **kw) - wanted_metadata = kw.clone + def run_server_streamer(expected_input, replys, status, + expected_metadata: {}, + server_initial_md: {}, + server_trailing_md: {}) + wanted_metadata = expected_metadata.clone wakey_thread do |notifier| - c = expect_server_to_be_invoked(notifier) + c = expect_server_to_be_invoked( + notifier, metadata_to_send: server_initial_md) 
wanted_metadata.each do |k, v| expect(c.metadata[k.to_s]).to eq(v) end expect(c.remote_read).to eq(expected_input) replys.each { |r| c.remote_send(r) } - c.send_status(status, status == @pass ? 'OK' : 'NOK', true) + c.send_status(status, status == @pass ? 'OK' : 'NOK', true, + metadata: server_trailing_md) end end @@ -615,10 +671,13 @@ describe 'ClientStub' do end def run_bidi_streamer_echo_ping_pong(expected_inputs, status, client_starts, - **kw) - wanted_metadata = kw.clone + expected_metadata: {}, + server_initial_md: {}, + server_trailing_md: {}) + wanted_metadata = expected_metadata.clone wakey_thread do |notifier| - c = expect_server_to_be_invoked(notifier) + c = expect_server_to_be_invoked( + notifier, metadata_to_send: server_initial_md) wanted_metadata.each do |k, v| expect(c.metadata[k.to_s]).to eq(v) end @@ -631,33 +690,44 @@ describe 'ClientStub' do expect(c.remote_read).to eq(i) end end - c.send_status(status, status == @pass ? 'OK' : 'NOK', true) + c.send_status(status, status == @pass ? 'OK' : 'NOK', true, + metadata: server_trailing_md) end end - def run_client_streamer(expected_inputs, resp, status, **kw) - wanted_metadata = kw.clone + def run_client_streamer(expected_inputs, resp, status, + expected_metadata: {}, + server_initial_md: {}, + server_trailing_md: {}) + wanted_metadata = expected_metadata.clone wakey_thread do |notifier| - c = expect_server_to_be_invoked(notifier) + c = expect_server_to_be_invoked( + notifier, metadata_to_send: server_initial_md) expected_inputs.each { |i| expect(c.remote_read).to eq(i) } wanted_metadata.each do |k, v| expect(c.metadata[k.to_s]).to eq(v) end c.remote_send(resp) - c.send_status(status, status == @pass ? 'OK' : 'NOK', true) + c.send_status(status, status == @pass ? 'OK' : 'NOK', true, + metadata: server_trailing_md) end end - def run_request_response(expected_input, resp, status, **kw) - wanted_metadata = kw.clone + def run_request_response(expected_input, resp, status, + expected_metadata: {}, + server_initial_md: {}, + server_trailing_md: {}) + wanted_metadata = expected_metadata.clone wakey_thread do |notifier| - c = expect_server_to_be_invoked(notifier) + c = expect_server_to_be_invoked( + notifier, metadata_to_send: server_initial_md) expect(c.remote_read).to eq(expected_input) wanted_metadata.each do |k, v| expect(c.metadata[k.to_s]).to eq(v) end c.remote_send(resp) - c.send_status(status, status == @pass ? 'OK' : 'NOK', true) + c.send_status(status, status == @pass ? 
'OK' : 'NOK', true, + metadata: server_trailing_md) end end @@ -675,13 +745,13 @@ describe 'ClientStub' do @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure) end - def expect_server_to_be_invoked(notifier) + def expect_server_to_be_invoked(notifier, metadata_to_send: nil) @server.start notifier.notify(nil) recvd_rpc = @server.request_call recvd_call = recvd_rpc.call recvd_call.metadata = recvd_rpc.metadata - recvd_call.run_batch(SEND_INITIAL_METADATA => nil) + recvd_call.run_batch(SEND_INITIAL_METADATA => metadata_to_send) GRPC::ActiveCall.new(recvd_call, noop, noop, INFINITE_FUTURE, metadata_received: true) end diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index 9633a828a2a..4258d59851d 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -111,6 +111,47 @@ end SlowStub = SlowService.rpc_stub_class +# a test service that hangs onto call objects +# and uses them after the server-side call has been +# finished +class CheckCallAfterFinishedService + include GRPC::GenericService + rpc :an_rpc, EchoMsg, EchoMsg + rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg + rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg) + rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg) + attr_reader :server_side_call + + def an_rpc(req, call) + fail 'shouldnt reuse service' unless @call.nil? + @server_side_call = call + req + end + + def a_client_streaming_rpc(call) + fail 'shouldnt reuse service' unless @call.nil? + @server_side_call = call + # iterate through requests so call can complete + call.each_remote_read.each { |r| p r } + EchoMsg.new + end + + def a_server_streaming_rpc(_, call) + fail 'shouldnt reuse service' unless @call.nil? + @server_side_call = call + [EchoMsg.new, EchoMsg.new] + end + + def a_bidi_rpc(requests, call) + fail 'shouldnt reuse service' unless @call.nil? 
+ @server_side_call = call + requests.each { |r| p r } + [EchoMsg.new, EchoMsg.new] + end +end + +CheckCallAfterFinishedServiceStub = CheckCallAfterFinishedService.rpc_stub_class + describe GRPC::RpcServer do RpcServer = GRPC::RpcServer StatusCodes = GRPC::Core::StatusCodes @@ -505,5 +546,109 @@ describe GRPC::RpcServer do t.join end end + + context 'when call objects are used after calls have completed' do + before(:each) do + server_opts = { + poll_period: 1 + } + @srv = RpcServer.new(**server_opts) + alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure) + @alt_host = "0.0.0.0:#{alt_port}" + + @service = CheckCallAfterFinishedService.new + @srv.handle(@service) + @srv_thd = Thread.new { @srv.run } + @srv.wait_till_running + end + + # check that the server-side call is still in a usable state even + # after it has finished + def check_single_req_view_of_finished_call(call) + common_check_of_finished_server_call(call) + + expect(call.peer).to be_a(String) + expect(call.peer_cert).to be(nil) + end + + def check_multi_req_view_of_finished_call(call) + common_check_of_finished_server_call(call) + + expect do + call.each_remote_read.each { |r| p r } + end.to raise_error(GRPC::Core::CallError) + end + + def common_check_of_finished_server_call(call) + expect do + call.merge_metadata_to_send({}) + end.to raise_error(RuntimeError) + + expect do + call.send_initial_metadata + end.to_not raise_error + + expect(call.cancelled?).to be(false) + expect(call.metadata).to be_a(Hash) + expect(call.metadata['user-agent']).to be_a(String) + + expect(call.metadata_sent).to be(true) + expect(call.output_metadata).to eq({}) + expect(call.metadata_to_send).to eq({}) + expect(call.deadline.is_a?(Time)).to be(true) + end + + it 'should not crash when call used after an unary call is finished' do + req = EchoMsg.new + stub = CheckCallAfterFinishedServiceStub.new(@alt_host, + :this_channel_is_insecure) + resp = stub.an_rpc(req) + expect(resp).to be_a(EchoMsg) + @srv.stop + @srv_thd.join + + check_single_req_view_of_finished_call(@service.server_side_call) + end + + it 'should not crash when call used after client streaming finished' do + requests = [EchoMsg.new, EchoMsg.new] + stub = CheckCallAfterFinishedServiceStub.new(@alt_host, + :this_channel_is_insecure) + resp = stub.a_client_streaming_rpc(requests) + expect(resp).to be_a(EchoMsg) + @srv.stop + @srv_thd.join + + check_multi_req_view_of_finished_call(@service.server_side_call) + end + + it 'should not crash when call used after server streaming finished' do + req = EchoMsg.new + stub = CheckCallAfterFinishedServiceStub.new(@alt_host, + :this_channel_is_insecure) + responses = stub.a_server_streaming_rpc(req) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + @srv.stop + @srv_thd.join + + check_single_req_view_of_finished_call(@service.server_side_call) + end + + it 'should not crash when call used after a bidi call is finished' do + requests = [EchoMsg.new, EchoMsg.new] + stub = CheckCallAfterFinishedServiceStub.new(@alt_host, + :this_channel_is_insecure) + responses = stub.a_bidi_rpc(requests) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + @srv.stop + @srv_thd.join + + check_multi_req_view_of_finished_call(@service.server_side_call) + end + end end end From a74ea8602d60bfdf4d99642e6c72573b803e69dc Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 19 Jul 2017 17:43:30 -0700 Subject: [PATCH 36/47] Nit correction --- src/core/ext/filters/client_channel/http_proxy.c | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index a8a23ceb9ed..2e2471e68d6 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -36,8 +36,8 @@ /** * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or - * NULL on error. Also sets 'user_cred' if it is not NULL to user credentials - * if present in the 'http_proxy' env var. + * NULL on error. Also sets 'user_cred' to user credentials present in the + * 'http_proxy' env var, NULL if not present. */ static char *grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, char **user_cred) { @@ -61,7 +61,7 @@ static char *grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, char **authority_strs = NULL; size_t authority_nstrs; gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs); - GPR_ASSERT(authority_nstrs != 0); /* should have atleast 1 string */ + GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */ if(authority_nstrs == 1) { /* User cred not present in authority */ proxy_name = gpr_strdup(authority_strs[0]); From 0c22cad01fe778ca2df217c7ef2a68dfac24254d Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Thu, 20 Jul 2017 10:33:52 -0700 Subject: [PATCH 37/47] Improve a comment in inproc transport --- src/core/ext/transport/inproc/inproc_transport.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c index 4df64d81e2b..14498021ebd 100644 --- a/src/core/ext/transport/inproc/inproc_transport.c +++ b/src/core/ext/transport/inproc/inproc_transport.c @@ -190,8 +190,11 @@ typedef struct inproc_stream { static bool inproc_slice_byte_stream_next(grpc_exec_ctx *exec_ctx, grpc_byte_stream *bs, size_t max, grpc_closure *on_complete) { - inproc_slice_byte_stream *stream = (inproc_slice_byte_stream *)bs; - return (stream->le->sb.count != 0); + // Because inproc transport always provides the entire message atomically, + // the byte stream always has data available when this function is called. + // Thus, this function always returns true (unlike other transports) and + // there is never any need to schedule a closure + return true; } static grpc_error *inproc_slice_byte_stream_pull(grpc_exec_ctx *exec_ctx, From 655c8b10f647c339cbc54afb8dfde95980d303e7 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Thu, 20 Jul 2017 10:23:43 -0700 Subject: [PATCH 38/47] Fix Node build for older versions of Electron on Mac --- binding.gyp | 3 ++- templates/binding.gyp.template | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/binding.gyp b/binding.gyp index 705b8f22c0c..d5902cc68f0 100644 --- a/binding.gyp +++ b/binding.gyp @@ -193,7 +193,8 @@ '-Wno-unused-parameter', '-DOSATOMIC_USE_INLINED=1', '-stdlib=libc++', - '-std=c++11' + '-std=c++11', + '-Wno-error=deprecated-declarations' ], }, }] diff --git a/templates/binding.gyp.template b/templates/binding.gyp.template index b1e1374249d..adb7d9f7746 100644 --- a/templates/binding.gyp.template +++ b/templates/binding.gyp.template @@ -176,7 +176,8 @@ '${item}', % endfor '-stdlib=libc++', - '-std=c++11' + '-std=c++11', + '-Wno-error=deprecated-declarations' ], % endif }, From a858990b227055a8a1f61fb139952d85b8ab55b9 Mon Sep 17 00:00:00 2001 From: Adele Zhou Date: Thu, 20 Jul 2017 14:29:39 -0700 Subject: [PATCH 39/47] Update perf link. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0edea885185..995f877219c 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ See [INSTALL](INSTALL.md) for installation instructions for various platforms. See [tools/run_tests](tools/run_tests) for more guidance on how to run various test suites (e.g. unit tests, interop tests, benchmarks) -See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5712453606309888) for the performance numbers for v1.0.x. +See [Performance dashboard](http://performance-dot-grpc-testing.appspot.com/explore?dashboard=5636470266134528) for the performance numbers for the latest released version. # Repository Structure & Status From 5c92dcde1ee0c25a3c341c88af62fec600645252 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Thu, 20 Jul 2017 14:55:49 -0700 Subject: [PATCH 40/47] Allow passing regex to bm_diff tools --- .../microbenchmarks/bm_diff/bm_diff.py | 5 +++-- .../microbenchmarks/bm_diff/bm_main.py | 14 ++++++------- .../microbenchmarks/bm_diff/bm_run.py | 21 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py index 809817a1a8c..1ac951f3d86 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py @@ -144,7 +144,7 @@ def _read_json(filename, badjson_files, nonexistant_files): def fmt_dict(d): return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d]) -def diff(bms, loops, track, old, new, counters): +def diff(bms, loops, regex, track, old, new, counters): benchmarks = collections.defaultdict(Benchmark) badjson_files = {} @@ -153,7 +153,8 @@ def diff(bms, loops, track, old, new, counters): for loop in range(0, loops): for line in subprocess.check_output( ['bm_diff_%s/opt/%s' % (old, bm), - '--benchmark_list_tests']).splitlines(): + '--benchmark_list_tests', + '--benchmark_filter=%s' % regex]).splitlines(): stripped_line = line.strip().replace("/", "_").replace( "<", "_").replace(">", "_").replace(", ", "_") js_new_opt = _read_json('%s.%s.opt.%s.%d.json' % diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py index 8b4e0cb69a4..5aa11ac391e 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py @@ -63,10 +63,10 @@ def _args(): help='Name of baseline run to compare to. 
Ususally just called "old"') argp.add_argument( '-r', - '--repetitions', - type=int, - default=1, - help='Number of repetitions to pass to the benchmarks') + '--regex', + type=str, + default="", + help='Regex to filter benchmarks run') argp.add_argument( '-l', '--loops', @@ -125,10 +125,10 @@ def main(args): subprocess.check_call(['git', 'checkout', where_am_i]) subprocess.check_call(['git', 'submodule', 'update']) - bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters) - bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters) + bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.regex, args.counters) + bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.regex, args.counters) - diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old, + diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old, 'new', args.counters) if diff: text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py index 72b3d3cf106..206f7c5845f 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py @@ -56,10 +56,10 @@ def _args(): ) argp.add_argument( '-r', - '--repetitions', - type=int, - default=1, - help='Number of repetitions to pass to the benchmarks') + '--regex', + type=str, + default="", + help='Regex to filter benchmarks run') argp.add_argument( '-l', '--loops', @@ -77,18 +77,17 @@ def _args(): return args -def _collect_bm_data(bm, cfg, name, reps, idx, loops): +def _collect_bm_data(bm, cfg, name, regex, idx, loops): jobs_list = [] for line in subprocess.check_output( ['bm_diff_%s/%s/%s' % (name, cfg, bm), - '--benchmark_list_tests']).splitlines(): + '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines(): stripped_line = line.strip().replace("/", "_").replace( "<", "_").replace(">", "_").replace(", ", "_") cmd = [ 'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' % line, '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json', - '--benchmark_repetitions=%d' % (reps) ] jobs_list.append( jobset.JobSpec( @@ -100,13 +99,13 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops): return jobs_list -def run(name, benchmarks, jobs, loops, reps, counters): +def run(name, benchmarks, jobs, loops, regex, counters): jobs_list = [] for loop in range(0, loops): for bm in benchmarks: - jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops) + jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops) if counters: - jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop, + jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop, loops) random.shuffle(jobs_list, random.SystemRandom().random) jobset.run(jobs_list, maxjobs=jobs) @@ -114,4 +113,4 @@ def run(name, benchmarks, jobs, loops, reps, counters): if __name__ == '__main__': args = _args() - run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters) + run(args.name, args.benchmarks, args.jobs, args.loops, args.regex, args.counters) From 7cc83e0701aafb2cc2412796b72bddbfbda4eac4 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Wed, 19 Jul 2017 17:39:04 -0700 Subject: [PATCH 41/47] add a standalone client auth test --- src/ruby/spec/client_auth_spec.rb | 137 +++++++++++++++++++++++ 
src/ruby/spec/generic/rpc_server_spec.rb | 8 +- src/ruby/spec/testdata/client.key | 16 +++ src/ruby/spec/testdata/client.pem | 14 +++ 4 files changed, 171 insertions(+), 4 deletions(-) create mode 100644 src/ruby/spec/client_auth_spec.rb create mode 100644 src/ruby/spec/testdata/client.key create mode 100644 src/ruby/spec/testdata/client.pem diff --git a/src/ruby/spec/client_auth_spec.rb b/src/ruby/spec/client_auth_spec.rb new file mode 100644 index 00000000000..79c9192aa5f --- /dev/null +++ b/src/ruby/spec/client_auth_spec.rb @@ -0,0 +1,137 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require 'grpc' + +def create_channel_creds + test_root = File.join(File.dirname(__FILE__), 'testdata') + files = ['ca.pem', 'client.key', 'client.pem'] + creds = files.map { |f| File.open(File.join(test_root, f)).read } + GRPC::Core::ChannelCredentials.new(creds[0], creds[1], creds[2]) +end + +def client_cert + test_root = File.join(File.dirname(__FILE__), 'testdata') + cert = File.open(File.join(test_root, 'client.pem')).read + fail unless cert.is_a?(String) + cert +end + +def create_server_creds + test_root = File.join(File.dirname(__FILE__), 'testdata') + p "test root: #{test_root}" + files = ['ca.pem', 'server1.key', 'server1.pem'] + creds = files.map { |f| File.open(File.join(test_root, f)).read } + GRPC::Core::ServerCredentials.new( + creds[0], + [{ private_key: creds[1], cert_chain: creds[2] }], + true) # force client auth +end + +# A test message +class EchoMsg + def self.marshal(_o) + '' + end + + def self.unmarshal(_o) + EchoMsg.new + end +end + +# a test service that checks the cert of its peer +class SslTestService + include GRPC::GenericService + rpc :an_rpc, EchoMsg, EchoMsg + rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg + rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg) + rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg) + + def check_peer_cert(call) + error_msg = "want:\n#{client_cert}\n\ngot:\n#{call.peer_cert}" + fail(error_msg) unless call.peer_cert == client_cert + end + + def an_rpc(req, call) + check_peer_cert(call) + req + end + + def a_client_streaming_rpc(call) + check_peer_cert(call) + call.each_remote_read.each { |r| p r } + EchoMsg.new + end + + def a_server_streaming_rpc(_, call) + check_peer_cert(call) + [EchoMsg.new, EchoMsg.new] + end + + def a_bidi_rpc(requests, call) + check_peer_cert(call) + requests.each { |r| p r } + [EchoMsg.new, EchoMsg.new] + end +end + +SslTestServiceStub = SslTestService.rpc_stub_class + +describe 'client-server auth' do + RpcServer = GRPC::RpcServer + + before(:all) do + server_opts = { + poll_period: 1 + } + @srv = RpcServer.new(**server_opts) + port = @srv.add_http2_port('0.0.0.0:0', create_server_creds) + @srv.handle(SslTestService) + @srv_thd = Thread.new { @srv.run } + @srv.wait_till_running + + client_opts = { + channel_args: { + GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr' + } + } + @stub = SslTestServiceStub.new("localhost:#{port}", + create_channel_creds, + **client_opts) + end + 
+ after(:all) do + expect(@srv.stopped?).to be(false) + @srv.stop + @srv_thd.join + end + + it 'client-server auth with unary RPCs' do + @stub.an_rpc(EchoMsg.new) + end + + it 'client-server auth with client streaming RPCs' do + @stub.a_client_streaming_rpc([EchoMsg.new, EchoMsg.new]) + end + + it 'client-server auth with server streaming RPCs' do + responses = @stub.a_server_streaming_rpc(EchoMsg.new) + responses.each { |r| p r } + end + + it 'client-server auth with bidi RPCs' do + responses = @stub.a_bidi_rpc([EchoMsg.new, EchoMsg.new]) + responses.each { |r| p r } + end +end diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index 4258d59851d..e0646f45997 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -123,13 +123,13 @@ class CheckCallAfterFinishedService attr_reader :server_side_call def an_rpc(req, call) - fail 'shouldnt reuse service' unless @call.nil? + fail 'shouldnt reuse service' unless @server_side_call.nil? @server_side_call = call req end def a_client_streaming_rpc(call) - fail 'shouldnt reuse service' unless @call.nil? + fail 'shouldnt reuse service' unless @server_side_call.nil? @server_side_call = call # iterate through requests so call can complete call.each_remote_read.each { |r| p r } @@ -137,13 +137,13 @@ class CheckCallAfterFinishedService end def a_server_streaming_rpc(_, call) - fail 'shouldnt reuse service' unless @call.nil? + fail 'shouldnt reuse service' unless @server_side_call.nil? @server_side_call = call [EchoMsg.new, EchoMsg.new] end def a_bidi_rpc(requests, call) - fail 'shouldnt reuse service' unless @call.nil? + fail 'shouldnt reuse service' unless @server_side_call.nil? @server_side_call = call requests.each { |r| p r } [EchoMsg.new, EchoMsg.new] diff --git a/src/ruby/spec/testdata/client.key b/src/ruby/spec/testdata/client.key new file mode 100644 index 00000000000..f48d0735d99 --- /dev/null +++ b/src/ruby/spec/testdata/client.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICeQIBADANBgkqhkiG9w0BAQEFAASCAmMwggJfAgEAAoGBAOxUR9uhvhbeVUIM +s5WbH0px0mehl2+6sZpNjzvE2KimZpHzMJHukVH0Ffkvhs0b8+S5Ut9VNUAqd3IM +JCCAEGtRNoQhM1t9Yr2zAckSvbRacp+FL/Cj9eDmyo00KsVGaeefA4Dh4OW+ZhkT +NKcldXqkSuj1sEf244JZYuqZp6/tAgMBAAECgYEAi2NSVqpZMafE5YYUTcMGe6QS +k2jtpsqYgggI2RnLJ/2tNZwYI5pwP8QVSbnMaiF4gokD5hGdrNDfTnb2v+yIwYEH +0w8+oG7Z81KodsiZSIDJfTGsAZhVNwOz9y0VD8BBZZ1/274Zh52AUKLjZS/ZwIbS +W2ywya855dPnH/wj+0ECQQD9X8D920kByTNHhBG18biAEZ4pxs9f0OAG8333eVcI +w2lJDLsYDZrCB2ocgA3lUdozlzPC7YDYw8reg0tkiRY5AkEA7sdNzOeQsQRn7++5 +0bP9DtT/iON1gbfxRzCfCfXdoOtfQWIzTePWtURt9X/5D9NofI0Rg5W2oGy/MLe5 +/sXHVQJBAIup5XrJDkQywNZyAUU2ecn2bCWBFjwtqd+LBmuMciI9fOKsZtEKZrz/ +U0lkeMRoSwvXE8wmGLjjrAbdfohrXFkCQQDZEx/LtIl6JINJQiswVe0tWr6k+ASP +1WXoTm+HYpoF/XUvv9LccNF1IazFj34hwRQwhx7w/V52Ieb+p0jUMYGxAkEAjDhd +9pBO1fKXWiXzi9ZKfoyTNcUq3eBSVKwPG2nItg5ycXengjT5sgcWDnciIzW7BIVI +JiqOszq9GWESErAatg== +-----END PRIVATE KEY----- diff --git a/src/ruby/spec/testdata/client.pem b/src/ruby/spec/testdata/client.pem new file mode 100644 index 00000000000..e332091019b --- /dev/null +++ b/src/ruby/spec/testdata/client.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICHzCCAYgCAQEwDQYJKoZIhvcNAQEFBQAwVjELMAkGA1UEBhMCQVUxEzARBgNV +BAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0 +ZDEPMA0GA1UEAwwGdGVzdGNhMB4XDTE0MDcxNzIzNTYwMloXDTI0MDcxNDIzNTYw +MlowWjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDETMBEGA1UEAwwKdGVzdGNsaWVudDCB 
+nzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA7FRH26G+Ft5VQgyzlZsfSnHSZ6GX +b7qxmk2PO8TYqKZmkfMwke6RUfQV+S+GzRvz5LlS31U1QCp3cgwkIIAQa1E2hCEz +W31ivbMByRK9tFpyn4Uv8KP14ObKjTQqxUZp558DgOHg5b5mGRM0pyV1eqRK6PWw +R/bjglli6pmnr+0CAwEAATANBgkqhkiG9w0BAQUFAAOBgQAStSm5PM7ubROiKK6/ +T2FkKlhiTOx+Ryenm3Eio59emq+jXl+1nhPySX5G2PQzSR5vd1dIhwgZSR4Gyttk +tRZ57k/NI1brUW8joiEOMJA/Mr7H7asx7wIRYDE91Fs8GkKWd5LhoPAQj+qdG35C +OO+svdkmqH0KZo320ZUqdl2ooQ== +-----END CERTIFICATE----- From 9d7def71fe04ec25b7cace53adb5775dbcccc353 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Thu, 20 Jul 2017 16:41:27 -0700 Subject: [PATCH 42/47] Address comments: change names and remove unnecessary lines --- src/core/lib/iomgr/iomgr_uv.c | 4 ++-- src/core/lib/iomgr/iomgr_uv.h | 8 ++++---- src/core/lib/iomgr/pollset_uv.c | 12 ++++++------ src/core/lib/iomgr/resolve_address_uv.c | 4 ++-- src/core/lib/iomgr/tcp_client_uv.c | 2 +- src/core/lib/iomgr/tcp_server_uv.c | 14 +++++++------- src/core/lib/iomgr/tcp_uv.c | 4 ++-- src/core/lib/iomgr/timer_uv.c | 6 +++--- 8 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/core/lib/iomgr/iomgr_uv.c b/src/core/lib/iomgr/iomgr_uv.c index ffec6bcf766..df5d23af3bb 100644 --- a/src/core/lib/iomgr/iomgr_uv.c +++ b/src/core/lib/iomgr/iomgr_uv.c @@ -26,14 +26,14 @@ #include "src/core/lib/iomgr/pollset_uv.h" #include "src/core/lib/iomgr/tcp_uv.h" -gpr_thd_id grpc_init_thread; +gpr_thd_id g_init_thread; void grpc_iomgr_platform_init(void) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_global_init(); grpc_register_tracer(&grpc_tcp_trace); grpc_executor_set_threading(&exec_ctx, false); - grpc_init_thread = gpr_thd_currentid(); + g_init_thread = gpr_thd_currentid(); grpc_exec_ctx_finish(&exec_ctx); } void grpc_iomgr_platform_flush(void) {} diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_uv.h index bd406c34f72..3b4daaa73ba 100644 --- a/src/core/lib/iomgr/iomgr_uv.h +++ b/src/core/lib/iomgr/iomgr_uv.h @@ -25,13 +25,13 @@ /* The thread ID of the thread on which grpc was initialized. 
Used to verify * that all calls into libuv are made on that same thread */ -extern gpr_thd_id grpc_init_thread; +extern gpr_thd_id g_init_thread; #ifdef GRPC_UV_THREAD_CHECK -#define GRPC_ASSERT_SAME_THREAD() \ - GPR_ASSERT(gpr_thd_currentid() == grpc_init_thread) +#define GRPC_UV_ASSERT_SAME_THREAD() \ + GPR_ASSERT(gpr_thd_currentid() == g_init_thread) #else -#define GRPC_ASSERT_SAME_THREAD() +#define GRPC_UV_ASSERT_SAME_THREAD() #endif /* GRPC_UV_THREAD_CHECK */ #endif /* GRPC_CORE_LIB_IOMGR_IOMGR_UV_H */ diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c index e59801834dd..a79fe89d3ef 100644 --- a/src/core/lib/iomgr/pollset_uv.c +++ b/src/core/lib/iomgr/pollset_uv.c @@ -71,7 +71,7 @@ void grpc_pollset_global_init(void) { } void grpc_pollset_global_shutdown(void) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); gpr_mu_destroy(&grpc_polling_mu); uv_close((uv_handle_t *)dummy_uv_handle, dummy_handle_close_cb); } @@ -81,7 +81,7 @@ static void timer_run_cb(uv_timer_t *timer) {} static void timer_close_cb(uv_handle_t *handle) { handle->data = (void *)1; } void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); *mu = &grpc_polling_mu; uv_timer_init(uv_default_loop(), &pollset->timer); pollset->shutting_down = 0; @@ -90,7 +90,7 @@ void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) { void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_closure *closure) { GPR_ASSERT(!pollset->shutting_down); - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); pollset->shutting_down = 1; if (grpc_pollset_work_run_loop) { // Drain any pending UV callbacks without blocking @@ -103,7 +103,7 @@ void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, } void grpc_pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); uv_close((uv_handle_t *)&pollset->timer, timer_close_cb); // timer.data is a boolean indicating that the timer has finished closing pollset->timer.data = (void *)0; @@ -118,7 +118,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker **worker_hdl, gpr_timespec now, gpr_timespec deadline) { uint64_t timeout; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); gpr_mu_unlock(&grpc_polling_mu); if (grpc_pollset_work_run_loop) { if (gpr_time_cmp(deadline, now) >= 0) { @@ -147,7 +147,7 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_error *grpc_pollset_kick(grpc_pollset *pollset, grpc_pollset_worker *specific_worker) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0); return GRPC_ERROR_NONE; } diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c index f910ec26b26..2d438e8b486 100644 --- a/src/core/lib/iomgr/resolve_address_uv.c +++ b/src/core/lib/iomgr/resolve_address_uv.c @@ -175,7 +175,7 @@ static grpc_error *blocking_resolve_address_impl( grpc_error *err; int retry_status; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); req.addrinfo = NULL; @@ -231,7 +231,7 @@ static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name, char *port = NULL; grpc_error *err; int s; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); err = try_split_host_port(name, default_port, &host, &port); if (err != GRPC_ERROR_NONE) { GRPC_CLOSURE_SCHED(exec_ctx, 
on_done, err); diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 098ff7648de..786c456b735 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -125,7 +125,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, (void)channel_args; (void)interested_parties; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (channel_args != NULL) { for (size_t i = 0; i < channel_args->num_args; i++) { diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c index 7b45e08c9b8..7edf22a0326 100644 --- a/src/core/lib/iomgr/tcp_server_uv.c +++ b/src/core/lib/iomgr/tcp_server_uv.c @@ -108,7 +108,7 @@ grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx, } grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); gpr_ref(&s->refs); return s; } @@ -147,12 +147,10 @@ static void handle_close_callback(uv_handle_t *handle) { } static void close_listener(grpc_tcp_listener *sp) { - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; if (!sp->closed) { sp->closed = true; uv_close((uv_handle_t *)sp->handle, handle_close_callback); } - grpc_exec_ctx_finish(&exec_ctx); } static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { @@ -175,7 +173,7 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { } void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (gpr_unref(&s->refs)) { /* Complete shutdown_starting work before destroying. */ grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT; @@ -248,7 +246,9 @@ static void on_connect(uv_stream_t *server, int status) { GPR_ASSERT(!sp->has_pending_connection); - gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server); + if (GRPC_TRACER_ON(grpc_tcp_trace)) { + gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p incoming connection", sp->server); + } // Create acceptor. 
if (sp->server->on_accept_cb) { @@ -338,7 +338,7 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s, int status; grpc_error *error = GRPC_ERROR_NONE; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (s->tail != NULL) { port_index = s->tail->port_index + 1; @@ -420,7 +420,7 @@ void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *server, grpc_tcp_listener *sp; (void)pollsets; (void)pollset_count; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "SERVER_START %p", server); } diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c index 9b1a8db7235..a05c19b4ac9 100644 --- a/src/core/lib/iomgr/tcp_uv.c +++ b/src/core/lib/iomgr/tcp_uv.c @@ -184,7 +184,7 @@ static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, grpc_tcp *tcp = (grpc_tcp *)ep; int status; grpc_error *error = GRPC_ERROR_NONE; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); GPR_ASSERT(tcp->read_cb == NULL); tcp->read_cb = cb; tcp->read_slices = read_slices; @@ -238,7 +238,7 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep, unsigned int i; grpc_slice *slice; uv_write_t *write_req; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (GRPC_TRACER_ON(grpc_tcp_trace)) { size_t j; diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c index ff2570c60d1..70f49bcbe87 100644 --- a/src/core/lib/iomgr/timer_uv.c +++ b/src/core/lib/iomgr/timer_uv.c @@ -44,7 +44,7 @@ static void stop_uv_timer(uv_timer_t *handle) { void run_expired_timer(uv_timer_t *handle) { grpc_timer *timer = (grpc_timer *)handle->data; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); GPR_ASSERT(timer->pending); timer->pending = 0; GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE); @@ -57,7 +57,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_timespec now) { uint64_t timeout; uv_timer_t *uv_timer; - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); timer->closure = closure; if (gpr_time_cmp(deadline, now) <= 0) { timer->pending = 0; @@ -78,7 +78,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, } void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { - GRPC_ASSERT_SAME_THREAD(); + GRPC_UV_ASSERT_SAME_THREAD(); if (timer->pending) { timer->pending = 0; GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); From c62ce80b805837dc2246b453a4a8b76c886cb6a3 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Thu, 20 Jul 2017 16:48:01 -0700 Subject: [PATCH 43/47] Test credentials are passed with channel arg. Renamed macros and refactored code as per suggestions. Renamed test to proxy_auth and changed it to use simple_request instead of a payload. 
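
Illustrative sketch of the flow this change exercises (assuming the helper
names used elsewhere in this series; the env-var value and variable names
below are examples only, not code from the patch): credentials embedded in
the http_proxy env var become an HTTP CONNECT header that is carried to the
handshaker as a channel arg.

    /* Example: http_proxy=http://user:pass@proxy.example.com:3128 (hypothetical) */
    char *user_cred = NULL;
    char *proxy_name = get_http_proxy_server(exec_ctx, &user_cred);
    /* proxy_name -> "proxy.example.com:3128", user_cred -> "user:pass" */
    if (user_cred != NULL) {
      /* RFC 7617 basic auth: base64-encode the credentials */
      char *encoded_user_cred =
          grpc_base64_encode(user_cred, strlen(user_cred), 0, 0);
      char *header;
      gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred);
      /* The header rides alongside GRPC_ARG_HTTP_CONNECT_SERVER in the
         channel args consumed by the HTTP CONNECT handshaker */
      grpc_arg arg =
          grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header);
      (void)arg;
      gpr_free(encoded_user_cred);
      gpr_free(header);
      gpr_free(user_cred);
    }
    gpr_free(proxy_name);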
--- CMakeLists.txt | 4 +- Makefile | 4 +- .../ext/filters/client_channel/http_proxy.c | 58 +- test/core/end2end/end2end_nosec_tests.c | 16 +- test/core/end2end/end2end_tests.c | 16 +- test/core/end2end/fixtures/h2_http_proxy.c | 23 +- .../end2end/fixtures/http_proxy_fixture.c | 68 +- .../end2end/fixtures/http_proxy_fixture.h | 12 +- test/core/end2end/gen_build_yaml.py | 15 +- test/core/end2end/generate_tests.bzl | 18 +- ...payload_with_proxy_auth.c => proxy_auth.c} | 155 +--- .../generated/sources_and_headers.json | 4 +- tools/run_tests/generated/tests.json | 848 +----------------- .../end2end_nosec_tests.vcxproj | 4 +- .../end2end_nosec_tests.vcxproj.filters | 6 +- .../tests/end2end_tests/end2end_tests.vcxproj | 4 +- .../end2end_tests.vcxproj.filters | 6 +- 17 files changed, 201 insertions(+), 1060 deletions(-) rename test/core/end2end/tests/{payload_with_proxy_auth.c => proxy_auth.c} (60%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1d6cf021d4d..266f2c0774a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4352,9 +4352,9 @@ add_library(end2end_tests test/core/end2end/tests/no_logging.c test/core/end2end/tests/no_op.c test/core/end2end/tests/payload.c - test/core/end2end/tests/payload_with_proxy_auth.c test/core/end2end/tests/ping.c test/core/end2end/tests/ping_pong_streaming.c + test/core/end2end/tests/proxy_auth.c test/core/end2end/tests/registered_call.c test/core/end2end/tests/request_with_flags.c test/core/end2end/tests/request_with_payload.c @@ -4452,9 +4452,9 @@ add_library(end2end_nosec_tests test/core/end2end/tests/no_logging.c test/core/end2end/tests/no_op.c test/core/end2end/tests/payload.c - test/core/end2end/tests/payload_with_proxy_auth.c test/core/end2end/tests/ping.c test/core/end2end/tests/ping_pong_streaming.c + test/core/end2end/tests/proxy_auth.c test/core/end2end/tests/registered_call.c test/core/end2end/tests/request_with_flags.c test/core/end2end/tests/request_with_payload.c diff --git a/Makefile b/Makefile index a311dee6592..7b53024b6c1 100644 --- a/Makefile +++ b/Makefile @@ -7950,9 +7950,9 @@ LIBEND2END_TESTS_SRC = \ test/core/end2end/tests/no_logging.c \ test/core/end2end/tests/no_op.c \ test/core/end2end/tests/payload.c \ - test/core/end2end/tests/payload_with_proxy_auth.c \ test/core/end2end/tests/ping.c \ test/core/end2end/tests/ping_pong_streaming.c \ + test/core/end2end/tests/proxy_auth.c \ test/core/end2end/tests/registered_call.c \ test/core/end2end/tests/request_with_flags.c \ test/core/end2end/tests/request_with_payload.c \ @@ -8045,9 +8045,9 @@ LIBEND2END_NOSEC_TESTS_SRC = \ test/core/end2end/tests/no_logging.c \ test/core/end2end/tests/no_op.c \ test/core/end2end/tests/payload.c \ - test/core/end2end/tests/payload_with_proxy_auth.c \ test/core/end2end/tests/ping.c \ test/core/end2end/tests/ping_pong_streaming.c \ + test/core/end2end/tests/proxy_auth.c \ test/core/end2end/tests/registered_call.c \ test/core/end2end/tests/request_with_flags.c \ test/core/end2end/tests/request_with_payload.c \ diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index 2e2471e68d6..8a14b4e57c6 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -30,21 +30,19 @@ #include "src/core/ext/filters/client_channel/proxy_mapper_registry.h" #include "src/core/ext/filters/client_channel/uri_parser.h" #include "src/core/lib/channel/channel_args.h" +#include "src/core/lib/slice/b64.h" #include "src/core/lib/support/env.h" #include 
"src/core/lib/support/string.h" -#include "src/core/lib/slice/b64.h" /** * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or - * NULL on error. Also sets 'user_cred' to user credentials present in the - * 'http_proxy' env var, NULL if not present. + * NULL on error. Also sets 'user_cred' to user credentials if present in the + * 'http_proxy' env var, otherwise leaves it unchanged. It is caller's + * responsibility to gpr_free user_cred. */ -static char *grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, - char **user_cred) { - char *proxy_name = NULL; - if(user_cred != NULL) { - *user_cred = NULL; - } +static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) { + GPR_ASSERT(user_cred != NULL); + char* proxy_name = NULL; char* uri_str = gpr_getenv("http_proxy"); if (uri_str == NULL) return NULL; grpc_uri* uri = @@ -58,27 +56,25 @@ static char *grpc_get_http_proxy_server(grpc_exec_ctx* exec_ctx, goto done; } /* Split on '@' to separate user credentials from host */ - char **authority_strs = NULL; + char** authority_strs = NULL; size_t authority_nstrs; gpr_string_split(uri->authority, "@", &authority_strs, &authority_nstrs); GPR_ASSERT(authority_nstrs != 0); /* should have at least 1 string */ - if(authority_nstrs == 1) { - /* User cred not present in authority */ - proxy_name = gpr_strdup(authority_strs[0]); - } else if(authority_nstrs == 2) { - /* User cred found */ - if(user_cred != NULL) { - *user_cred = gpr_strdup(authority_strs[0]); - } - proxy_name = gpr_strdup(authority_strs[1]); + if (authority_nstrs == 1) { + /* User cred not present in authority */ + proxy_name = authority_strs[0]; + } else if (authority_nstrs == 2) { + /* User cred found */ + *user_cred = authority_strs[0]; + proxy_name = authority_strs[1]; gpr_log(GPR_INFO, "userinfo found in proxy URI"); } else { - /* Bad authority */ + /* Bad authority */ + for (size_t i = 0; i < authority_nstrs; i++) { + gpr_free(authority_strs[i]); + } proxy_name = NULL; } - for(size_t i = 0; i < authority_nstrs; i++) { - gpr_free(authority_strs[i]); - } gpr_free(authority_strs); done: gpr_free(uri_str); @@ -92,8 +88,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, const grpc_channel_args* args, char** name_to_resolve, grpc_channel_args** new_args) { - char *user_cred = NULL; - *name_to_resolve = grpc_get_http_proxy_server(exec_ctx, &user_cred); + char* user_cred = NULL; + *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred); if (*name_to_resolve == NULL) return false; grpc_uri* uri = grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); @@ -163,15 +159,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, args_to_add[0] = grpc_channel_arg_string_create( GRPC_ARG_HTTP_CONNECT_SERVER, uri->path[0] == '/' ? 
uri->path + 1 : uri->path); - if(user_cred != NULL) { - /* Use base64 encoding for user credentials */ - char *encoded_user_cred = + if (user_cred != NULL) { + /* Use base64 encoding for user credentials as stated in RFC 7617 */ + char* encoded_user_cred = grpc_base64_encode(user_cred, strlen(user_cred), 0, 0); - char *header; + char* header; gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred); gpr_free(encoded_user_cred); - args_to_add[1] = grpc_channel_arg_string_create( - GRPC_ARG_HTTP_CONNECT_HEADERS, header); + args_to_add[1] = + grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header); *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2); gpr_free(header); } else { diff --git a/test/core/end2end/end2end_nosec_tests.c b/test/core/end2end/end2end_nosec_tests.c index 483f84e7837..6a061a4e2d7 100644 --- a/test/core/end2end/end2end_nosec_tests.c +++ b/test/core/end2end/end2end_nosec_tests.c @@ -102,12 +102,12 @@ extern void no_op(grpc_end2end_test_config config); extern void no_op_pre_init(void); extern void payload(grpc_end2end_test_config config); extern void payload_pre_init(void); -extern void payload_with_proxy_auth(grpc_end2end_test_config config); -extern void payload_with_proxy_auth_pre_init(void); extern void ping(grpc_end2end_test_config config); extern void ping_pre_init(void); extern void ping_pong_streaming(grpc_end2end_test_config config); extern void ping_pong_streaming_pre_init(void); +extern void proxy_auth(grpc_end2end_test_config config); +extern void proxy_auth_pre_init(void); extern void registered_call(grpc_end2end_test_config config); extern void registered_call_pre_init(void); extern void request_with_flags(grpc_end2end_test_config config); @@ -181,9 +181,9 @@ void grpc_end2end_tests_pre_init(void) { no_logging_pre_init(); no_op_pre_init(); payload_pre_init(); - payload_with_proxy_auth_pre_init(); ping_pre_init(); ping_pong_streaming_pre_init(); + proxy_auth_pre_init(); registered_call_pre_init(); request_with_flags_pre_init(); request_with_payload_pre_init(); @@ -245,9 +245,9 @@ void grpc_end2end_tests(int argc, char **argv, no_logging(config); no_op(config); payload(config); - payload_with_proxy_auth(config); ping(config); ping_pong_streaming(config); + proxy_auth(config); registered_call(config); request_with_flags(config); request_with_payload(config); @@ -412,10 +412,6 @@ void grpc_end2end_tests(int argc, char **argv, payload(config); continue; } - if (0 == strcmp("payload_with_proxy_auth", argv[i])) { - payload_with_proxy_auth(config); - continue; - } if (0 == strcmp("ping", argv[i])) { ping(config); continue; @@ -424,6 +420,10 @@ void grpc_end2end_tests(int argc, char **argv, ping_pong_streaming(config); continue; } + if (0 == strcmp("proxy_auth", argv[i])) { + proxy_auth(config); + continue; + } if (0 == strcmp("registered_call", argv[i])) { registered_call(config); continue; diff --git a/test/core/end2end/end2end_tests.c b/test/core/end2end/end2end_tests.c index 745546dbb7f..3fc7c3fb6cb 100644 --- a/test/core/end2end/end2end_tests.c +++ b/test/core/end2end/end2end_tests.c @@ -104,12 +104,12 @@ extern void no_op(grpc_end2end_test_config config); extern void no_op_pre_init(void); extern void payload(grpc_end2end_test_config config); extern void payload_pre_init(void); -extern void payload_with_proxy_auth(grpc_end2end_test_config config); -extern void payload_with_proxy_auth_pre_init(void); extern void ping(grpc_end2end_test_config config); extern void ping_pre_init(void); extern void 
ping_pong_streaming(grpc_end2end_test_config config); extern void ping_pong_streaming_pre_init(void); +extern void proxy_auth(grpc_end2end_test_config config); +extern void proxy_auth_pre_init(void); extern void registered_call(grpc_end2end_test_config config); extern void registered_call_pre_init(void); extern void request_with_flags(grpc_end2end_test_config config); @@ -184,9 +184,9 @@ void grpc_end2end_tests_pre_init(void) { no_logging_pre_init(); no_op_pre_init(); payload_pre_init(); - payload_with_proxy_auth_pre_init(); ping_pre_init(); ping_pong_streaming_pre_init(); + proxy_auth_pre_init(); registered_call_pre_init(); request_with_flags_pre_init(); request_with_payload_pre_init(); @@ -249,9 +249,9 @@ void grpc_end2end_tests(int argc, char **argv, no_logging(config); no_op(config); payload(config); - payload_with_proxy_auth(config); ping(config); ping_pong_streaming(config); + proxy_auth(config); registered_call(config); request_with_flags(config); request_with_payload(config); @@ -420,10 +420,6 @@ void grpc_end2end_tests(int argc, char **argv, payload(config); continue; } - if (0 == strcmp("payload_with_proxy_auth", argv[i])) { - payload_with_proxy_auth(config); - continue; - } if (0 == strcmp("ping", argv[i])) { ping(config); continue; @@ -432,6 +428,10 @@ void grpc_end2end_tests(int argc, char **argv, ping_pong_streaming(config); continue; } + if (0 == strcmp("proxy_auth", argv[i])) { + proxy_auth(config); + continue; + } if (0 == strcmp("registered_call", argv[i])) { registered_call(config); continue; diff --git a/test/core/end2end/fixtures/h2_http_proxy.c b/test/core/end2end/fixtures/h2_http_proxy.c index ce93c614dfa..fdc8d749c66 100644 --- a/test/core/end2end/fixtures/h2_http_proxy.c +++ b/test/core/end2end/fixtures/h2_http_proxy.c @@ -47,24 +47,13 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack( grpc_channel_args *client_args, grpc_channel_args *server_args) { grpc_end2end_test_fixture f; memset(&f, 0, sizeof(f)); - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data)); const int server_port = grpc_pick_unused_port_or_die(); gpr_join_host_port(&ffd->server_addr, "localhost", server_port); - /* If we are testing proxy auth, add the proxy auth arg to proxy channel args + /* Passing client_args to proxy_create for the case of checking for proxy auth */ - grpc_channel_args *proxy_args = NULL; - const grpc_arg *proxy_auth_arg = grpc_channel_args_find( - client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT); - if(proxy_auth_arg != NULL) { - proxy_args = grpc_channel_args_copy_and_add(NULL, proxy_auth_arg, 1); - } - ffd->proxy = grpc_end2end_http_proxy_create(proxy_args); - grpc_channel_args_destroy(&exec_ctx, proxy_args); - - grpc_exec_ctx_finish(&exec_ctx); + ffd->proxy = grpc_end2end_http_proxy_create(client_args); f.fixture_data = ffd; f.cq = grpc_completion_queue_create_for_next(NULL); @@ -79,13 +68,13 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f, char *proxy_uri; /* If testing for proxy auth, add credentials to proxy uri */ - if(grpc_channel_args_find( - client_args, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) == NULL) { + const grpc_arg *proxy_auth = + grpc_channel_args_find(client_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS); + if (proxy_auth == NULL) { gpr_asprintf(&proxy_uri, "http://%s", grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); } else { - gpr_asprintf(&proxy_uri, "http://%s@%s", - GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED, + 
gpr_asprintf(&proxy_uri, "http://%s@%s", proxy_auth->value.string, grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); } gpr_setenv("http_proxy", proxy_uri); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c index 239444e75fa..266351d181c 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -309,41 +309,23 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg, /** * Parses the proxy auth header value to check if it matches :- - * Basic + * Basic * Returns true if it matches, false otherwise */ -static bool proxy_auth_header_matches(grpc_exec_ctx *exec_ctx, - char *proxy_auth_header_val) { - if(proxy_auth_header_val == NULL) { +static bool proxy_auth_header_matches(grpc_exec_ctx* exec_ctx, + char* proxy_auth_header_val, + char* expected_cred) { + GPR_ASSERT(proxy_auth_header_val != NULL && expected_cred != NULL); + if (strncmp(proxy_auth_header_val, "Basic ", 6) != 0) { return false; } - char **auth_header_strs; - size_t auth_header_nstrs; - bool auth_header_matches = false; - // Split the auth header value on space - gpr_string_split(proxy_auth_header_val, " ", &auth_header_strs, - &auth_header_nstrs); - if(auth_header_nstrs != 2) { - goto done; - } - // Authentication type should be Basic - if(strcmp(auth_header_strs[0], "Basic") != 0) { - goto done; - } - // should match GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED after decoding + proxy_auth_header_val += 6; grpc_slice decoded_slice = - grpc_base64_decode(exec_ctx, auth_header_strs[1], 0); - if(grpc_slice_str_cmp( - decoded_slice, GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED) != 0) { - goto done; - } - auth_header_matches = true; -done: - for(size_t i = 0; i < auth_header_nstrs; i++) { - gpr_free(auth_header_strs[i]); - } - gpr_free(auth_header_strs); - return auth_header_matches; + grpc_base64_decode(exec_ctx, proxy_auth_header_val, 0); + const bool header_matches = + grpc_slice_str_cmp(decoded_slice, expected_cred) == 0; + grpc_slice_unref_internal(exec_ctx, decoded_slice); + return header_matches; } // Callback to read the HTTP CONNECT request. 
@@ -395,23 +377,21 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, return; } // If proxy auth is being used, check if the header is present and as expected - if(grpc_channel_args_find( - conn->proxy->channel_args, - GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT) != NULL) { + const grpc_arg* proxy_auth = grpc_channel_args_find( + conn->proxy->channel_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS); + if (proxy_auth != NULL) { bool auth_header_found = false; - for(size_t i = 0; i < conn->http_request.hdr_count; i++) { - if(strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) { - if(!proxy_auth_header_matches( - exec_ctx, conn->http_request.hdrs[i].value)) { - break; - } - auth_header_found = true; + for (size_t i = 0; i < conn->http_request.hdr_count; i++) { + if (strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) { + auth_header_found = proxy_auth_header_matches( + exec_ctx, conn->http_request.hdrs[i].value, + proxy_auth->value.string); break; } } - if(!auth_header_found) { - const char *msg = "HTTP Connect could not verify authentication"; - error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + if (!auth_header_found) { + const char* msg = "HTTP Connect could not verify authentication"; + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(msg); proxy_connection_failed(exec_ctx, conn, true /* is_client */, "HTTP proxy read request", error); GRPC_ERROR_UNREF(error); @@ -503,7 +483,7 @@ static void thread_main(void* arg) { } grpc_end2end_http_proxy* grpc_end2end_http_proxy_create( - grpc_channel_args *args) { + grpc_channel_args* args) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy)); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.h b/test/core/end2end/fixtures/http_proxy_fixture.h index f3da0494ae0..103bd08196c 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.h +++ b/test/core/end2end/fixtures/http_proxy_fixture.h @@ -22,20 +22,18 @@ #include /* The test credentials being used for HTTP Proxy Authorization */ -#define GRPC_END2END_HTTP_PROXY_TEST_CONNECT_CRED "aladdin:opensesame" +#define GRPC_TEST_HTTP_PROXY_AUTH_CREDS "aladdin:opensesame" /* A channel arg key used to indicate that the channel uses proxy authorization. - * The value is of no consequence as just the presence of the argument is - * enough. It is currently kept as of type integer but can be changed as seen - * fit. + * The value (string) should be the proxy auth credentials that should be + * checked. 
*/ -#define GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT \ - "grpc.test.connect_auth" +#define GRPC_ARG_HTTP_PROXY_AUTH_CREDS "grpc.test.proxy_auth" typedef struct grpc_end2end_http_proxy grpc_end2end_http_proxy; grpc_end2end_http_proxy* grpc_end2end_http_proxy_create( - grpc_channel_args *args); + grpc_channel_args* args); void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy); diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py index de8508cc151..18bae63a8ac 100755 --- a/test/core/end2end/gen_build_yaml.py +++ b/test/core/end2end/gen_build_yaml.py @@ -24,9 +24,9 @@ import hashlib FixtureOptions = collections.namedtuple( 'FixtureOptions', - 'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2') + 'fullstack includes_proxy dns_resolver name_resolution secure platforms ci_mac tracing exclude_configs exclude_iomgrs large_writes enables_compression supports_compression is_inproc is_http2 supports_proxy_auth') default_unsecure_fixture_options = FixtureOptions( - True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True) + True, False, True, True, False, ['windows', 'linux', 'mac', 'posix'], True, False, [], [], True, False, True, False, True, False) socketpair_unsecure_fixture_options = default_unsecure_fixture_options._replace(fullstack=False, dns_resolver=False) default_secure_fixture_options = default_unsecure_fixture_options._replace(secure=True) uds_fixture_options = default_unsecure_fixture_options._replace(dns_resolver=False, platforms=['linux', 'mac', 'posix'], exclude_iomgrs=['uv']) @@ -47,7 +47,7 @@ END2END_FIXTURES = { 'h2_full+trace': default_unsecure_fixture_options._replace(tracing=True), 'h2_full+workarounds': default_unsecure_fixture_options, 'h2_http_proxy': default_unsecure_fixture_options._replace( - ci_mac=False, exclude_iomgrs=['uv']), + ci_mac=False, exclude_iomgrs=['uv'], supports_proxy_auth=True), 'h2_oauth2': default_secure_fixture_options._replace( ci_mac=False, exclude_iomgrs=['uv']), 'h2_proxy': default_unsecure_fixture_options._replace( @@ -69,8 +69,8 @@ END2END_FIXTURES = { TestOptions = collections.namedtuple( 'TestOptions', - 'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2') -default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False) + 'needs_fullstack needs_dns needs_names proxyable secure traceable cpu_cost exclude_iomgrs large_writes flaky allows_compression needs_compression exclude_inproc needs_http2 needs_proxy_auth') +default_test_options = TestOptions(False, False, False, True, False, True, 1.0, [], False, False, True, False, False, False, False) connectivity_test_options = default_test_options._replace(needs_fullstack=True) LOWCPU = 0.1 @@ -125,10 +125,10 @@ END2END_TESTS = { 'no_logging': default_test_options._replace(traceable=False), 'no_op': default_test_options, 'payload': default_test_options, - 'payload_with_proxy_auth': default_test_options, 'load_reporting_hook': default_test_options, 'ping_pong_streaming': default_test_options._replace(cpu_cost=LOWCPU), 'ping': connectivity_test_options._replace(proxyable=False, cpu_cost=LOWCPU), + 'proxy_auth': default_test_options._replace(needs_proxy_auth=True), 
'registered_call': default_test_options, 'request_with_flags': default_test_options._replace( proxyable=False, cpu_cost=LOWCPU), @@ -179,6 +179,9 @@ def compatible(f, t): if END2END_TESTS[t].needs_http2: if not END2END_FIXTURES[f].is_http2: return False + if END2END_TESTS[t].needs_proxy_auth: + if not END2END_FIXTURES[f].supports_proxy_auth: + return False return True diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl index 1f56ddb5af3..6d1917c0ffc 100755 --- a/test/core/end2end/generate_tests.bzl +++ b/test/core/end2end/generate_tests.bzl @@ -21,7 +21,7 @@ load("//bazel:grpc_build_system.bzl", "grpc_sh_test", "grpc_cc_binary", "grpc_cc def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True, name_resolution=True, secure=True, tracing=False, platforms=['windows', 'linux', 'mac', 'posix'], - is_inproc=False, is_http2=True): + is_inproc=False, is_http2=True, supports_proxy_auth=False): return struct( fullstack=fullstack, includes_proxy=includes_proxy, @@ -30,7 +30,8 @@ def fixture_options(fullstack=True, includes_proxy=False, dns_resolver=True, secure=secure, tracing=tracing, is_inproc=is_inproc, - is_http2=is_http2 + is_http2=is_http2, + supports_proxy_auth=supports_proxy_auth #platforms=platforms ) @@ -47,7 +48,7 @@ END2END_FIXTURES = { 'h2_full+pipe': fixture_options(platforms=['linux']), 'h2_full+trace': fixture_options(tracing=True), 'h2_full+workarounds': fixture_options(), - 'h2_http_proxy': fixture_options(), + 'h2_http_proxy': fixture_options(supports_proxy_auth=True), 'h2_oauth2': fixture_options(), 'h2_proxy': fixture_options(includes_proxy=True), 'h2_sockpair_1byte': fixture_options(fullstack=False, dns_resolver=False), @@ -67,7 +68,8 @@ END2END_FIXTURES = { def test_options(needs_fullstack=False, needs_dns=False, needs_names=False, proxyable=True, secure=False, traceable=False, - exclude_inproc=False, needs_http2=False): + exclude_inproc=False, needs_http2=False, + needs_proxy_auth=False): return struct( needs_fullstack=needs_fullstack, needs_dns=needs_dns, @@ -76,7 +78,8 @@ def test_options(needs_fullstack=False, needs_dns=False, needs_names=False, secure=secure, traceable=traceable, exclude_inproc=exclude_inproc, - needs_http2=needs_http2 + needs_http2=needs_http2, + needs_proxy_auth=needs_proxy_auth ) @@ -120,10 +123,10 @@ END2END_TESTS = { 'no_logging': test_options(traceable=False), 'no_op': test_options(), 'payload': test_options(), - 'payload_with_proxy_auth': test_options(), 'load_reporting_hook': test_options(), 'ping_pong_streaming': test_options(), 'ping': test_options(needs_fullstack=True, proxyable=False), + 'proxy_auth': test_options(needs_proxy_auth=True), 'registered_call': test_options(), 'request_with_flags': test_options(proxyable=False), 'request_with_payload': test_options(), @@ -166,6 +169,9 @@ def compatible(fopt, topt): if topt.needs_http2: if not fopt.is_http2: return False + if topt.needs_proxy_auth: + if not fopt.supports_proxy_auth: + return False return True diff --git a/test/core/end2end/tests/payload_with_proxy_auth.c b/test/core/end2end/tests/proxy_auth.c similarity index 60% rename from test/core/end2end/tests/payload_with_proxy_auth.c rename to test/core/end2end/tests/proxy_auth.c index 4d176f9145b..d922049bcb5 100644 --- a/test/core/end2end/tests/payload_with_proxy_auth.c +++ b/test/core/end2end/tests/proxy_auth.c @@ -16,6 +16,10 @@ * */ +/** + * This test is for checking whether proxy authentication is working with HTTP + * Connect. 
+ */ #include "test/core/end2end/end2end_tests.h" #include "test/core/end2end/fixtures/http_proxy_fixture.h" @@ -23,10 +27,12 @@ #include #include +#include #include #include #include #include +#include "src/core/lib/support/string.h" #include "test/core/end2end/cq_verifier.h" static void *tag(intptr_t t) { return (void *)t; } @@ -85,53 +91,24 @@ static void end_test(grpc_end2end_test_fixture *f) { grpc_completion_queue_destroy(f->shutdown_cq); } -/* Creates and returns a grpc_slice containing random alphanumeric characters. - */ -static grpc_slice generate_random_slice() { - size_t i; - static const char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890"; - char *output; - const size_t output_size = 1024 * 1024; - output = gpr_malloc(output_size); - for (i = 0; i < output_size - 1; ++i) { - output[i] = chars[rand() % (int)(sizeof(chars) - 1)]; - } - output[output_size - 1] = '\0'; - grpc_slice out = grpc_slice_from_copied_string(output); - gpr_free(output); - return out; -} - -static void request_response_with_payload_and_proxy_auth - (grpc_end2end_test_config config, - grpc_end2end_test_fixture f) { - /* Create large request and response bodies. These are big enough to require - * multiple round trips to deliver to the peer, and their exact contents of - * will be verified on completion. */ - grpc_slice request_payload_slice = generate_random_slice(); - grpc_slice response_payload_slice = generate_random_slice(); - +static void simple_request_body(grpc_end2end_test_config config, + grpc_end2end_test_fixture f) { grpc_call *c; grpc_call *s; - grpc_byte_buffer *request_payload = - grpc_raw_byte_buffer_create(&request_payload_slice, 1); - grpc_byte_buffer *response_payload = - grpc_raw_byte_buffer_create(&response_payload_slice, 1); cq_verifier *cqv = cq_verifier_create(f.cq); grpc_op ops[6]; grpc_op *op; grpc_metadata_array initial_metadata_recv; grpc_metadata_array trailing_metadata_recv; grpc_metadata_array request_metadata_recv; - grpc_byte_buffer *request_payload_recv = NULL; - grpc_byte_buffer *response_payload_recv = NULL; grpc_call_details call_details; grpc_status_code status; grpc_call_error error; grpc_slice details; int was_cancelled = 2; + char *peer; - gpr_timespec deadline = n_seconds_from_now(60); + gpr_timespec deadline = five_seconds_from_now(); c = grpc_channel_create_call( f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, grpc_slice_from_static_string("/foo"), @@ -139,6 +116,11 @@ static void request_response_with_payload_and_proxy_auth NULL); GPR_ASSERT(c); + peer = grpc_call_get_peer(c); + GPR_ASSERT(peer != NULL); + gpr_log(GPR_DEBUG, "client_peer_before_call=%s", peer); + gpr_free(peer); + grpc_metadata_array_init(&initial_metadata_recv); grpc_metadata_array_init(&trailing_metadata_recv); grpc_metadata_array_init(&request_metadata_recv); @@ -151,11 +133,6 @@ static void request_response_with_payload_and_proxy_auth op->flags = 0; op->reserved = NULL; op++; - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message.send_message = request_payload; - op->flags = 0; - op->reserved = NULL; - op++; op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT; op->flags = 0; op->reserved = NULL; @@ -165,11 +142,6 @@ static void request_response_with_payload_and_proxy_auth op->flags = 0; op->reserved = NULL; op++; - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &response_payload_recv; - op->flags = 0; - op->reserved = NULL; - op++; op->op = GRPC_OP_RECV_STATUS_ON_CLIENT; op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv; 
op->data.recv_status_on_client.status = &status; @@ -187,6 +159,15 @@ static void request_response_with_payload_and_proxy_auth CQ_EXPECT_COMPLETION(cqv, tag(101), 1); cq_verify(cqv); + peer = grpc_call_get_peer(s); + GPR_ASSERT(peer != NULL); + gpr_log(GPR_DEBUG, "server_peer=%s", peer); + gpr_free(peer); + peer = grpc_call_get_peer(c); + GPR_ASSERT(peer != NULL); + gpr_log(GPR_DEBUG, "client_peer=%s", peer); + gpr_free(peer); + memset(ops, 0, sizeof(ops)); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; @@ -194,53 +175,33 @@ static void request_response_with_payload_and_proxy_auth op->flags = 0; op->reserved = NULL; op++; - op->op = GRPC_OP_RECV_MESSAGE; - op->data.recv_message.recv_message = &request_payload_recv; + op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; + op->data.send_status_from_server.trailing_metadata_count = 0; + op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED; + grpc_slice status_details = grpc_slice_from_static_string("xyz"); + op->data.send_status_from_server.status_details = &status_details; op->flags = 0; op->reserved = NULL; op++; - error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL); - GPR_ASSERT(GRPC_CALL_OK == error); - - CQ_EXPECT_COMPLETION(cqv, tag(102), 1); - cq_verify(cqv); - - memset(ops, 0, sizeof(ops)); - op = ops; op->op = GRPC_OP_RECV_CLOSE_ON_SERVER; op->data.recv_close_on_server.cancelled = &was_cancelled; op->flags = 0; op->reserved = NULL; op++; - op->op = GRPC_OP_SEND_MESSAGE; - op->data.send_message.send_message = response_payload; - op->flags = 0; - op->reserved = NULL; - op++; - op->op = GRPC_OP_SEND_STATUS_FROM_SERVER; - op->data.send_status_from_server.trailing_metadata_count = 0; - op->data.send_status_from_server.status = GRPC_STATUS_OK; - grpc_slice status_details = grpc_slice_from_static_string("xyz"); - op->data.send_status_from_server.status_details = &status_details; - op->flags = 0; - op->reserved = NULL; - op++; - error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(103), NULL); + error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL); GPR_ASSERT(GRPC_CALL_OK == error); - CQ_EXPECT_COMPLETION(cqv, tag(103), 1); + CQ_EXPECT_COMPLETION(cqv, tag(102), 1); CQ_EXPECT_COMPLETION(cqv, tag(1), 1); cq_verify(cqv); - GPR_ASSERT(status == GRPC_STATUS_OK); + GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED); GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz")); GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo")); validate_host_override_string("foo.test.google.fr:1234", call_details.host, config); - GPR_ASSERT(was_cancelled == 0); - GPR_ASSERT(byte_buffer_eq_slice(request_payload_recv, request_payload_slice)); - GPR_ASSERT( - byte_buffer_eq_slice(response_payload_recv, response_payload_slice)); + GPR_ASSERT(0 == call_details.flags); + GPR_ASSERT(was_cancelled == 1); grpc_slice_unref(details); grpc_metadata_array_destroy(&initial_metadata_recv); @@ -252,51 +213,23 @@ static void request_response_with_payload_and_proxy_auth grpc_call_unref(s); cq_verifier_destroy(cqv); - - grpc_byte_buffer_destroy(request_payload); - grpc_byte_buffer_destroy(response_payload); - grpc_byte_buffer_destroy(request_payload_recv); - grpc_byte_buffer_destroy(response_payload_recv); -} - -/* Client sends a request with payload, server reads then returns a response - payload and status. 
*/ -static void test_invoke_request_response_with_payload_and_proxy_auth( - grpc_end2end_test_config config) { - /* Indicate that the proxy requires user auth */ - grpc_arg client_arg = {.type = GRPC_ARG_INTEGER, - .key = GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT, - .value.integer = 0}; - grpc_channel_args client_args = {.num_args = 1, .args = &client_arg}; - grpc_end2end_test_fixture f = begin_test( - config, "test_invoke_request_response_with_payload_and_proxy_auth", - &client_args, NULL); - request_response_with_payload_and_proxy_auth(config, f); - end_test(&f); - config.tear_down_data(&f); } -static void test_invoke_10_request_response_with_payload_and_proxy_auth( - grpc_end2end_test_config config) { - int i; +static void test_invoke_proxy_auth(grpc_end2end_test_config config) { /* Indicate that the proxy requires user auth */ - grpc_arg client_arg = {.type = GRPC_ARG_INTEGER, - .key = GRPC_END2END_HTTP_PROXY_TEST_CONNECT_AUTH_PRESENT, - .value.integer = 0}; + grpc_arg client_arg = {.type = GRPC_ARG_STRING, + .key = GRPC_ARG_HTTP_PROXY_AUTH_CREDS, + .value.string = GRPC_TEST_HTTP_PROXY_AUTH_CREDS}; grpc_channel_args client_args = {.num_args = 1, .args = &client_arg}; - grpc_end2end_test_fixture f = begin_test( - config, "test_invoke_10_request_response_with_payload_and_proxy_auth", - &client_args, NULL); - for (i = 0; i < 10; i++) { - request_response_with_payload_and_proxy_auth(config, f); - } + grpc_end2end_test_fixture f = + begin_test(config, "test_invoke_proxy_auth", &client_args, NULL); + simple_request_body(config, f); end_test(&f); config.tear_down_data(&f); } -void payload_with_proxy_auth(grpc_end2end_test_config config) { - test_invoke_request_response_with_payload_and_proxy_auth(config); - test_invoke_10_request_response_with_payload_and_proxy_auth(config); +void proxy_auth(grpc_end2end_test_config config) { + test_invoke_proxy_auth(config); } -void payload_with_proxy_auth_pre_init(void) {} +void proxy_auth_pre_init(void) {} diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index 313a174b332..cbd0e1d9d70 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7337,9 +7337,9 @@ "test/core/end2end/tests/no_logging.c", "test/core/end2end/tests/no_op.c", "test/core/end2end/tests/payload.c", - "test/core/end2end/tests/payload_with_proxy_auth.c", "test/core/end2end/tests/ping.c", "test/core/end2end/tests/ping_pong_streaming.c", + "test/core/end2end/tests/proxy_auth.c", "test/core/end2end/tests/registered_call.c", "test/core/end2end/tests/request_with_flags.c", "test/core/end2end/tests/request_with_payload.c", @@ -7415,9 +7415,9 @@ "test/core/end2end/tests/no_logging.c", "test/core/end2end/tests/no_op.c", "test/core/end2end/tests/payload.c", - "test/core/end2end/tests/payload_with_proxy_auth.c", "test/core/end2end/tests/ping.c", "test/core/end2end/tests/ping_pong_streaming.c", + "test/core/end2end/tests/proxy_auth.c", "test/core/end2end/tests/registered_call.c", "test/core/end2end/tests/request_with_flags.c", "test/core/end2end/tests/request_with_payload.c", diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json index 901e869e7cd..767a5ce1473 100644 --- a/tools/run_tests/generated/tests.json +++ b/tools/run_tests/generated/tests.json @@ -6629,29 +6629,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - 
"exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_census_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -7921,29 +7898,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_compress_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -9153,28 +9107,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_fakesec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -10307,29 +10239,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_fd_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -11553,29 +11462,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -12693,25 +12579,6 @@ "linux" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_full+pipe_test", - "platforms": [ - "linux" - ] - }, { "args": [ "ping" @@ -13863,29 +13730,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full+trace_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -15155,29 +14999,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full+workarounds_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -16482,14 +16303,14 @@ }, { "args": [ - "payload_with_proxy_auth" + "ping" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -16506,7 +16327,7 @@ }, { "args": [ - "ping" + "ping_pong_streaming" ], "ci_platforms": [ "windows", @@ -16530,14 +16351,14 @@ }, { "args": [ - "ping_pong_streaming" + "proxy_auth" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -17791,29 +17612,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, 
- "language": "c", - "name": "h2_load_reporting_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -19116,30 +18914,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_oauth2_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -20294,7 +20068,31 @@ }, { "args": [ - "payload_with_proxy_auth" + "ping_pong_streaming" + ], + "ci_platforms": [ + "windows", + "linux", + "posix" + ], + "cpu_cost": 0.1, + "exclude_configs": [], + "exclude_iomgrs": [ + "uv" + ], + "flaky": false, + "language": "c", + "name": "h2_proxy_test", + "platforms": [ + "windows", + "linux", + "mac", + "posix" + ] + }, + { + "args": [ + "registered_call" ], "ci_platforms": [ "windows", @@ -20318,7 +20116,7 @@ }, { "args": [ - "ping_pong_streaming" + "request_with_payload" ], "ci_platforms": [ "windows", @@ -20342,14 +20140,14 @@ }, { "args": [ - "registered_call" + "server_finishes_request" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -20366,55 +20164,7 @@ }, { "args": [ - "request_with_payload" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_proxy_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "server_finishes_request" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_proxy_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "shutdown_finishes_calls" + "shutdown_finishes_calls" ], "ci_platforms": [ "windows", @@ -21444,30 +21194,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -22572,30 +22298,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair+trace_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -23788,32 +23490,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair_1byte_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -25059,29 +24735,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": 
"h2_ssl_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -26351,29 +26004,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_ssl_cert_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -27508,30 +27138,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_ssl_proxy_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -28720,29 +28326,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_uds_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -29755,29 +29338,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "inproc_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -30978,29 +30538,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_census_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -32249,7 +31786,7 @@ }, { "args": [ - "payload_with_proxy_auth" + "ping" ], "ci_platforms": [ "windows", @@ -32257,7 +31794,7 @@ "mac", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [], "flaky": false, @@ -32272,30 +31809,7 @@ }, { "args": [ - "ping" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 0.1, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_compress_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, - { - "args": [ - "ping_pong_streaming" + "ping_pong_streaming" ], "ci_platforms": [ "windows", @@ -33374,29 +32888,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_fd_nosec_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -34597,29 +34088,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -35718,25 +35186,6 @@ "linux" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux" 
- ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_full+pipe_nosec_test", - "platforms": [ - "linux" - ] - }, { "args": [ "ping" @@ -36865,29 +36314,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full+trace_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -38134,29 +37560,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_full+workarounds_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -39437,14 +38840,14 @@ }, { "args": [ - "payload_with_proxy_auth" + "ping" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 1.0, + "cpu_cost": 0.1, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -39461,7 +38864,7 @@ }, { "args": [ - "ping" + "ping_pong_streaming" ], "ci_platforms": [ "windows", @@ -39485,14 +38888,14 @@ }, { "args": [ - "ping_pong_streaming" + "proxy_auth" ], "ci_platforms": [ "windows", "linux", "posix" ], - "cpu_cost": 0.1, + "cpu_cost": 1.0, "exclude_configs": [], "exclude_iomgrs": [ "uv" @@ -40723,29 +40126,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "h2_load_reporting_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -41856,30 +41236,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_proxy_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -42984,30 +42340,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -44088,30 +43420,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair+trace_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -45278,32 +44586,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [ - "msan" - ], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_sockpair_1byte_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" @@ -46499,29 
+45781,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [ - "uv" - ], - "flaky": false, - "language": "c", - "name": "h2_uds_nosec_test", - "platforms": [ - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping" @@ -47511,29 +46770,6 @@ "posix" ] }, - { - "args": [ - "payload_with_proxy_auth" - ], - "ci_platforms": [ - "windows", - "linux", - "mac", - "posix" - ], - "cpu_cost": 1.0, - "exclude_configs": [], - "exclude_iomgrs": [], - "flaky": false, - "language": "c", - "name": "inproc_nosec_test", - "platforms": [ - "windows", - "linux", - "mac", - "posix" - ] - }, { "args": [ "ping_pong_streaming" diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj index 3a45ed619c0..249d99b526b 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj @@ -227,12 +227,12 @@ - - + + diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters index 69bcf02b43d..3a2105ebe83 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_nosec_tests/end2end_nosec_tests.vcxproj.filters @@ -115,15 +115,15 @@ test\core\end2end\tests - - test\core\end2end\tests - test\core\end2end\tests test\core\end2end\tests + + test\core\end2end\tests + test\core\end2end\tests diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj index fe1f6279805..b7a2ecd27b7 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj @@ -229,12 +229,12 @@ - - + + diff --git a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters index 255a76e107e..1626b77d147 100644 --- a/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters +++ b/vsprojects/vcxproj/test/end2end/tests/end2end_tests/end2end_tests.vcxproj.filters @@ -118,15 +118,15 @@ test\core\end2end\tests - - test\core\end2end\tests - test\core\end2end\tests test\core\end2end\tests + + test\core\end2end\tests + test\core\end2end\tests From 949d075812cb617eaf4d66a880869cb400f4440b Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 20 Jul 2017 23:49:15 -0700 Subject: [PATCH 44/47] Correctly fix the case where pollset->kicked_without_poller was missed --- src/core/lib/iomgr/ev_epoll1_linux.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/core/lib/iomgr/ev_epoll1_linux.c b/src/core/lib/iomgr/ev_epoll1_linux.c index 77d59059d43..b89b8af15a9 100644 --- a/src/core/lib/iomgr/ev_epoll1_linux.c +++ b/src/core/lib/iomgr/ev_epoll1_linux.c @@ -603,7 +603,7 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, worker_insert(pollset, worker); pollset->begin_refs--; - if (worker->kick_state == UNKICKED) { + if (worker->kick_state == UNKICKED && 
!pollset->kicked_without_poller) { GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker); worker->initialized_cv = true; gpr_cv_init(&worker->cv); @@ -623,10 +623,13 @@ static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker, } *now = gpr_now(now->clock_type); } + if (GRPC_TRACER_ON(grpc_polling_trace)) { - gpr_log(GPR_ERROR, "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d", pollset, - worker, kick_state_string(worker->kick_state), - pollset->shutting_down); + gpr_log(GPR_ERROR, + "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d " + "kicked_without_poller: %d", + pollset, worker, kick_state_string(worker->kick_state), + pollset->shutting_down, pollset->kicked_without_poller); } /* We release pollset lock in this function at a couple of places: From 9d87421328490e01962397bf718ab210384705e6 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Fri, 21 Jul 2017 09:08:27 -0700 Subject: [PATCH 45/47] Disable epoll1 - so that I can merge whatever I have until now --- tools/run_tests/run_tests.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index ddfc1d68f19..611868ce5a4 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -63,8 +63,8 @@ _FORCE_ENVIRON_FOR_WRAPPERS = { } _POLLING_STRATEGIES = { - 'linux': ['epoll1', 'epollsig', 'poll', 'poll-cv'], -# TODO(ctiller, sreecha): enable epollex, epoll-thread-pool + 'linux': ['epollsig', 'poll', 'poll-cv'], +# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool 'mac': ['poll'], } From 2aab728245f118b18a8ddcff102151fcc66d6960 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Fri, 21 Jul 2017 11:25:56 -0700 Subject: [PATCH 46/47] regex to diff --- tools/profiling/microbenchmarks/bm_diff/bm_diff.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py index 1ac951f3d86..a41d0f0552f 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py @@ -67,6 +67,12 @@ def _args(): default=20, help='Number of times to loops the benchmarks. 
Must match what was passed to bm_run.py' ) + argp.add_argument( + '-r', + '--regex', + type=str, + default="", + help='Regex to filter benchmarks run') argp.add_argument('--counters', dest='counters', action='store_true') argp.add_argument('--no-counters', dest='counters', action='store_false') argp.set_defaults(counters=True) @@ -212,6 +218,6 @@ def diff(bms, loops, regex, track, old, new, counters): if __name__ == '__main__': args = _args() - diff, note = diff(args.benchmarks, args.loops, args.track, args.old, + diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old, args.new, args.counters) print('%s\n%s' % (note, diff if diff else "No performance differences")) From d0c1e50ea91b9c4aefc7357a21daeae689d7bb48 Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Fri, 21 Jul 2017 12:34:38 -0700 Subject: [PATCH 47/47] Changing a few variable names and adding few safety conditions --- src/core/ext/filters/client_channel/http_proxy.c | 2 +- test/core/end2end/fixtures/h2_http_proxy.c | 6 +++--- test/core/end2end/fixtures/http_proxy_fixture.c | 15 ++++++++------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index 8a14b4e57c6..ef3512ed833 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -67,7 +67,7 @@ static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) { /* User cred found */ *user_cred = authority_strs[0]; proxy_name = authority_strs[1]; - gpr_log(GPR_INFO, "userinfo found in proxy URI"); + gpr_log(GPR_DEBUG, "userinfo found in proxy URI"); } else { /* Bad authority */ for (size_t i = 0; i < authority_nstrs; i++) { diff --git a/test/core/end2end/fixtures/h2_http_proxy.c b/test/core/end2end/fixtures/h2_http_proxy.c index fdc8d749c66..61458923652 100644 --- a/test/core/end2end/fixtures/h2_http_proxy.c +++ b/test/core/end2end/fixtures/h2_http_proxy.c @@ -68,13 +68,13 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f, char *proxy_uri; /* If testing for proxy auth, add credentials to proxy uri */ - const grpc_arg *proxy_auth = + const grpc_arg *proxy_auth_arg = grpc_channel_args_find(client_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS); - if (proxy_auth == NULL) { + if (proxy_auth_arg == NULL || proxy_auth_arg->type != GRPC_ARG_STRING) { gpr_asprintf(&proxy_uri, "http://%s", grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); } else { - gpr_asprintf(&proxy_uri, "http://%s@%s", proxy_auth->value.string, + gpr_asprintf(&proxy_uri, "http://%s@%s", proxy_auth_arg->value.string, grpc_end2end_http_proxy_get_proxy_name(ffd->proxy)); } gpr_setenv("http_proxy", proxy_uri); diff --git a/test/core/end2end/fixtures/http_proxy_fixture.c b/test/core/end2end/fixtures/http_proxy_fixture.c index 266351d181c..a4cfc77bcb7 100644 --- a/test/core/end2end/fixtures/http_proxy_fixture.c +++ b/test/core/end2end/fixtures/http_proxy_fixture.c @@ -315,7 +315,8 @@ static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg, static bool proxy_auth_header_matches(grpc_exec_ctx* exec_ctx, char* proxy_auth_header_val, char* expected_cred) { - GPR_ASSERT(proxy_auth_header_val != NULL && expected_cred != NULL); + GPR_ASSERT(proxy_auth_header_val != NULL); + GPR_ASSERT(expected_cred != NULL); if (strncmp(proxy_auth_header_val, "Basic ", 6) != 0) { return false; } @@ -377,19 +378,19 @@ static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg, return; } // If proxy auth is 
being used, check if the header is present and as expected - const grpc_arg* proxy_auth = grpc_channel_args_find( + const grpc_arg* proxy_auth_arg = grpc_channel_args_find( conn->proxy->channel_args, GRPC_ARG_HTTP_PROXY_AUTH_CREDS); - if (proxy_auth != NULL) { - bool auth_header_found = false; + if (proxy_auth_arg != NULL && proxy_auth_arg->type == GRPC_ARG_STRING) { + bool client_authenticated = false; for (size_t i = 0; i < conn->http_request.hdr_count; i++) { if (strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) { - auth_header_found = proxy_auth_header_matches( + client_authenticated = proxy_auth_header_matches( exec_ctx, conn->http_request.hdrs[i].value, - proxy_auth->value.string); + proxy_auth_arg->value.string); break; } } - if (!auth_header_found) { + if (!client_authenticated) { const char* msg = "HTTP Connect could not verify authentication"; error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(msg); proxy_connection_failed(exec_ctx, conn, true /* is_client */,