|
|
|
@ -20,194 +20,458 @@ |
|
|
|
|
|
|
|
|
|
#include <string.h> |
|
|
|
|
|
|
|
|
|
#include <gmock/gmock.h> |
|
|
|
|
#include <gtest/gtest.h> |
|
|
|
|
|
|
|
|
|
#include <grpc/grpc.h> |
|
|
|
|
#include <grpc/support/alloc.h> |
|
|
|
|
#include <grpc/support/log.h> |
|
|
|
|
#include <grpc/support/string_util.h> |
|
|
|
|
#include <grpc/support/sync.h> |
|
|
|
|
|
|
|
|
|
#include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h" |
|
|
|
|
#include "src/core/lib/iomgr/iomgr.h" |
|
|
|
|
#include "test/core/http/httpcli_test_util.h" |
|
|
|
|
#include "test/core/util/fake_udp_and_tcp_server.h" |
|
|
|
|
#include "test/core/util/port.h" |
|
|
|
|
#include "test/core/util/subprocess.h" |
|
|
|
|
#include "test/core/util/test_config.h" |
|
|
|
|
|
|
|
|
|
static int g_done = 0; |
|
|
|
|
static gpr_mu* g_mu; |
|
|
|
|
static grpc_polling_entity g_pops; |
|
|
|
|
namespace { |
|
|
|
|
|
|
|
|
|
// Converts a relative timeout of `seconds` into an absolute grpc_millis
// deadline, rounding up.
// NOTE(review): the original span declared two functions (old-style
// n_seconds_time and new-style NSecondsTime) sharing a single body and a
// single closing brace — a merge artifact that cannot compile. Both names are
// kept so that any remaining old-style call sites continue to work.
grpc_millis NSecondsTime(int seconds) {
  return grpc_timespec_to_millis_round_up(
      grpc_timeout_seconds_to_deadline(seconds));
}

// Backwards-compatible alias for older call sites in this file.
static grpc_millis n_seconds_time(int seconds) { return NSecondsTime(seconds); }
|
|
|
|
|
|
|
|
|
static void on_finish(void* arg, grpc_error_handle error) { |
|
|
|
|
const char* expect = |
|
|
|
|
"<html><head><title>Hello world!</title></head>" |
|
|
|
|
"<body><p>This is a test</p></body></html>"; |
|
|
|
|
grpc_http_response* response = static_cast<grpc_http_response*>(arg); |
|
|
|
|
GPR_ASSERT(response); |
|
|
|
|
gpr_log(GPR_INFO, "response status=%d error=%s", response->status, |
|
|
|
|
grpc_error_std_string(error).c_str()); |
|
|
|
|
GPR_ASSERT(response->status == 200); |
|
|
|
|
GPR_ASSERT(response->body_length == strlen(expect)); |
|
|
|
|
GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length)); |
|
|
|
|
gpr_mu_lock(g_mu); |
|
|
|
|
g_done = 1; |
|
|
|
|
GPR_ASSERT(GRPC_LOG_IF_ERROR( |
|
|
|
|
"pollset_kick", |
|
|
|
|
grpc_pollset_kick(grpc_polling_entity_pollset(&g_pops), nullptr))); |
|
|
|
|
gpr_mu_unlock(g_mu); |
|
|
|
|
// Returns an absolute absl::Time deadline that is `s` seconds from now.
absl::Time AbslDeadlineSeconds(int s) {
  const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(s);
  return grpc_core::ToAbslTime(deadline);
}
|
|
|
|
|
|
|
|
|
static void test_get(int port) { |
|
|
|
|
grpc_httpcli_request req; |
|
|
|
|
char* host; |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
|
|
|
|
|
g_done = 0; |
|
|
|
|
gpr_log(GPR_INFO, "test_get"); |
|
|
|
|
int g_argc; |
|
|
|
|
char** g_argv; |
|
|
|
|
int g_server_port; |
|
|
|
|
gpr_subprocess* g_server; |
|
|
|
|
|
|
|
|
|
gpr_asprintf(&host, "localhost:%d", port); |
|
|
|
|
gpr_log(GPR_INFO, "requesting from %s", host); |
|
|
|
|
class HttpRequestTest : public ::testing::Test { |
|
|
|
|
public: |
|
|
|
|
HttpRequestTest() { |
|
|
|
|
grpc_init(); |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
grpc_pollset* pollset = |
|
|
|
|
static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); |
|
|
|
|
grpc_pollset_init(pollset, &mu_); |
|
|
|
|
pops_ = grpc_polling_entity_create_from_pollset(pollset); |
|
|
|
|
} |
|
|
|
|
~HttpRequestTest() override { |
|
|
|
|
{ |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
grpc_pollset_shutdown( |
|
|
|
|
grpc_polling_entity_pollset(&pops_), |
|
|
|
|
GRPC_CLOSURE_CREATE(DestroyPops, &pops_, grpc_schedule_on_exec_ctx)); |
|
|
|
|
} |
|
|
|
|
grpc_shutdown(); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
memset(&req, 0, sizeof(req)); |
|
|
|
|
req.host = host; |
|
|
|
|
req.http.path = const_cast<char*>("/get"); |
|
|
|
|
req.handshaker = &grpc_httpcli_plaintext; |
|
|
|
|
|
|
|
|
|
grpc_http_response response; |
|
|
|
|
response = {}; |
|
|
|
|
grpc_httpcli_get( |
|
|
|
|
&g_pops, grpc_core::ResourceQuota::Default(), &req, n_seconds_time(15), |
|
|
|
|
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx), |
|
|
|
|
&response); |
|
|
|
|
gpr_mu_lock(g_mu); |
|
|
|
|
while (!g_done) { |
|
|
|
|
grpc_pollset_worker* worker = nullptr; |
|
|
|
|
void RunAndKick(const std::function<void()>& f) { |
|
|
|
|
grpc_core::MutexLockForGprMu lock(mu_); |
|
|
|
|
f(); |
|
|
|
|
GPR_ASSERT(GRPC_LOG_IF_ERROR( |
|
|
|
|
"pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops), |
|
|
|
|
&worker, n_seconds_time(1)))); |
|
|
|
|
gpr_mu_unlock(g_mu); |
|
|
|
|
"pollset_kick", |
|
|
|
|
grpc_pollset_kick(grpc_polling_entity_pollset(&pops_), nullptr))); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
gpr_mu_lock(g_mu); |
|
|
|
|
  // Polls the fixture's pollset until `predicate` returns true, asserting
  // that this happens before `deadline`. The pollset mutex is held while the
  // predicate is evaluated; after each poll cycle it is briefly released and
  // re-acquired so that other threads (e.g. callers of RunAndKick) can make
  // progress.
  void PollUntil(const std::function<bool()>& predicate, absl::Time deadline) {
    gpr_mu_lock(mu_);
    while (!predicate()) {
      // Fail fast instead of hanging forever if the predicate never flips.
      GPR_ASSERT(absl::Now() < deadline);
      grpc_pollset_worker* worker = nullptr;
      GPR_ASSERT(GRPC_LOG_IF_ERROR(
          "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&pops_),
                                            &worker, NSecondsTime(1))));
      // Momentarily yield the lock so closures that need mu_ can run between
      // poll iterations.
      gpr_mu_unlock(mu_);
      gpr_mu_lock(mu_);
    }
    gpr_mu_unlock(mu_);
  }
|
|
|
|
gpr_mu_unlock(g_mu); |
|
|
|
|
gpr_free(host); |
|
|
|
|
grpc_http_response_destroy(&response); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
static void test_post(int port) { |
|
|
|
|
grpc_httpcli_request req; |
|
|
|
|
char* host; |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
  // Returns the polling entity that requests under test should poll against.
  grpc_polling_entity* pops() { return &pops_; }
|
|
|
|
|
|
|
|
|
g_done = 0; |
|
|
|
|
gpr_log(GPR_INFO, "test_post"); |
|
|
|
|
protected: |
|
|
|
|
static void SetUpTestSuite() { |
|
|
|
|
auto test_server = grpc_core::testing::StartHttpRequestTestServer( |
|
|
|
|
g_argc, g_argv, false /* use_ssl */); |
|
|
|
|
g_server = test_server.server; |
|
|
|
|
g_server_port = test_server.port; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
gpr_asprintf(&host, "localhost:%d", port); |
|
|
|
|
gpr_log(GPR_INFO, "posting to %s", host); |
|
|
|
|
  // Tears down the HTTP test server subprocess started in SetUpTestSuite.
  static void TearDownTestSuite() { gpr_subprocess_destroy(g_server); }
|
|
|
|
|
|
|
|
|
memset(&req, 0, sizeof(req)); |
|
|
|
|
req.host = host; |
|
|
|
|
req.http.path = const_cast<char*>("/post"); |
|
|
|
|
req.handshaker = &grpc_httpcli_plaintext; |
|
|
|
|
|
|
|
|
|
grpc_http_response response; |
|
|
|
|
response = {}; |
|
|
|
|
grpc_httpcli_post( |
|
|
|
|
&g_pops, grpc_core::ResourceQuota::Default(), &req, "hello", 5, |
|
|
|
|
n_seconds_time(15), |
|
|
|
|
GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx), |
|
|
|
|
&response); |
|
|
|
|
gpr_mu_lock(g_mu); |
|
|
|
|
while (!g_done) { |
|
|
|
|
grpc_pollset_worker* worker = nullptr; |
|
|
|
|
GPR_ASSERT(GRPC_LOG_IF_ERROR( |
|
|
|
|
"pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops), |
|
|
|
|
&worker, n_seconds_time(1)))); |
|
|
|
|
gpr_mu_unlock(g_mu); |
|
|
|
|
private: |
|
|
|
|
static void DestroyPops(void* p, grpc_error_handle /*error*/) { |
|
|
|
|
grpc_polling_entity* pops = static_cast<grpc_polling_entity*>(p); |
|
|
|
|
grpc_pollset_destroy(grpc_polling_entity_pollset(pops)); |
|
|
|
|
gpr_free(grpc_polling_entity_pollset(pops)); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
gpr_mu* mu_; |
|
|
|
|
grpc_polling_entity pops_; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
struct RequestState { |
|
|
|
|
explicit RequestState(HttpRequestTest* test) : test(test) {} |
|
|
|
|
|
|
|
|
|
gpr_mu_lock(g_mu); |
|
|
|
|
~RequestState() { |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
grpc_http_response_destroy(&response); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
HttpRequestTest* test; |
|
|
|
|
bool done = false; |
|
|
|
|
grpc_http_response response = {}; |
|
|
|
|
grpc_pollset_set* pollset_set_to_destroy_eagerly = nullptr; |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
void OnFinish(void* arg, grpc_error_handle error) { |
|
|
|
|
RequestState* request_state = static_cast<RequestState*>(arg); |
|
|
|
|
if (request_state->pollset_set_to_destroy_eagerly != nullptr) { |
|
|
|
|
// Destroy the request's polling entity param. The goal is to try to catch a
|
|
|
|
|
// bug where we might still be referencing the polling entity by
|
|
|
|
|
// a pending TCP connect.
|
|
|
|
|
grpc_pollset_set_destroy(request_state->pollset_set_to_destroy_eagerly); |
|
|
|
|
} |
|
|
|
|
gpr_mu_unlock(g_mu); |
|
|
|
|
gpr_free(host); |
|
|
|
|
grpc_http_response_destroy(&response); |
|
|
|
|
const char* expect = |
|
|
|
|
"<html><head><title>Hello world!</title></head>" |
|
|
|
|
"<body><p>This is a test</p></body></html>"; |
|
|
|
|
GPR_ASSERT(error == GRPC_ERROR_NONE); |
|
|
|
|
grpc_http_response response = request_state->response; |
|
|
|
|
gpr_log(GPR_INFO, "response status=%d error=%s", response.status, |
|
|
|
|
grpc_error_std_string(error).c_str()); |
|
|
|
|
GPR_ASSERT(response.status == 200); |
|
|
|
|
GPR_ASSERT(response.body_length == strlen(expect)); |
|
|
|
|
GPR_ASSERT(0 == memcmp(expect, response.body, response.body_length)); |
|
|
|
|
request_state->test->RunAndKick( |
|
|
|
|
[request_state]() { request_state->done = true; }); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
static void destroy_pops(void* p, grpc_error_handle /*error*/) { |
|
|
|
|
grpc_pollset_destroy( |
|
|
|
|
grpc_polling_entity_pollset(static_cast<grpc_polling_entity*>(p))); |
|
|
|
|
void OnFinishExpectFailure(void* arg, grpc_error_handle error) { |
|
|
|
|
RequestState* request_state = static_cast<RequestState*>(arg); |
|
|
|
|
if (request_state->pollset_set_to_destroy_eagerly != nullptr) { |
|
|
|
|
// Destroy the request's polling entity param. The goal is to try to catch a
|
|
|
|
|
// bug where we might still be referencing the polling entity by
|
|
|
|
|
// a pending TCP connect.
|
|
|
|
|
grpc_pollset_set_destroy(request_state->pollset_set_to_destroy_eagerly); |
|
|
|
|
} |
|
|
|
|
grpc_http_response response = request_state->response; |
|
|
|
|
gpr_log(GPR_INFO, "response status=%d error=%s", response.status, |
|
|
|
|
grpc_error_std_string(error).c_str()); |
|
|
|
|
GPR_ASSERT(error != GRPC_ERROR_NONE); |
|
|
|
|
request_state->test->RunAndKick( |
|
|
|
|
[request_state]() { request_state->done = true; }); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
int main(int argc, char** argv) { |
|
|
|
|
gpr_subprocess* server; |
|
|
|
|
grpc::testing::TestEnvironment env(argc, argv); |
|
|
|
|
grpc_init(); |
|
|
|
|
{ |
|
|
|
|
grpc_closure destroyed; |
|
|
|
|
grpc_core::ExecCtx exec_ctx; |
|
|
|
|
char* me = argv[0]; |
|
|
|
|
char* lslash = strrchr(me, '/'); |
|
|
|
|
char* args[4]; |
|
|
|
|
int port = grpc_pick_unused_port_or_die(); |
|
|
|
|
int arg_shift = 0; |
|
|
|
|
/* figure out where we are */ |
|
|
|
|
char* root; |
|
|
|
|
if (lslash != nullptr) { |
|
|
|
|
/* Hack for bazel target */ |
|
|
|
|
if (static_cast<unsigned>(lslash - me) >= (sizeof("http") - 1) && |
|
|
|
|
strncmp(me + (lslash - me) - sizeof("http") + 1, "http", |
|
|
|
|
sizeof("http") - 1) == 0) { |
|
|
|
|
lslash = me + (lslash - me) - sizeof("http"); |
|
|
|
|
} |
|
|
|
|
root = static_cast<char*>( |
|
|
|
|
gpr_malloc(static_cast<size_t>(lslash - me + sizeof("/../..")))); |
|
|
|
|
memcpy(root, me, static_cast<size_t>(lslash - me)); |
|
|
|
|
memcpy(root + (lslash - me), "/../..", sizeof("/../..")); |
|
|
|
|
} else { |
|
|
|
|
root = gpr_strdup("."); |
|
|
|
|
} |
|
|
|
|
// Issues a plaintext HTTP GET against the suite's test server and verifies
// (via OnFinish) that the canned body comes back with status 200.
TEST_F(HttpRequestTest, Get) {
  RequestState state(this);
  grpc_http_request request;
  grpc_core::ExecCtx exec_ctx;
  const std::string authority = absl::StrFormat("localhost:%d", g_server_port);
  gpr_log(GPR_INFO, "requesting from %s", authority.c_str());
  memset(&request, 0, sizeof(request));
  auto uri = grpc_core::URI::Create("http", authority, "/get",
                                    /*query_parameter_pairs=*/{},
                                    /*fragment=*/"");
  GPR_ASSERT(uri.ok());
  auto http_request = grpc_core::HttpRequest::Get(
      std::move(*uri), /*channel args=*/nullptr, pops(), &request,
      NSecondsTime(15),
      GRPC_CLOSURE_CREATE(OnFinish, &state, grpc_schedule_on_exec_ctx),
      &state.response,
      grpc_core::RefCountedPtr<grpc_channel_credentials>(
          grpc_insecure_credentials_create()));
  http_request->Start();
  PollUntil([&state]() { return state.done; }, AbslDeadlineSeconds(60));
}
|
|
|
|
|
|
|
|
|
GPR_ASSERT(argc <= 2); |
|
|
|
|
if (argc == 2) { |
|
|
|
|
args[0] = gpr_strdup(argv[1]); |
|
|
|
|
} else { |
|
|
|
|
arg_shift = 1; |
|
|
|
|
gpr_asprintf(&args[0], "%s/test/core/http/python_wrapper.sh", root); |
|
|
|
|
gpr_asprintf(&args[1], "%s/test/core/http/test_server.py", root); |
|
|
|
|
} |
|
|
|
|
// Issues a plaintext HTTP POST with body "hello" against the suite's test
// server and verifies (via OnFinish) the canned 200 response.
TEST_F(HttpRequestTest, Post) {
  RequestState state(this);
  grpc_http_request request;
  grpc_core::ExecCtx exec_ctx;
  const std::string authority = absl::StrFormat("localhost:%d", g_server_port);
  gpr_log(GPR_INFO, "posting to %s", authority.c_str());
  memset(&request, 0, sizeof(request));
  request.body = const_cast<char*>("hello");
  request.body_length = 5;
  auto uri = grpc_core::URI::Create("http", authority, "/post",
                                    /*query_parameter_pairs=*/{},
                                    /*fragment=*/"");
  GPR_ASSERT(uri.ok());
  auto http_request = grpc_core::HttpRequest::Post(
      std::move(*uri), /*channel args=*/nullptr, pops(), &request,
      NSecondsTime(15),
      GRPC_CLOSURE_CREATE(OnFinish, &state, grpc_schedule_on_exec_ctx),
      &state.response,
      grpc_core::RefCountedPtr<grpc_channel_credentials>(
          grpc_insecure_credentials_create()));
  http_request->Start();
  PollUntil([&state]() { return state.done; }, AbslDeadlineSeconds(60));
}
|
|
|
|
|
|
|
|
|
/* start the server */ |
|
|
|
|
args[1 + arg_shift] = const_cast<char*>("--port"); |
|
|
|
|
gpr_asprintf(&args[2 + arg_shift], "%d", port); |
|
|
|
|
server = |
|
|
|
|
gpr_subprocess_create(3 + arg_shift, const_cast<const char**>(args)); |
|
|
|
|
GPR_ASSERT(server); |
|
|
|
|
gpr_free(args[0]); |
|
|
|
|
if (arg_shift) gpr_free(args[1]); |
|
|
|
|
gpr_free(args[2 + arg_shift]); |
|
|
|
|
gpr_free(root); |
|
|
|
|
// Port of the fake (unresponsive) DNS server; set by the cancellation test
// before InjectNonResponsiveDNSServer is installed as the c-ares config hook.
int g_fake_non_responsive_dns_server_port;
|
|
|
|
|
|
|
|
|
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), |
|
|
|
|
gpr_time_from_seconds(5, GPR_TIMESPAN))); |
|
|
|
|
// c-ares config hook: points the resolver at a deliberately unresponsive DNS
// server at [::1]:g_fake_non_responsive_dns_server_port so that DNS
// resolution hangs and request-cancellation paths can be exercised.
void InjectNonResponsiveDNSServer(ares_channel channel) {
  gpr_log(GPR_DEBUG,
          "Injecting broken nameserver list. Bad server address:|[::1]:%d|.",
          g_fake_non_responsive_dns_server_port);
  // Configure a non-responsive DNS server at the front of c-ares's nameserver
  // list.
  struct ares_addr_port_node dns_server_addrs[1];
  // Zero the whole node before filling it in. Previously only byte 15 of the
  // IPv6 address was written, leaving the other 15 bytes (and struct padding)
  // as uninitialized stack garbage — so the injected address was not actually
  // [::1].
  memset(dns_server_addrs, 0, sizeof(dns_server_addrs));
  dns_server_addrs[0].family = AF_INET6;
  (reinterpret_cast<char*>(&dns_server_addrs[0].addr.addr6))[15] = 0x1;
  dns_server_addrs[0].tcp_port = g_fake_non_responsive_dns_server_port;
  dns_server_addrs[0].udp_port = g_fake_non_responsive_dns_server_port;
  dns_server_addrs[0].next = nullptr;
  GPR_ASSERT(ares_set_servers_ports(channel, dns_server_addrs) == ARES_SUCCESS);
}
|
|
|
|
|
|
|
|
|
grpc_pollset* pollset = |
|
|
|
|
static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size())); |
|
|
|
|
grpc_pollset_init(pollset, &g_mu); |
|
|
|
|
g_pops = grpc_polling_entity_create_from_pollset(pollset); |
|
|
|
|
// Verifies that an in-flight GET can be cancelled (reset) while it is stuck
// in DNS resolution against an unresponsive nameserver, without crashes,
// hangs, or races.
TEST_F(HttpRequestTest, CancelGetDuringDNSResolution) {
  // Inject an unresponsive DNS server into the resolver's DNS server config
  grpc_core::testing::FakeUdpAndTcpServer fake_dns_server(
      grpc_core::testing::FakeUdpAndTcpServer::AcceptMode::
          kWaitForClientToSendFirstBytes,
      grpc_core::testing::FakeUdpAndTcpServer::CloseSocketUponCloseFromPeer);
  g_fake_non_responsive_dns_server_port = fake_dns_server.port();
  // Save the previous c-ares config hook so it can be restored at the end.
  void (*prev_test_only_inject_config)(ares_channel channel) =
      grpc_ares_test_only_inject_config;
  grpc_ares_test_only_inject_config = InjectNonResponsiveDNSServer;
  // Run the same test on several threads in parallel to try to trigger races
  // etc.
  int kNumThreads = 100;
  std::vector<std::thread> threads;
  threads.reserve(kNumThreads);
  for (int i = 0; i < kNumThreads; i++) {
    threads.push_back(std::thread([this]() {
      RequestState request_state(this);
      grpc_http_request req;
      grpc_core::ExecCtx exec_ctx;
      memset(&req, 0, sizeof(grpc_http_request));
      // The hostname is irrelevant: resolution is expected to hang against
      // the injected unresponsive DNS server.
      auto uri = grpc_core::URI::Create(
          "http", "dont-care-since-wont-be-resolved.test.com:443", "/get",
          {} /* query params */, "" /* fragment */);
      GPR_ASSERT(uri.ok());
      grpc_core::OrphanablePtr<grpc_core::HttpRequest> http_request =
          grpc_core::HttpRequest::Get(
              std::move(*uri), nullptr /* channel args */, pops(), &req,
              NSecondsTime(120),
              GRPC_CLOSURE_CREATE(OnFinishExpectFailure, &request_state,
                                  grpc_schedule_on_exec_ctx),
              &request_state.response,
              grpc_core::RefCountedPtr<grpc_channel_credentials>(
                  grpc_insecure_credentials_create()));
      http_request->Start();
      // Cancel the request from a separate thread while DNS resolution is
      // (presumably) still pending.
      std::thread cancel_thread([&http_request]() {
        gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
        grpc_core::ExecCtx exec_ctx;
        http_request.reset();
      });
      // Poll with a deadline explicitly lower than the request timeout, so
      // that we know that the request timeout isn't just kicking in.
      PollUntil([&request_state]() { return request_state.done; },
                AbslDeadlineSeconds(60));
      cancel_thread.join();
    }));
  }
  for (auto& t : threads) {
    t.join();
  }
  grpc_ares_test_only_inject_config = prev_test_only_inject_config;
}
|
|
|
|
|
|
|
|
|
test_get(port); |
|
|
|
|
test_post(port); |
|
|
|
|
// Verifies that an in-flight GET can be cancelled (reset) while the client is
// blocked waiting for response bytes that will never arrive.
TEST_F(HttpRequestTest, CancelGetWhileReadingResponse) {
  // Start up a fake HTTP server which just accepts connections
  // and then hangs, i.e. does not send back any bytes to the client.
  // The goal here is to get the client to connect to this fake server
  // and send a request, and then sit waiting for a response. Then, a
  // separate thread will cancel the HTTP request, and that should let it
  // complete.
  grpc_core::testing::FakeUdpAndTcpServer fake_http_server(
      grpc_core::testing::FakeUdpAndTcpServer::AcceptMode::
          kWaitForClientToSendFirstBytes,
      grpc_core::testing::FakeUdpAndTcpServer::CloseSocketUponCloseFromPeer);
  // Run the same test on several threads in parallel to try to trigger races
  // etc.
  int kNumThreads = 100;
  std::vector<std::thread> threads;
  threads.reserve(kNumThreads);
  for (int i = 0; i < kNumThreads; i++) {
    // Capture the server by pointer so the lambda does not copy it.
    grpc_core::testing::FakeUdpAndTcpServer* fake_http_server_ptr =
        &fake_http_server;
    threads.push_back(std::thread([this, fake_http_server_ptr]() {
      RequestState request_state(this);
      grpc_http_request req;
      grpc_core::ExecCtx exec_ctx;
      memset(&req, 0, sizeof(req));
      auto uri = grpc_core::URI::Create("http", fake_http_server_ptr->address(),
                                        "/get", {} /* query params */,
                                        "" /* fragment */);
      GPR_ASSERT(uri.ok());
      grpc_core::OrphanablePtr<grpc_core::HttpRequest> http_request =
          grpc_core::HttpRequest::Get(
              std::move(*uri), nullptr /* channel args */, pops(), &req,
              NSecondsTime(120),
              GRPC_CLOSURE_CREATE(OnFinishExpectFailure, &request_state,
                                  grpc_schedule_on_exec_ctx),
              &request_state.response,
              grpc_core::RefCountedPtr<grpc_channel_credentials>(
                  grpc_insecure_credentials_create()));
      http_request->Start();
      // Flush so the connect/send actually kicks off before we cancel.
      exec_ctx.Flush();
      std::thread cancel_thread([&http_request]() {
        gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
        grpc_core::ExecCtx exec_ctx;
        http_request.reset();
      });
      // Poll with a deadline explicitly lower than the request timeout, so
      // that we know that the request timeout isn't just kicking in.
      PollUntil([&request_state]() { return request_state.done; },
                AbslDeadlineSeconds(60));
      cancel_thread.join();
    }));
  }
  for (auto& t : threads) {
    t.join();
  }
}
|
|
|
|
|
|
|
|
|
GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops, |
|
|
|
|
grpc_schedule_on_exec_ctx); |
|
|
|
|
grpc_pollset_shutdown(grpc_polling_entity_pollset(&g_pops), &destroyed); |
|
|
|
|
// The main point of this test is just to exercise the machinery around
|
|
|
|
|
// cancellation during TCP connection establishment, to make sure there are no
|
|
|
|
|
// crashes/races etc. This test doesn't actually verify that cancellation during
|
|
|
|
|
// TCP setup is happening, though. For that, we would need to induce packet loss
|
|
|
|
|
// in the test.
|
|
|
|
|
// Exercises cancellation racing with TCP connection failure. See the comment
// block above this test for the full rationale.
// NOTE(review): a stray `grpc_shutdown();` call — a remnant of the old
// non-gtest main() left behind by a bad merge — was removed from between the
// thread-spawning loop and the join loop. Shutting gRPC down while 100 worker
// threads are still running would be incorrect; the fixture's destructor
// handles shutdown.
TEST_F(HttpRequestTest, CancelGetRacesWithConnectionFailure) {
  // Grab an unoccupied port but don't listen on it. The goal
  // here is just to have a server address that will reject
  // TCP connection setups.
  // Note that because the server is rejecting TCP connections, we
  // don't really need to cancel the HTTP requests in this test case
  // in order for them proceeed i.e. in order for them to pass. The test
  // is still beneficial though because it can exercise the same code paths
  // that would get taken if the HTTP request was cancelled while the TCP
  // connect attempt was actually hanging.
  int fake_server_port = grpc_pick_unused_port_or_die();
  std::string fake_server_address =
      absl::StrCat("[::1]:", std::to_string(fake_server_port));
  // Run the same test on several threads in parallel to try to trigger races
  // etc.
  int kNumThreads = 100;
  std::vector<std::thread> threads;
  threads.reserve(kNumThreads);
  for (int i = 0; i < kNumThreads; i++) {
    threads.push_back(std::thread([this, fake_server_address]() {
      RequestState request_state(this);
      grpc_http_request req;
      grpc_core::ExecCtx exec_ctx;
      memset(&req, 0, sizeof(req));
      auto uri =
          grpc_core::URI::Create("http", fake_server_address, "/get",
                                 {} /* query params */, "" /* fragment */);
      GPR_ASSERT(uri.ok());
      grpc_core::OrphanablePtr<grpc_core::HttpRequest> http_request =
          grpc_core::HttpRequest::Get(
              std::move(*uri), nullptr /* channel args */, pops(), &req,
              NSecondsTime(120),
              GRPC_CLOSURE_CREATE(OnFinishExpectFailure, &request_state,
                                  grpc_schedule_on_exec_ctx),
              &request_state.response,
              grpc_core::RefCountedPtr<grpc_channel_credentials>(
                  grpc_insecure_credentials_create()));
      // Start the HTTP request. We will ~immediately begin a TCP connect
      // attempt because there's no name to resolve.
      http_request->Start();
      exec_ctx.Flush();
      // Spawn a separate thread which ~immediately cancels the HTTP request.
      // Note that even though the server is rejecting TCP connections, it can
      // still take some time for the client to receive that rejection. So
      // cancelling the request now can trigger the code paths that would get
      // taken if the TCP connection was truly hanging e.g. from packet loss.
      // The goal is just to make sure there are no crashes, races, etc.
      std::thread cancel_thread([&http_request]() {
        grpc_core::ExecCtx exec_ctx;
        http_request.reset();
      });
      // Poll with a deadline explicitly lower than the request timeout, so
      // that we know that the request timeout isn't just kicking in.
      PollUntil([&request_state]() { return request_state.done; },
                AbslDeadlineSeconds(60));
      cancel_thread.join();
    }));
  }
  for (auto& t : threads) {
    t.join();
  }
}
|
|
|
|
|
|
|
|
|
gpr_free(grpc_polling_entity_pollset(&g_pops)); |
|
|
|
|
// The pollent parameter passed to HttpRequest::Get or Post is owned by
|
|
|
|
|
// the caller and must not be referenced by the HttpRequest after the
|
|
|
|
|
// requests's on_done callback is invoked. This test verifies that this
|
|
|
|
|
// isn't happening by destroying the request's pollset set within the
|
|
|
|
|
// on_done callback.
|
|
|
|
|
TEST_F(HttpRequestTest, CallerPollentsAreNotReferencedAfterCallbackIsRan) {
  // Grab an unoccupied port but don't listen on it. The goal
  // here is just to have a server address that will reject
  // TCP connection setups.
  // Note that we could have used a different server for this test case, e.g.
  // one which accepts TCP connections. All we need here is something for the
  // client to connect to, since it will be cancelled roughly during the
  // connection attempt anyways.
  int fake_server_port = grpc_pick_unused_port_or_die();
  std::string fake_server_address =
      absl::StrCat("[::1]:", std::to_string(fake_server_port));
  RequestState request_state(this);
  grpc_http_request req;
  grpc_core::ExecCtx exec_ctx;
  memset(&req, 0, sizeof(req));
  req.path = const_cast<char*>("/get");
  // Hand the request a pollset set that the on_done callback
  // (OnFinishExpectFailure) will destroy eagerly — this is what catches any
  // lingering reference to the caller-owned pollent.
  request_state.pollset_set_to_destroy_eagerly = grpc_pollset_set_create();
  grpc_polling_entity_add_to_pollset_set(
      pops(), request_state.pollset_set_to_destroy_eagerly);
  grpc_polling_entity wrapped_pollset_set_to_destroy_eagerly =
      grpc_polling_entity_create_from_pollset_set(
          request_state.pollset_set_to_destroy_eagerly);
  auto uri = grpc_core::URI::Create("http", fake_server_address, "/get",
                                    {} /* query params */, "" /* fragment */);
  GPR_ASSERT(uri.ok());
  grpc_core::OrphanablePtr<grpc_core::HttpRequest> http_request =
      grpc_core::HttpRequest::Get(
          std::move(*uri), nullptr /* channel args */,
          &wrapped_pollset_set_to_destroy_eagerly, &req, NSecondsTime(15),
          GRPC_CLOSURE_CREATE(OnFinishExpectFailure, &request_state,
                              grpc_schedule_on_exec_ctx),
          &request_state.response,
          grpc_core::RefCountedPtr<grpc_channel_credentials>(
              grpc_insecure_credentials_create()));
  // Start the HTTP request. We'll start the TCP connect attempt right away.
  http_request->Start();
  exec_ctx.Flush();
  http_request.reset();  // cancel the request
  // Since the request was cancelled, the on_done callback should be flushed
  // out on the ExecCtx flush below. When the on_done callback is ran, it will
  // eagerly destroy 'request_state.pollset_set_to_destroy_eagerly'. Thus, we
  // can't poll on that pollset here.
  exec_ctx.Flush();
}
|
|
|
|
|
|
|
|
|
gpr_subprocess_destroy(server); |
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
|
|
return 0; |
|
|
|
|
// Test entry point. Records argc/argv in globals for SetUpTestSuite, which
// launches the HTTP test server subprocess lazily.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  grpc::testing::TestEnvironment env(argc, argv);
  // launch the test server later, so that --gtest_list_tests works
  g_argc = argc;
  g_argv = argv;
  // run tests
  return RUN_ALL_TESTS();
}
|
|
|
|