GOAWAY & Reconnection support.

Clients stay connected to a server after it shuts down until all active calls
have completed, and then they drop.

After a GOAWAY or a disconnect, clients will attempt to re-resolve and
reconnect to the server.
	Change on 2014/12/15 by ctiller <ctiller@google.com>
-------------
Created by MOE: http://code.google.com/p/moe-java
MOE_MIGRATED_REVID=82178515
pull/1/merge
ctiller 10 years ago committed by Nicolas Noble
parent 608254011d
commit c6d61c4dd6
  1. 252
      Makefile
  2. 156
      src/core/channel/child_channel.c
  3. 46
      src/core/channel/client_channel.c
  4. 26
      src/core/iomgr/iomgr_libevent.c
  5. 7
      src/core/iomgr/iomgr_libevent.h
  6. 2
      src/core/iomgr/tcp_client_posix.c
  7. 2
      src/core/iomgr/tcp_posix.c
  8. 9
      src/core/iomgr/tcp_server_posix.c
  9. 3
      src/core/surface/call.c
  10. 26
      test/core/end2end/dualstack_socket_test.c
  11. 6
      test/core/end2end/fixtures/chttp2_fake_security.c
  12. 13
      test/core/end2end/fixtures/chttp2_fullstack.c
  13. 6
      test/core/end2end/fixtures/chttp2_simple_ssl_fullstack.c
  14. 6
      test/core/end2end/fixtures/chttp2_simple_ssl_with_oauth2_fullstack.c
  15. 9
      test/core/end2end/fixtures/chttp2_socket_pair.c
  16. 9
      test/core/end2end/fixtures/chttp2_socket_pair_one_byte_at_a_time.c
  17. 1
      test/core/end2end/gen_build_json.py
  18. 168
      test/core/end2end/tests/disappearing_server.c
  19. 10
      test/core/iomgr/fd_posix_test.c

File diff suppressed because one or more lines are too long

@ -38,20 +38,30 @@
/* Link back filter: passes calls up to the client channel, and pushes calls
down */
static void unref_channel(grpc_child_channel *channel);
static void maybe_destroy_channel(grpc_child_channel *channel);
typedef struct {
gpr_mu mu;
gpr_cv cv;
grpc_channel_element *back;
gpr_refcount refs;
int calling_back;
int sent_goaway;
/* # of active calls on the channel */
gpr_uint32 active_calls;
/* has grpc_child_channel_destroy been called? */
gpr_uint8 destroyed;
/* has the transport reported itself disconnected? */
gpr_uint8 disconnected;
/* are we calling 'back' - our parent channel */
gpr_uint8 calling_back;
/* have we or our parent sent goaway yet? - dup suppression */
gpr_uint8 sent_goaway;
/* are we currently sending farewell (in this file: goaway + disconnect) */
gpr_uint8 sending_farewell;
/* have we sent farewell (goaway + disconnect) */
gpr_uint8 sent_farewell;
} lb_channel_data;
typedef struct {
grpc_call_element *back;
gpr_refcount refs;
grpc_child_channel *channel;
} lb_call_data;
@ -69,10 +79,6 @@ static void lb_call_op(grpc_call_element *elem, grpc_call_element *from_elem,
}
}
static void delayed_unref(void *elem, grpc_iomgr_cb_status status) {
unref_channel(grpc_channel_stack_from_top_element(elem));
}
/* Currently we assume all channel operations should just be pushed up. */
static void lb_channel_op(grpc_channel_element *elem,
grpc_channel_element *from_elem,
@ -92,6 +98,8 @@ static void lb_channel_op(grpc_channel_element *elem,
chand->calling_back--;
gpr_cv_broadcast(&chand->cv);
gpr_mu_unlock(&chand->mu);
} else if (op->type == GRPC_TRANSPORT_GOAWAY) {
gpr_slice_unref(op->data.goaway.message);
}
break;
case GRPC_CALL_DOWN:
@ -101,7 +109,10 @@ static void lb_channel_op(grpc_channel_element *elem,
switch (op->type) {
case GRPC_TRANSPORT_CLOSED:
grpc_iomgr_add_callback(delayed_unref, elem);
gpr_mu_lock(&chand->mu);
chand->disconnected = 1;
maybe_destroy_channel(grpc_channel_stack_from_top_element(elem));
gpr_mu_unlock(&chand->mu);
break;
case GRPC_CHANNEL_GOAWAY:
gpr_mu_lock(&chand->mu);
@ -132,12 +143,14 @@ static void lb_init_channel_elem(grpc_channel_element *elem,
GPR_ASSERT(!is_last);
gpr_mu_init(&chand->mu);
gpr_cv_init(&chand->cv);
/* one ref for getting grpc_child_channel_destroy called, one for getting
disconnected */
gpr_ref_init(&chand->refs, 2);
chand->back = NULL;
chand->destroyed = 0;
chand->disconnected = 0;
chand->active_calls = 0;
chand->sent_goaway = 0;
chand->calling_back = 0;
chand->sending_farewell = 0;
chand->sent_farewell = 0;
}
/* Destructor for channel_data */
@ -164,35 +177,59 @@ const grpc_channel_filter grpc_child_channel_top_filter = {
#define LINK_BACK_ELEM_FROM_CALL(call) grpc_call_stack_element((call), 0)
static void unref_channel(grpc_child_channel *channel) {
lb_channel_data *lb = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
if (gpr_unref(&lb->refs)) {
grpc_channel_stack_destroy(channel);
gpr_free(channel);
}
/* iomgr callback: actually free a child channel once it is fully quiesced.
   Scheduled by maybe_destroy_channel; runs outside the channel lock. */
static void finally_destroy_channel(void *c, grpc_iomgr_cb_status status) {
grpc_child_channel *channel = c;
lb_channel_data *chand = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
/* wait for the initiator to leave the mutex: lock/unlock guarantees the
   thread that scheduled us has released chand->mu before we free it */
gpr_mu_lock(&chand->mu);
gpr_mu_unlock(&chand->mu);
grpc_channel_stack_destroy(channel);
gpr_free(channel);
}
/* Take a reference on the child channel's link-back channel data. */
static void ref_channel(grpc_child_channel *channel) {
  lb_channel_data *chand = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
  gpr_ref(&chand->refs);
}
static void send_farewells(void *c, grpc_iomgr_cb_status status) {
grpc_child_channel *channel = c;
grpc_channel_element *lbelem = LINK_BACK_ELEM_FROM_CHANNEL(channel);
lb_channel_data *chand = lbelem->channel_data;
int send_goaway;
grpc_channel_op op;
gpr_mu_lock(&chand->mu);
send_goaway = !chand->sent_goaway;
chand->sent_goaway = 1;
gpr_mu_unlock(&chand->mu);
static void unref_call(grpc_child_call *call) {
lb_call_data *lb = LINK_BACK_ELEM_FROM_CALL(call)->call_data;
if (gpr_unref(&lb->refs)) {
grpc_child_channel *channel = lb->channel;
grpc_call_stack_destroy(call);
gpr_free(call);
unref_channel(channel);
if (send_goaway) {
op.type = GRPC_CHANNEL_GOAWAY;
op.dir = GRPC_CALL_DOWN;
op.data.goaway.status = GRPC_STATUS_OK;
op.data.goaway.message = gpr_slice_from_copied_string("Client disconnect");
grpc_channel_next_op(lbelem, &op);
}
op.type = GRPC_CHANNEL_DISCONNECT;
op.dir = GRPC_CALL_DOWN;
grpc_channel_next_op(lbelem, &op);
gpr_mu_lock(&chand->mu);
chand->sending_farewell = 0;
chand->sent_farewell = 1;
maybe_destroy_channel(channel);
gpr_mu_unlock(&chand->mu);
}
#if 0
static void ref_call(grpc_child_call *call) {
lb_call_data *lb = LINK_BACK_ELEM_FROM_CALL(call)->call_data;
gpr_ref(&lb->refs);
/* Advance the channel's shutdown state machine. Caller must hold chand->mu
   (every call site in this file locks it first).
   Two possible next steps:
   - destroyed + disconnected + no active calls + not mid-farewell:
     the channel is fully quiesced, so schedule its final destruction;
   - destroyed but still connected, idle, and farewell not yet started:
     begin sending the farewell (goaway + disconnect). */
static void maybe_destroy_channel(grpc_child_channel *channel) {
lb_channel_data *chand = LINK_BACK_ELEM_FROM_CHANNEL(channel)->channel_data;
if (chand->destroyed && chand->disconnected && chand->active_calls == 0 &&
!chand->sending_farewell) {
/* defer the free to an iomgr callback so we never destroy the stack while
   the caller still holds chand->mu */
grpc_iomgr_add_callback(finally_destroy_channel, channel);
} else if (chand->destroyed && !chand->disconnected &&
chand->active_calls == 0 && !chand->sending_farewell &&
!chand->sent_farewell) {
chand->sending_farewell = 1;
grpc_iomgr_add_callback(send_farewells, channel);
}
}
#endif
grpc_child_channel *grpc_child_channel_create(
grpc_channel_element *parent, const grpc_channel_filter **filters,
@ -209,12 +246,10 @@ grpc_child_channel *grpc_child_channel_create(
lb->back = parent;
gpr_mu_unlock(&lb->mu);
return (grpc_child_channel *)stk;
return stk;
}
void grpc_child_channel_destroy(grpc_child_channel *channel) {
grpc_channel_op op;
int send_goaway = 0;
grpc_channel_element *lbelem = LINK_BACK_ELEM_FROM_CHANNEL(channel);
lb_channel_data *chand = lbelem->channel_data;
@ -222,24 +257,10 @@ void grpc_child_channel_destroy(grpc_child_channel *channel) {
while (chand->calling_back) {
gpr_cv_wait(&chand->cv, &chand->mu, gpr_inf_future);
}
send_goaway = !chand->sent_goaway;
chand->sent_goaway = 1;
chand->back = NULL;
chand->destroyed = 1;
maybe_destroy_channel(channel);
gpr_mu_unlock(&chand->mu);
if (send_goaway) {
op.type = GRPC_CHANNEL_GOAWAY;
op.dir = GRPC_CALL_DOWN;
op.data.goaway.status = GRPC_STATUS_OK;
op.data.goaway.message = gpr_slice_from_copied_string("Client disconnect");
grpc_channel_next_op(lbelem, &op);
}
op.type = GRPC_CHANNEL_DISCONNECT;
op.dir = GRPC_CALL_DOWN;
grpc_channel_next_op(lbelem, &op);
unref_channel(channel);
}
void grpc_child_channel_handle_op(grpc_child_channel *channel,
@ -250,19 +271,36 @@ void grpc_child_channel_handle_op(grpc_child_channel *channel,
grpc_child_call *grpc_child_channel_create_call(grpc_child_channel *channel,
grpc_call_element *parent) {
grpc_call_stack *stk = gpr_malloc((channel)->call_stack_size);
grpc_call_element *lbelem;
lb_call_data *lbcalld;
ref_channel(channel);
lb_channel_data *lbchand;
grpc_call_stack_init(channel, NULL, stk);
lbcalld = LINK_BACK_ELEM_FROM_CALL(stk)->call_data;
gpr_ref_init(&lbcalld->refs, 1);
lbelem = LINK_BACK_ELEM_FROM_CALL(stk);
lbchand = lbelem->channel_data;
lbcalld = lbelem->call_data;
lbcalld->back = parent;
lbcalld->channel = channel;
return (grpc_child_call *)stk;
gpr_mu_lock(&lbchand->mu);
lbchand->active_calls++;
gpr_mu_unlock(&lbchand->mu);
return stk;
}
void grpc_child_call_destroy(grpc_child_call *call) { unref_call(call); }
/* Destroy a child call: free its call stack, then decrement the owning
   channel's active-call count; the channel itself may be torn down once its
   last call completes (see maybe_destroy_channel). */
void grpc_child_call_destroy(grpc_child_call *call) {
grpc_call_element *lbelem = LINK_BACK_ELEM_FROM_CALL(call);
lb_call_data *calld = lbelem->call_data;
lb_channel_data *chand = lbelem->channel_data;
/* capture the channel pointer before the call stack (which holds calld) is
   freed below */
grpc_child_channel *channel = calld->channel;
grpc_call_stack_destroy(call);
gpr_free(call);
gpr_mu_lock(&chand->mu);
chand->active_calls--;
maybe_destroy_channel(channel);
gpr_mu_unlock(&chand->mu);
}
grpc_call_element *grpc_child_call_get_top_element(grpc_child_call *call) {
return LINK_BACK_ELEM_FROM_CALL(call);

@ -39,6 +39,7 @@
#include "src/core/channel/child_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/metadata_buffer.h"
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string.h>
@ -290,6 +291,14 @@ static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
}
}
/* iomgr callback: destroy a child channel outside the lock scope of whoever
   scheduled it. */
static void finally_destroy_channel(void *arg, grpc_iomgr_cb_status status) {
grpc_child_channel_destroy(arg);
}
/* Defer child-channel destruction to an iomgr callback so the current call
   path (which may hold chand->mu) does not destroy the channel inline. */
static void destroy_channel_later(grpc_child_channel *channel) {
grpc_iomgr_add_callback(finally_destroy_channel, channel);
}
static void channel_op(grpc_channel_element *elem,
grpc_channel_element *from_elem, grpc_channel_op *op) {
channel_data *chand = elem->channel_data;
@ -298,24 +307,57 @@ static void channel_op(grpc_channel_element *elem,
switch (op->type) {
case GRPC_CHANNEL_GOAWAY:
/* sending goaway: clear out the active child on the way through */
gpr_mu_lock(&chand->mu);
child_channel = chand->active_child;
chand->active_child = NULL;
gpr_mu_unlock(&chand->mu);
if (child_channel) {
grpc_child_channel_handle_op(child_channel, op);
grpc_child_channel_destroy(child_channel);
destroy_channel_later(child_channel);
} else {
gpr_slice_unref(op->data.goaway.message);
}
break;
case GRPC_CHANNEL_DISCONNECT:
/* sending disconnect: clear out the active child on the way through */
gpr_mu_lock(&chand->mu);
child_channel = chand->active_child;
chand->active_child = NULL;
gpr_mu_unlock(&chand->mu);
if (child_channel) {
grpc_child_channel_destroy(child_channel);
destroy_channel_later(child_channel);
}
break;
case GRPC_TRANSPORT_GOAWAY:
/* receiving goaway: if it's from our active child, drop the active child;
in all cases consume the event here */
gpr_mu_lock(&chand->mu);
child_channel = grpc_channel_stack_from_top_element(from_elem);
if (child_channel == chand->active_child) {
chand->active_child = NULL;
} else {
child_channel = NULL;
}
gpr_mu_unlock(&chand->mu);
if (child_channel) {
destroy_channel_later(child_channel);
}
gpr_slice_unref(op->data.goaway.message);
break;
case GRPC_TRANSPORT_CLOSED:
/* receiving disconnect: if it's from our active child, drop the active
child; in all cases consume the event here */
gpr_mu_lock(&chand->mu);
child_channel = grpc_channel_stack_from_top_element(from_elem);
if (child_channel == chand->active_child) {
chand->active_child = NULL;
} else {
child_channel = NULL;
}
gpr_mu_unlock(&chand->mu);
if (child_channel) {
destroy_channel_later(child_channel);
}
break;
default:

@ -119,6 +119,7 @@ static void free_fd_list(grpc_fd *impl) {
grpc_fd *current = impl;
impl = impl->next;
grpc_fd_impl_destroy(current);
current->on_done(current->on_done_user_data, GRPC_CALLBACK_SUCCESS);
gpr_free(current);
}
}
@ -556,20 +557,21 @@ grpc_fd *grpc_fd_create(int fd) {
return impl;
}
void grpc_fd_destroy(grpc_fd *impl) {
/* Default on_done callback substituted when grpc_fd_destroy is given NULL. */
static void do_nothing(void *ignored, grpc_iomgr_cb_status also_ignored) {}
void grpc_fd_destroy(grpc_fd *impl, grpc_iomgr_cb_func on_done,
void *user_data) {
if (on_done == NULL) on_done = do_nothing;
gpr_mu_lock(&grpc_iomgr_mu);
if (g_num_pollers == 0) {
/* it is safe to simply free it */
grpc_fd_impl_destroy(impl);
gpr_free(impl);
} else {
/* Put the impl on the list to be destroyed by the poller. */
impl->next = g_fds_to_free;
g_fds_to_free = impl;
/* TODO(ctiller): kick the poller so it destroys this fd promptly
(currently we may wait up to a second) */
}
/* Put the impl on the list to be destroyed by the poller. */
impl->on_done = on_done;
impl->on_done_user_data = user_data;
impl->next = g_fds_to_free;
g_fds_to_free = impl;
/* TODO(ctiller): kick the poller so it destroys this fd promptly
(currently we may wait up to a second) */
g_num_fds--;
gpr_cv_broadcast(&grpc_iomgr_cv);

@ -96,9 +96,12 @@ typedef struct grpc_libevent_task {
grpc_fd *grpc_fd_create(int fd);
/* Causes *em_fd to no longer be initialized, and closes the underlying fd.
on_done is called when the underlying file descriptor has definitely been
close()d. If on_done is NULL, no callback will be made.
Requires: *em_fd initialized; no outstanding notify_on_read or
notify_on_write. */
void grpc_fd_destroy(grpc_fd *em_fd);
void grpc_fd_destroy(grpc_fd *em_fd, grpc_iomgr_cb_func on_done,
void *user_data);
/* Returns the file descriptor associated with *em_fd. */
int grpc_fd_get(grpc_fd *em_fd);
@ -194,6 +197,8 @@ struct grpc_fd {
/* descriptor delete list. These are destroyed during polling. */
struct grpc_fd *next;
grpc_iomgr_cb_func on_done;
void *on_done_user_data;
};
/* gRPC alarm handle.

@ -131,7 +131,7 @@ static void on_writable(void *acp, grpc_iomgr_cb_status status) {
error:
ac->cb(ac->cb_arg, NULL);
grpc_fd_destroy(ac->fd);
grpc_fd_destroy(ac->fd, NULL, NULL);
gpr_free(ac);
return;

@ -276,7 +276,7 @@ static void grpc_tcp_shutdown(grpc_endpoint *ep) {
static void grpc_tcp_unref(grpc_tcp *tcp) {
int refcount_zero = gpr_unref(&tcp->refcount);
if (refcount_zero) {
grpc_fd_destroy(tcp->em_fd);
grpc_fd_destroy(tcp->em_fd, NULL, NULL);
gpr_free(tcp);
}
}

@ -97,8 +97,13 @@ grpc_tcp_server *grpc_tcp_server_create() {
return s;
}
/* grpc_fd_destroy on_done callback: signals the event so
   grpc_tcp_server_destroy can wait for each fd to be fully closed. */
static void done_destroy(void *p, grpc_iomgr_cb_status status) {
gpr_event_set(p, (void *)1);
}
void grpc_tcp_server_destroy(grpc_tcp_server *s) {
size_t i;
gpr_event fd_done;
gpr_mu_lock(&s->mu);
/* shutdown all fd's */
for (i = 0; i < s->nports; i++) {
@ -113,7 +118,9 @@ void grpc_tcp_server_destroy(grpc_tcp_server *s) {
/* delete ALL the things */
for (i = 0; i < s->nports; i++) {
server_port *sp = &s->ports[i];
grpc_fd_destroy(sp->emfd);
gpr_event_init(&fd_done);
grpc_fd_destroy(sp->emfd, done_destroy, &fd_done);
gpr_event_wait(&fd_done, gpr_inf_future);
}
gpr_free(s->ports);
gpr_free(s);

@ -256,12 +256,15 @@ void grpc_call_internal_unref(grpc_call *c) {
}
/* Public destructor for a call: cancels its deadline alarm, cancels the call
   itself if it has not already finished, and drops the caller's ref. */
void grpc_call_destroy(grpc_call *c) {
int cancel;
gpr_mu_lock(&c->read_mu);
if (c->have_alarm) {
/* stop the deadline alarm before tearing the call down */
grpc_alarm_cancel(&c->alarm);
c->have_alarm = 0;
}
/* read received_finish under read_mu; the cancel itself is issued only
   after the lock is released to avoid re-entering call machinery locked */
cancel = !c->received_finish;
gpr_mu_unlock(&c->read_mu);
if (cancel) grpc_call_cancel(c);
grpc_call_internal_unref(c);
}

@ -171,6 +171,7 @@ void test_connect(const char *server_host, const char *client_host, int port,
int main(int argc, char **argv) {
int do_ipv6 = 1;
int i;
int port = grpc_pick_unused_port_or_die();
grpc_test_init(argc, argv);
grpc_init();
@ -185,24 +186,23 @@ int main(int argc, char **argv) {
grpc_forbid_dualstack_sockets_for_testing = i;
/* :: and 0.0.0.0 are handled identically. */
test_connect("::", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
test_connect("::", "::ffff:127.0.0.1", grpc_pick_unused_port_or_die(), 1);
test_connect("::", "localhost", grpc_pick_unused_port_or_die(), 1);
test_connect("0.0.0.0", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
test_connect("0.0.0.0", "::ffff:127.0.0.1", grpc_pick_unused_port_or_die(),
1);
test_connect("0.0.0.0", "localhost", grpc_pick_unused_port_or_die(), 1);
test_connect("::", "127.0.0.1", port, 1);
test_connect("::", "::ffff:127.0.0.1", port, 1);
test_connect("::", "localhost", port, 1);
test_connect("0.0.0.0", "127.0.0.1", port, 1);
test_connect("0.0.0.0", "::ffff:127.0.0.1", port, 1);
test_connect("0.0.0.0", "localhost", port, 1);
if (do_ipv6) {
test_connect("::", "::1", grpc_pick_unused_port_or_die(), 1);
test_connect("0.0.0.0", "::1", grpc_pick_unused_port_or_die(), 1);
test_connect("::", "::1", port, 1);
test_connect("0.0.0.0", "::1", port, 1);
}
/* These only work when the families agree. */
test_connect("127.0.0.1", "127.0.0.1", grpc_pick_unused_port_or_die(), 1);
test_connect("127.0.0.1", "127.0.0.1", port, 1);
if (do_ipv6) {
test_connect("::1", "::1", grpc_pick_unused_port_or_die(), 1);
test_connect("::1", "127.0.0.1", grpc_pick_unused_port_or_die(), 0);
test_connect("127.0.0.1", "::1", grpc_pick_unused_port_or_die(), 0);
test_connect("::1", "::1", port, 1);
test_connect("::1", "127.0.0.1", port, 0);
test_connect("127.0.0.1", "::1", port, 0);
}
}

@ -57,6 +57,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack(
fullstack_secure_fixture_data *ffd =
gpr_malloc(sizeof(fullstack_secure_fixture_data));
memset(&f, 0, sizeof(f));
gpr_join_host_port(&ffd->localaddr, "localhost", port);
f.fixture_data = ffd;
@ -79,10 +80,13 @@ static void chttp2_init_server_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *server_args,
grpc_server_credentials *server_creds) {
fullstack_secure_fixture_data *ffd = f->fixture_data;
if (f->server) {
grpc_server_destroy(f->server);
}
f->server =
grpc_secure_server_create(server_creds, f->server_cq, server_args);
grpc_server_credentials_release(server_creds);
grpc_server_add_secure_http2_port(f->server, ffd->localaddr);
GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr));
grpc_server_start(f->server);
}

@ -33,14 +33,7 @@
#include "test/core/end2end/end2end_tests.h"
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
@ -68,6 +61,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
grpc_end2end_test_fixture f;
int port = grpc_pick_unused_port_or_die();
fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data));
memset(&f, 0, sizeof(f));
gpr_join_host_port(&ffd->localaddr, "localhost", port);
@ -87,8 +81,11 @@ void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
void chttp2_init_server_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *server_args) {
fullstack_fixture_data *ffd = f->fixture_data;
if (f->server) {
grpc_server_destroy(f->server);
}
f->server = grpc_server_create(f->server_cq, server_args);
grpc_server_add_http2_port(f->server, ffd->localaddr);
GPR_ASSERT(grpc_server_add_http2_port(f->server, ffd->localaddr));
grpc_server_start(f->server);
}

@ -56,6 +56,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack(
int port = grpc_pick_unused_port_or_die();
fullstack_secure_fixture_data *ffd =
gpr_malloc(sizeof(fullstack_secure_fixture_data));
memset(&f, 0, sizeof(f));
gpr_join_host_port(&ffd->localaddr, "localhost", port);
@ -79,10 +80,13 @@ static void chttp2_init_server_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *server_args,
grpc_server_credentials *server_creds) {
fullstack_secure_fixture_data *ffd = f->fixture_data;
if (f->server) {
grpc_server_destroy(f->server);
}
f->server =
grpc_secure_server_create(server_creds, f->server_cq, server_args);
grpc_server_credentials_release(server_creds);
grpc_server_add_secure_http2_port(f->server, ffd->localaddr);
GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr));
grpc_server_start(f->server);
}

@ -57,6 +57,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_secure_fullstack(
int port = grpc_pick_unused_port_or_die();
fullstack_secure_fixture_data *ffd =
gpr_malloc(sizeof(fullstack_secure_fixture_data));
memset(&f, 0, sizeof(f));
gpr_join_host_port(&ffd->localaddr, "localhost", port);
@ -80,10 +81,13 @@ static void chttp2_init_server_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *server_args,
grpc_server_credentials *server_creds) {
fullstack_secure_fixture_data *ffd = f->fixture_data;
if (f->server) {
grpc_server_destroy(f->server);
}
f->server =
grpc_secure_server_create(server_creds, f->server_cq, server_args);
grpc_server_credentials_release(server_creds);
grpc_server_add_secure_http2_port(f->server, ffd->localaddr);
GPR_ASSERT(grpc_server_add_secure_http2_port(f->server, ffd->localaddr));
grpc_server_start(f->server);
}

@ -32,6 +32,9 @@
*/
#include "test/core/end2end/end2end_tests.h"
#include <string.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_filter.h"
@ -88,11 +91,10 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
grpc_endpoint_pair *sfd = gpr_malloc(sizeof(grpc_endpoint_pair));
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
f.fixture_data = sfd;
f.client_cq = grpc_completion_queue_create();
f.server_cq = grpc_completion_queue_create();
f.server = grpc_server_create_from_filters(f.server_cq, NULL, 0, server_args);
f.client = NULL;
*sfd = grpc_iomgr_create_endpoint_pair(65536);
@ -113,6 +115,9 @@ static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f,
grpc_channel_args *server_args) {
grpc_endpoint_pair *sfd = f->fixture_data;
GPR_ASSERT(!f->server);
f->server =
grpc_server_create_from_filters(f->server_cq, NULL, 0, server_args);
grpc_create_chttp2_transport(server_setup_transport, f, server_args,
sfd->server, NULL, 0, grpc_mdctx_create(), 0);
}

@ -32,6 +32,9 @@
*/
#include "test/core/end2end/end2end_tests.h"
#include <string.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_filter.h"
@ -88,11 +91,10 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
grpc_endpoint_pair *sfd = gpr_malloc(sizeof(grpc_endpoint_pair));
grpc_end2end_test_fixture f;
memset(&f, 0, sizeof(f));
f.fixture_data = sfd;
f.client_cq = grpc_completion_queue_create();
f.server_cq = grpc_completion_queue_create();
f.server = grpc_server_create_from_filters(f.server_cq, NULL, 0, server_args);
f.client = NULL;
*sfd = grpc_iomgr_create_endpoint_pair(1);
@ -113,6 +115,9 @@ static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
static void chttp2_init_server_socketpair(grpc_end2end_test_fixture *f,
grpc_channel_args *server_args) {
grpc_endpoint_pair *sfd = f->fixture_data;
GPR_ASSERT(!f->server);
f->server =
grpc_server_create_from_filters(f->server_cq, NULL, 0, server_args);
grpc_create_chttp2_transport(server_setup_transport, f, server_args,
sfd->server, NULL, 0, grpc_mdctx_create(), 0);
}

@ -21,6 +21,7 @@ END2END_TESTS = [
'cancel_after_invoke',
'cancel_before_invoke',
'cancel_in_a_vacuum',
'disappearing_server',
'early_server_shutdown_finishes_inflight_calls',
'early_server_shutdown_finishes_tags',
'invoke_large_request',

@ -0,0 +1,168 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "test/core/end2end/end2end_tests.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <grpc/byte_buffer.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/end2end/cq_verifier.h"
enum { TIMEOUT = 200000 };
/* Turn a small integer into an opaque completion-queue tag pointer. */
static void *tag(gpr_intptr t) {
  return (void *)t;
}
/* Absolute deadline n seconds from now. */
static gpr_timespec n_seconds_time(int n) {
  gpr_timespec span = gpr_time_from_micros(GPR_US_PER_SEC * n);
  return gpr_time_add(gpr_now(), span);
}
/* Standard five-second test deadline.  Note: the parameter list is (void) —
   an empty () in C declares an unprototyped function, not a zero-argument
   one. */
static gpr_timespec five_seconds_time(void) { return n_seconds_time(5); }
/* Pull and discard events from cq until it reports shutdown. */
static void drain_cq(grpc_completion_queue *cq) {
  for (;;) {
    grpc_completion_type type;
    grpc_event *ev = grpc_completion_queue_next(cq, five_seconds_time());
    GPR_ASSERT(ev);
    type = ev->type;
    grpc_event_finish(ev);
    if (type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
  }
}
/* Shut down and release the server half of the fixture, if present. */
static void shutdown_server(grpc_end2end_test_fixture *f) {
  if (f->server != NULL) {
    grpc_server_shutdown(f->server);
    grpc_server_destroy(f->server);
    f->server = NULL;
  }
}
/* Release the client channel half of the fixture, if present. */
static void shutdown_client(grpc_end2end_test_fixture *f) {
  if (f->client != NULL) {
    grpc_channel_destroy(f->client);
    f->client = NULL;
  }
}
static void end_test(grpc_end2end_test_fixture *f) {
shutdown_server(f);
shutdown_client(f);
grpc_completion_queue_shutdown(f->server_cq);
drain_cq(f->server_cq);
grpc_completion_queue_destroy(f->server_cq);
grpc_completion_queue_shutdown(f->client_cq);
drain_cq(f->client_cq);
grpc_completion_queue_destroy(f->client_cq);
}
/* Run one full client/server RPC against the fixture, shutting the server
   down mid-call to verify the in-flight request still completes.  Uses the
   two cq_verifiers to check event ordering on each side. */
static void do_request_and_shutdown_server(grpc_end2end_test_fixture *f,
cq_verifier *v_client,
cq_verifier *v_server) {
grpc_call *c;
grpc_call *s;
grpc_status send_status = {GRPC_STATUS_UNIMPLEMENTED, "xyz"};
gpr_timespec deadline = five_seconds_time();
/* client: create and start the call, then finish the write half */
c = grpc_channel_create_call(f->client, "/foo", "test.google.com", deadline);
GPR_ASSERT(c);
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_invoke(c, f->client_cq, tag(1),
tag(2), tag(3), 0));
cq_expect_invoke_accepted(v_client, tag(1), GRPC_OP_OK);
cq_verify(v_client);
GPR_ASSERT(GRPC_CALL_OK == grpc_call_writes_done(c, tag(4)));
cq_expect_finish_accepted(v_client, tag(4), GRPC_OP_OK);
cq_verify(v_client);
/* server: receive and accept the new call */
GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(f->server, tag(100)));
cq_expect_server_rpc_new(v_server, &s, tag(100), "/foo", "test.google.com",
deadline, NULL);
cq_verify(v_server);
GPR_ASSERT(GRPC_CALL_OK == grpc_call_accept(s, f->server_cq, tag(102), 0));
cq_expect_client_metadata_read(v_client, tag(2), NULL);
cq_verify(v_client);
/* should be able to shut down the server early
- and still complete the request */
grpc_server_shutdown(f->server);
/* server sends final status; both sides observe completion */
GPR_ASSERT(GRPC_CALL_OK ==
grpc_call_start_write_status(s, send_status, tag(5)));
cq_expect_finished_with_status(v_client, tag(3), send_status, NULL);
cq_verify(v_client);
cq_expect_finish_accepted(v_server, tag(5), GRPC_OP_OK);
cq_expect_finished(v_server, tag(102), NULL);
cq_verify(v_server);
grpc_call_destroy(c);
grpc_call_destroy(s);
}
/* End-to-end test: a client survives its server shutting down, then the
   server is recreated (config.init_server called again) and the same client
   completes a second request against the new instance. */
static void disappearing_server_test(grpc_end2end_test_config config) {
grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);
gpr_log(GPR_INFO, "%s/%s", __FUNCTION__, config.name);
config.init_client(&f, NULL);
config.init_server(&f, NULL);
/* first request shuts the server down as part of its flow */
do_request_and_shutdown_server(&f, v_client, v_server);
/* now destroy and recreate the server */
config.init_server(&f, NULL);
do_request_and_shutdown_server(&f, v_client, v_server);
cq_verifier_destroy(v_client);
cq_verifier_destroy(v_server);
end_test(&f);
config.tear_down_data(&f);
}
/* Test-suite entry point: run the disappearing-server test on fixtures that
   support delayed connection (reconnect after server restart). */
void grpc_end2end_tests(grpc_end2end_test_config config) {
  if (!(config.feature_mask & FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION)) {
    return;
  }
  disappearing_server_test(config);
}

@ -120,7 +120,7 @@ static void session_shutdown_cb(void *arg, /*session*/
enum grpc_em_cb_status status) {
session *se = arg;
server *sv = se->sv;
grpc_fd_destroy(se->em_fd);
grpc_fd_destroy(se->em_fd, NULL, NULL);
gpr_free(se);
/* Start to shutdown listen fd. */
grpc_fd_shutdown(sv->em_fd);
@ -178,7 +178,7 @@ static void listen_shutdown_cb(void *arg /*server*/,
enum grpc_em_cb_status status) {
server *sv = arg;
grpc_fd_destroy(sv->em_fd);
grpc_fd_destroy(sv->em_fd, NULL, NULL);
gpr_mu_lock(&sv->mu);
sv->done = 1;
@ -288,7 +288,7 @@ static void client_init(client *cl) {
static void client_session_shutdown_cb(void *arg /*client*/,
enum grpc_em_cb_status status) {
client *cl = arg;
grpc_fd_destroy(cl->em_fd);
grpc_fd_destroy(cl->em_fd, NULL, NULL);
gpr_mu_lock(&cl->mu);
cl->done = 1;
gpr_cv_signal(&cl->done_cv);
@ -468,7 +468,7 @@ static void test_grpc_fd_change() {
GPR_ASSERT(b.cb_that_ran == second_read_callback);
gpr_mu_unlock(&b.mu);
grpc_fd_destroy(em_fd);
grpc_fd_destroy(em_fd, NULL, NULL);
destroy_change_data(&a);
destroy_change_data(&b);
close(sv[0]);
@ -509,7 +509,7 @@ void test_grpc_fd_notify_timeout() {
GPR_ASSERT(gpr_event_wait(&ev, gpr_time_add(deadline, timeout)));
GPR_ASSERT(gpr_event_get(&ev) == (void *)1);
grpc_fd_destroy(em_fd);
grpc_fd_destroy(em_fd, NULL, NULL);
close(sv[1]);
}

Loading…
Cancel
Save