Merge pull request #5426 from grpc/revert-5386-hide-the-pollset-set

Revert "Add an implementation firewall against pollset_set"
Branch: pull/5430/head
Author: Vijay Pai, 9 years ago
Commit: 2596d8f7e7
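This revert swaps grpc_pollset_set back from an opaque, heap-allocated object (hidden behind grpc_pollset_set_create / a pointer member) to a struct embedded directly in its owners and initialized in place. A minimal sketch of the two shapes, using a hypothetical owner struct that is not part of this commit, looks like this (the two halves belong to different revisions of the tree and are shown side by side for comparison only):

/* Sketch only: both call styles appear in the hunks below; the owner
   structs themselves are made up for illustration. */
#include "src/core/iomgr/pollset_set.h"

/* Shape being reverted away from: opaque pointer allocated by iomgr. */
typedef struct {
  grpc_pollset_set *interested_parties; /* owned pointer */
} owner_hidden;

static void owner_hidden_init(owner_hidden *o) {
  o->interested_parties = grpc_pollset_set_create();
}

static void owner_hidden_destroy(owner_hidden *o) {
  grpc_pollset_set_destroy(o->interested_parties);
}

/* Shape being restored: the struct is embedded and initialized in place. */
typedef struct {
  grpc_pollset_set interested_parties; /* embedded value */
} owner_exposed;

static void owner_exposed_init(owner_exposed *o) {
  grpc_pollset_set_init(&o->interested_parties);
}

static void owner_exposed_destroy(owner_exposed *o) {
  grpc_pollset_set_destroy(&o->interested_parties);
}

The per-file hunks below apply this substitution (and the matching grpc_pollset changes) throughout the core iomgr, client_config, security, surface, and test code.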
52 files changed (changed lines in parentheses):

  src/core/channel/client_channel.c (45)
  src/core/client_config/lb_policies/pick_first.c (31)
  src/core/client_config/lb_policies/round_robin.c (25)
  src/core/client_config/lb_policy.c (4)
  src/core/client_config/lb_policy.h (3)
  src/core/client_config/subchannel.c (30)
  src/core/httpcli/httpcli.c (24)
  src/core/httpcli/httpcli.h (3)
  src/core/iomgr/fd_posix.c (6)
  src/core/iomgr/pollset.h (15)
  src/core/iomgr/pollset_multipoller_with_epoll.c (1)
  src/core/iomgr/pollset_multipoller_with_poll_posix.c (8)
  src/core/iomgr/pollset_posix.c (16)
  src/core/iomgr/pollset_posix.h (12)
  src/core/iomgr/pollset_set.h (10)
  src/core/iomgr/pollset_set_posix.c (26)
  src/core/iomgr/pollset_set_posix.h (20)
  src/core/iomgr/pollset_set_windows.c (4)
  src/core/iomgr/pollset_set_windows.h (2)
  src/core/iomgr/pollset_windows.c (7)
  src/core/iomgr/pollset_windows.h (6)
  src/core/iomgr/tcp_client_posix.c (12)
  src/core/iomgr/tcp_posix.c (6)
  src/core/iomgr/udp_server.h (1)
  src/core/iomgr/workqueue_posix.c (1)
  src/core/iomgr/workqueue_posix.h (4)
  src/core/security/google_default_credentials.c (39)
  src/core/surface/completion_queue.c (85)
  test/core/client_config/set_initial_connect_string_test.c (2)
  test/core/end2end/fixtures/h2_full+poll+pipe.c (18)
  test/core/end2end/fixtures/h2_full+poll.c (16)
  test/core/end2end/fixtures/h2_ssl+poll.c (1)
  test/core/end2end/fixtures/h2_uchannel.c (47)
  test/core/end2end/fixtures/h2_uds+poll.c (18)
  test/core/httpcli/httpcli_test.c (44)
  test/core/httpcli/httpscli_test.c (44)
  test/core/iomgr/endpoint_pair_test.c (19)
  test/core/iomgr/endpoint_tests.c (18)
  test/core/iomgr/endpoint_tests.h (4)
  test/core/iomgr/fd_posix_test.c (89)
  test/core/iomgr/tcp_client_posix_test.c (74)
  test/core/iomgr/tcp_posix_test.c (95)
  test/core/iomgr/tcp_server_posix_test.c (50)
  test/core/iomgr/workqueue_test.c (39)
  test/core/security/oauth2_utils.c (25)
  test/core/security/print_google_default_creds_token.c (34)
  test/core/security/secure_endpoint_test.c (26)
  test/core/security/verify_jwt.c (29)
  test/core/util/port_posix.c (53)
  test/core/util/port_windows.c (27)
  test/core/util/test_tcp_server.c (17)
  test/core/util/test_tcp_server.h (4)

@ -78,8 +78,8 @@ typedef struct client_channel_channel_data {
int exit_idle_when_lb_policy_arrives;
/** owning stack */
grpc_channel_stack *owning_stack;
/** interested parties (owned) */
grpc_pollset_set *interested_parties;
/** interested parties */
grpc_pollset_set interested_parties;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
@ -183,8 +183,8 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->incoming_configuration = NULL;
if (lb_policy != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
chand->interested_parties);
grpc_pollset_set_add_pollset_set(exec_ctx, &lb_policy->interested_parties,
&chand->interested_parties);
}
gpr_mu_lock(&chand->mu_config);
@ -231,8 +231,9 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (old_lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(
exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
grpc_pollset_set_del_pollset_set(exec_ctx,
&old_lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
@ -253,7 +254,7 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(op->set_accept_stream == NULL);
if (op->bind_pollset != NULL) {
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties,
op->bind_pollset);
}
@ -283,8 +284,8 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
chand->resolver = NULL;
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
chand->lb_policy = NULL;
}
@ -410,7 +411,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
chand->interested_parties = grpc_pollset_set_create();
grpc_pollset_set_init(&chand->interested_parties);
}
/* Destructor for channel_data */
@ -424,12 +425,12 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
chand->lb_policy->interested_parties,
chand->interested_parties);
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(chand->interested_parties);
grpc_pollset_set_destroy(&chand->interested_parties);
gpr_mu_destroy(&chand->mu_config);
}
@ -440,17 +441,9 @@ static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
}
const grpc_channel_filter grpc_client_channel_filter = {
cc_start_transport_stream_op,
cc_start_transport_op,
sizeof(call_data),
init_call_elem,
cc_set_pollset,
destroy_call_elem,
sizeof(channel_data),
init_channel_elem,
destroy_channel_elem,
cc_get_peer,
"client-channel",
cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
@ -508,7 +501,7 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
bool iomgr_success) {
external_connectivity_watcher *w = arg;
grpc_closure *follow_up = w->on_complete;
grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &w->chand->interested_parties,
w->pollset);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
"external_connectivity_watcher");
@ -524,7 +517,7 @@ void grpc_client_channel_watch_connectivity_state(
w->chand = chand;
w->pollset = pollset;
w->on_complete = on_complete;
grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties, pollset);
grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");

@ -31,8 +31,8 @@
*
*/
#include "src/core/client_config/lb_policies/pick_first.h"
#include "src/core/client_config/lb_policy_factory.h"
#include "src/core/client_config/lb_policies/pick_first.h"
#include <string.h>
@ -119,7 +119,7 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
@ -137,7 +137,7 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
@ -158,7 +158,7 @@ static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
@ -195,7 +195,8 @@ int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@ -252,7 +253,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, p->base.interested_parties,
exec_ctx, selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
@ -277,13 +278,13 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected;
grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, selected, p->base.interested_parties,
exec_ctx, selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
@ -297,7 +298,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
} else {
goto loop;
@ -310,7 +311,7 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
p->base.interested_parties, &p->checking_connectivity,
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
break;
case GRPC_CHANNEL_FATAL_FAILURE:
@ -378,14 +379,8 @@ void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy,
pf_shutdown,
pf_pick,
pf_cancel_pick,
pf_ping_one,
pf_exit_idle,
pf_check_connectivity,
pf_notify_on_state_change};
pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_ping_one, pf_exit_idle,
pf_check_connectivity, pf_notify_on_state_change};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}

@ -260,7 +260,7 @@ static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
@ -285,7 +285,7 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
subchannel_data *sd = p->subchannels[i];
sd->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
}
@ -322,7 +322,8 @@ int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_pollset_set_add_pollset(exec_ctx, p->base.interested_parties, pollset);
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@ -373,13 +374,13 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_CONNECTING:
@ -388,13 +389,13 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
sd->connectivity_state,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
/* renew state notification */
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, p->base.interested_parties,
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
/* remove from ready list if still present */
@ -483,14 +484,8 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy,
rr_shutdown,
rr_pick,
rr_cancel_pick,
rr_ping_one,
rr_exit_idle,
rr_check_connectivity,
rr_notify_on_state_change};
rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_ping_one, rr_exit_idle,
rr_check_connectivity, rr_notify_on_state_change};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}

@ -39,7 +39,7 @@ void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable) {
policy->vtable = vtable;
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
policy->interested_parties = grpc_pollset_set_create();
grpc_pollset_set_init(&policy->interested_parties);
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
@ -93,7 +93,7 @@ void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(policy->interested_parties);
grpc_pollset_set_destroy(&policy->interested_parties);
policy->vtable->destroy(exec_ctx, policy);
}
}

@ -48,8 +48,7 @@ typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_atm ref_pair;
/* owned pointer to interested parties in load balancing decisions */
grpc_pollset_set *interested_parties;
grpc_pollset_set interested_parties;
};
struct grpc_lb_policy_vtable {

@ -108,7 +108,7 @@ struct grpc_subchannel {
/** pollset_set tracking who's interested in a connection
being setup */
grpc_pollset_set *pollset_set;
grpc_pollset_set pollset_set;
/** active connection, or null; of type grpc_connected_subchannel */
gpr_atm connected_subchannel;
@ -184,8 +184,8 @@ static void connection_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_free(c);
}
void grpc_connected_subchannel_ref(
grpc_connected_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
void grpc_connected_subchannel_ref(grpc_connected_subchannel *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
}
@ -209,7 +209,7 @@ static void subchannel_destroy(grpc_exec_ctx *exec_ctx, void *arg,
gpr_slice_unref(c->initial_connect_string);
grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
grpc_connector_unref(exec_ctx, c->connector);
grpc_pollset_set_destroy(c->pollset_set);
grpc_pollset_set_destroy(&c->pollset_set);
grpc_subchannel_key_destroy(exec_ctx, c->key);
gpr_free(c);
}
@ -226,8 +226,8 @@ static gpr_atm ref_mutate(grpc_subchannel *c, gpr_atm delta,
return old_val;
}
grpc_subchannel *grpc_subchannel_ref(
grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
grpc_subchannel *grpc_subchannel_ref(grpc_subchannel *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, (1 << INTERNAL_REF_BITS),
0 REF_MUTATE_PURPOSE("STRONG_REF"));
@ -235,8 +235,8 @@ grpc_subchannel *grpc_subchannel_ref(
return c;
}
grpc_subchannel *grpc_subchannel_weak_ref(
grpc_subchannel *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
grpc_subchannel *grpc_subchannel_weak_ref(grpc_subchannel *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
gpr_atm old_refs;
old_refs = ref_mutate(c, 1, 0 REF_MUTATE_PURPOSE("WEAK_REF"));
GPR_ASSERT(old_refs != 0);
@ -326,7 +326,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
}
c->addr = gpr_malloc(args->addr_len);
memcpy(c->addr, args->addr, args->addr_len);
c->pollset_set = grpc_pollset_set_create();
grpc_pollset_set_init(&c->pollset_set);
c->addr_len = args->addr_len;
grpc_set_initial_connect_string(&c->addr, &c->addr_len,
&c->initial_connect_string);
@ -345,7 +345,7 @@ grpc_subchannel *grpc_subchannel_create(grpc_exec_ctx *exec_ctx,
static void continue_connect(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
args.interested_parties = &c->pollset_set;
args.addr = c->addr;
args.addr_len = c->addr_len;
args.deadline = compute_connect_deadline(c);
@ -379,7 +379,7 @@ static void on_external_state_watcher_done(grpc_exec_ctx *exec_ctx, void *arg,
external_state_watcher *w = arg;
grpc_closure *follow_up = w->notify;
if (w->pollset_set != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
grpc_pollset_set_del_pollset_set(exec_ctx, &w->subchannel->pollset_set,
w->pollset_set);
}
gpr_mu_lock(&w->subchannel->mu);
@ -415,7 +415,7 @@ void grpc_subchannel_notify_on_state_change(
w->notify = notify;
grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
if (interested_parties != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
grpc_pollset_set_add_pollset_set(exec_ctx, &c->pollset_set,
interested_parties);
}
GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher");
@ -573,7 +573,7 @@ static void publish_transport(grpc_exec_ctx *exec_ctx, grpc_subchannel *c) {
GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, con, c->pollset_set, &sw_subchannel->connectivity_state,
exec_ctx, con, &c->pollset_set, &sw_subchannel->connectivity_state,
&sw_subchannel->closure);
/* signal completion */
@ -690,8 +690,8 @@ static void subchannel_call_destroy(grpc_exec_ctx *exec_ctx, void *call,
GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
}
void grpc_subchannel_call_ref(
grpc_subchannel_call *c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
void grpc_subchannel_call_ref(grpc_subchannel_call *c
GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
}

@ -31,22 +31,20 @@
*
*/
#include "src/core/httpcli/httpcli.h"
#include "src/core/iomgr/sockaddr.h"
#include "src/core/httpcli/httpcli.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/httpcli/format_request.h"
#include "src/core/httpcli/parser.h"
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/iomgr/tcp_client.h"
#include "src/core/httpcli/format_request.h"
#include "src/core/httpcli/parser.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
typedef struct {
gpr_slice request_text;
@ -86,18 +84,18 @@ const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http",
plaintext_handshake};
void grpc_httpcli_context_init(grpc_httpcli_context *context) {
context->pollset_set = grpc_pollset_set_create();
grpc_pollset_set_init(&context->pollset_set);
}
void grpc_httpcli_context_destroy(grpc_httpcli_context *context) {
grpc_pollset_set_destroy(context->pollset_set);
grpc_pollset_set_destroy(&context->pollset_set);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req);
static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
int success) {
grpc_pollset_set_del_pollset(exec_ctx, req->context->pollset_set,
grpc_pollset_set_del_pollset(exec_ctx, &req->context->pollset_set,
req->pollset);
req->on_response(exec_ctx, req->user_data, success ? &req->parser.r : NULL);
grpc_httpcli_parser_destroy(&req->parser);
@ -199,7 +197,7 @@ static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
addr = &req->addresses->addrs[req->next_address++];
grpc_closure_init(&req->connected, on_connected, req);
grpc_tcp_client_connect(
exec_ctx, &req->connected, &req->ep, req->context->pollset_set,
exec_ctx, &req->connected, &req->ep, &req->context->pollset_set,
(struct sockaddr *)&addr->addr, addr->len, req->deadline);
}
@ -239,7 +237,7 @@ static void internal_request_begin(
req->host = gpr_strdup(request->host);
req->ssl_host_override = gpr_strdup(request->ssl_host_override);
grpc_pollset_set_add_pollset(exec_ctx, req->context->pollset_set,
grpc_pollset_set_add_pollset(exec_ctx, &req->context->pollset_set,
req->pollset);
grpc_resolve_address(request->host, req->handshaker->default_port,
on_resolved, req);

@ -39,7 +39,6 @@
#include <grpc/support/time.h>
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/pollset_set.h"
/* User agent this library reports */
@ -57,7 +56,7 @@ typedef struct grpc_httpcli_header {
TODO(ctiller): allow caching and capturing multiple requests for the
same content and combining them */
typedef struct grpc_httpcli_context {
grpc_pollset_set *pollset_set;
grpc_pollset_set pollset_set;
} grpc_httpcli_context;
typedef struct {

@ -46,8 +46,6 @@
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/pollset_posix.h"
#define CLOSURE_NOT_READY ((grpc_closure *)0)
#define CLOSURE_READY ((grpc_closure *)1)
@ -177,11 +175,11 @@ int grpc_fd_is_orphaned(grpc_fd *fd) {
}
static void pollset_kick_locked(grpc_fd_watcher *watcher) {
gpr_mu_lock(&watcher->pollset->mu);
gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
GPR_ASSERT(watcher->worker);
grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
gpr_mu_unlock(&watcher->pollset->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
}
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {

@ -35,11 +35,8 @@
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_H
#include <grpc/support/port_platform.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/exec_ctx.h"
#define GRPC_POLLSET_KICK_BROADCAST ((grpc_pollset_worker *)1)
/* A grpc_pollset is a set of file descriptors that a higher level item is
@ -49,11 +46,15 @@
- a completion queue might keep a pollset with an entry for each transport
that is servicing a call that it's tracking */
typedef struct grpc_pollset grpc_pollset;
typedef struct grpc_pollset_worker grpc_pollset_worker;
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_posix.h"
#endif
#ifdef GPR_WIN32
#include "src/core/iomgr/pollset_windows.h"
#endif
size_t grpc_pollset_size(void);
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
void grpc_pollset_init(grpc_pollset *pollset);
/* Begin shutting down the pollset, and call closure when done.
* GRPC_POLLSET_MU(pollset) must be held */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,

@ -45,7 +45,6 @@
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"

@ -42,14 +42,12 @@
#include <stdlib.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>
typedef struct {
/* all polled fds */

@ -42,16 +42,16 @@
#include <string.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/iomgr_internal.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/block_annotate.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>
GPR_TLS_DECL(g_current_thread_poller);
GPR_TLS_DECL(g_current_thread_worker);
@ -97,8 +97,6 @@ static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
worker->prev->next = worker->next->prev = worker;
}
size_t grpc_pollset_size(void) { return sizeof(grpc_pollset); }
void grpc_pollset_kick_ext(grpc_pollset *p,
grpc_pollset_worker *specific_worker,
uint32_t flags) {
@ -188,9 +186,8 @@ void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
void grpc_pollset_init(grpc_pollset *pollset) {
gpr_mu_init(&pollset->mu);
*mu = &pollset->mu;
pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
pollset->in_flight_cbs = 0;
pollset->shutting_down = 0;
@ -207,6 +204,7 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
GPR_ASSERT(!grpc_pollset_has_workers(pollset));
GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
pollset->vtable->destroy(pollset);
gpr_mu_destroy(&pollset->mu);
while (pollset->local_wakeup_cache) {
grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);

@ -37,10 +37,8 @@
#include <poll.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/exec_ctx.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct grpc_pollset_vtable grpc_pollset_vtable;
@ -55,15 +53,15 @@ typedef struct grpc_cached_wakeup_fd {
struct grpc_cached_wakeup_fd *next;
} grpc_cached_wakeup_fd;
struct grpc_pollset_worker {
typedef struct grpc_pollset_worker {
grpc_cached_wakeup_fd *wakeup_fd;
int reevaluate_polling_on_wakeup;
int kicked_specifically;
struct grpc_pollset_worker *next;
struct grpc_pollset_worker *prev;
};
} grpc_pollset_worker;
struct grpc_pollset {
typedef struct grpc_pollset {
/* pollsets under posix can mutate representation as fds are added and
removed.
For example, we may choose a poll() based implementation on linux for
@ -83,7 +81,7 @@ struct grpc_pollset {
} data;
/* Local cache of eventfds for workers */
grpc_cached_wakeup_fd *local_wakeup_cache;
};
} grpc_pollset;
struct grpc_pollset_vtable {
void (*add_fd)(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
@ -95,6 +93,8 @@ struct grpc_pollset_vtable {
void (*destroy)(grpc_pollset *pollset);
};
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
/* Add an fd to a pollset */
void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
struct grpc_fd *fd);
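
The pollset.h and pollset_posix.h hunks above also restore the older pollset initialization contract: instead of heap-allocating an opaque pollset of grpc_pollset_size() bytes and receiving its mutex through an out-parameter, callers embed a grpc_pollset and reach its lock through the GRPC_POLLSET_MU macro. A hedged sketch of a caller under each contract follows; the work loop is elided and the two functions belong to different revisions, shown together only for comparison.

/* Sketch only: how a caller sets up and locks a pollset under each contract. */

/* Contract being reverted away from: opaque pollset, mutex via out-param. */
static void use_opaque_pollset(void) {
  gpr_mu *mu;
  grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(pollset, &mu);
  gpr_mu_lock(mu);
  /* ... grpc_pollset_work(...), then grpc_pollset_shutdown(...) ... */
  gpr_mu_unlock(mu);
  /* gpr_free(pollset) once the shutdown closure has run */
}

/* Contract being restored: embedded pollset, mutex via GRPC_POLLSET_MU. */
static void use_embedded_pollset(void) {
  grpc_pollset pollset;
  grpc_pollset_init(&pollset);
  gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
  /* ... grpc_pollset_work(...), then grpc_pollset_shutdown(...) ... */
  gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
}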

@ -41,9 +41,15 @@
fd's (etc) that have been registered with the set_set to that pollset.
Registering fd's automatically adds them to all current pollsets. */
typedef struct grpc_pollset_set grpc_pollset_set;
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/pollset_set_posix.h"
#endif
grpc_pollset_set *grpc_pollset_set_create(void);
#ifdef GPR_WIN32
#include "src/core/iomgr/pollset_set_windows.h"
#endif
void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -41,30 +41,11 @@
#include <grpc/support/alloc.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/pollset_set_posix.h"
#include "src/core/iomgr/pollset_set.h"
struct grpc_pollset_set {
gpr_mu mu;
size_t pollset_count;
size_t pollset_capacity;
grpc_pollset **pollsets;
size_t pollset_set_count;
size_t pollset_set_capacity;
struct grpc_pollset_set **pollset_sets;
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;
};
grpc_pollset_set *grpc_pollset_set_create(void) {
grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
void grpc_pollset_set_init(grpc_pollset_set *pollset_set) {
memset(pollset_set, 0, sizeof(*pollset_set));
gpr_mu_init(&pollset_set->mu);
return pollset_set;
}
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
@ -76,7 +57,6 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
gpr_free(pollset_set->pollsets);
gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
gpr_free(pollset_set);
}
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,7 +35,23 @@
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_POSIX_H
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_set.h"
#include "src/core/iomgr/pollset_posix.h"
typedef struct grpc_pollset_set {
gpr_mu mu;
size_t pollset_count;
size_t pollset_capacity;
grpc_pollset **pollsets;
size_t pollset_set_count;
size_t pollset_set_capacity;
struct grpc_pollset_set **pollset_sets;
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;
} grpc_pollset_set;
void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set, grpc_fd *fd);

@ -35,9 +35,9 @@
#ifdef GPR_WINSOCK_SOCKET
#include "src/core/iomgr/pollset_set_windows.h"
#include "src/core/iomgr/pollset_set.h"
grpc_pollset_set* grpc_pollset_set_create(pollset_set) { return NULL; }
void grpc_pollset_set_init(grpc_pollset_set* pollset_set) {}
void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}

@ -34,6 +34,6 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
#define GRPC_INTERNAL_CORE_IOMGR_POLLSET_SET_WINDOWS_H
#include "src/core/iomgr/pollset_set.h"
typedef struct grpc_pollset_set { void *unused; } grpc_pollset_set;
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */

@ -89,17 +89,12 @@ static void push_front_worker(grpc_pollset_worker *root,
worker->links[type].next->links[type].prev = worker;
}
size_t grpc_pollset_size(void) {
return sizeof(grpc_pollset);
}
/* There isn't really any such thing as a pollset under Windows, due to the
nature of the IO completion ports. We're still going to provide a minimal
set of features for the sake of the rest of grpc. But grpc_pollset_work
won't actually do any polling, and return as quickly as possible. */
void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
*mu = &grpc_polling_mu;
void grpc_pollset_init(grpc_pollset *pollset) {
memset(pollset, 0, sizeof(*pollset));
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
pollset->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,4 +72,8 @@ struct grpc_pollset {
grpc_closure *on_shutdown;
};
extern gpr_mu grpc_polling_mu;
#define GRPC_POLLSET_MU(pollset) (&grpc_polling_mu)
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_WINDOWS_H */

@ -42,19 +42,17 @@
#include <string.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/timer.h"
#include "src/core/iomgr/iomgr_posix.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/pollset_set_posix.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include "src/core/iomgr/tcp_posix.h"
#include "src/core/iomgr/timer.h"
#include "src/core/support/string.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
extern int grpc_tcp_trace;

@ -40,8 +40,8 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
@ -51,11 +51,9 @@
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/support/string.h"
#include "src/core/debug/trace.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/pollset_set_posix.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#ifdef GPR_HAVE_MSG_NOSIGNAL
#define SENDMSG_FLAGS MSG_NOSIGNAL

@ -35,7 +35,6 @@
#define GRPC_INTERNAL_CORE_IOMGR_UDP_SERVER_H
#include "src/core/iomgr/endpoint.h"
#include "src/core/iomgr/fd_posix.h"
/* Forward decl of grpc_server */
typedef struct grpc_server grpc_server;

@ -44,7 +44,6 @@
#include <grpc/support/useful.h>
#include "src/core/iomgr/fd_posix.h"
#include "src/core/iomgr/pollset_posix.h"
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, bool success);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,8 +34,6 @@
#ifndef GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
#define GRPC_INTERNAL_CORE_IOMGR_WORKQUEUE_POSIX_H
#include "src/core/iomgr/wakeup_fd_posix.h"
struct grpc_fd;
struct grpc_workqueue {

@ -52,14 +52,13 @@
static grpc_channel_credentials *default_credentials = NULL;
static int compute_engine_detection_done = 0;
static gpr_mu g_state_mu;
static gpr_mu *g_polling_mu;
static gpr_mu g_mu;
static gpr_once g_once = GPR_ONCE_INIT;
static void init_default_credentials(void) { gpr_mu_init(&g_state_mu); }
static void init_default_credentials(void) { gpr_mu_init(&g_mu); }
typedef struct {
grpc_pollset *pollset;
grpc_pollset pollset;
int is_done;
int success;
} compute_engine_detector;
@ -81,10 +80,10 @@ static void on_compute_engine_detection_http_response(
}
}
}
gpr_mu_lock(g_polling_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&detector->pollset));
detector->is_done = 1;
grpc_pollset_kick(detector->pollset, NULL);
gpr_mu_unlock(g_polling_mu);
grpc_pollset_kick(&detector->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&detector->pollset));
}
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool s) {
@ -102,8 +101,7 @@ static int is_stack_running_on_compute_engine(void) {
on compute engine. */
gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);
detector.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(detector.pollset, &g_polling_mu);
grpc_pollset_init(&detector.pollset);
detector.is_done = 0;
detector.success = 0;
@ -114,7 +112,7 @@ static int is_stack_running_on_compute_engine(void) {
grpc_httpcli_context_init(&context);
grpc_httpcli_get(
&exec_ctx, &context, detector.pollset, &request,
&exec_ctx, &context, &detector.pollset, &request,
gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
on_compute_engine_detection_http_response, &detector);
@ -122,22 +120,19 @@ static int is_stack_running_on_compute_engine(void) {
/* Block until we get the response. This is not ideal but this should only be
called once for the lifetime of the process by the default credentials. */
gpr_mu_lock(g_polling_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
while (!detector.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, detector.pollset, &worker,
grpc_pollset_work(&exec_ctx, &detector.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(g_polling_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));
grpc_httpcli_context_destroy(&context);
grpc_closure_init(&destroy_closure, destroy_pollset, detector.pollset);
grpc_pollset_shutdown(&exec_ctx, detector.pollset, &destroy_closure);
grpc_closure_init(&destroy_closure, destroy_pollset, &detector.pollset);
grpc_pollset_shutdown(&exec_ctx, &detector.pollset, &destroy_closure);
grpc_exec_ctx_finish(&exec_ctx);
g_polling_mu = NULL;
gpr_free(detector.pollset);
return detector.success;
}
@ -189,7 +184,7 @@ grpc_channel_credentials *grpc_google_default_credentials_create(void) {
gpr_once_init(&g_once, init_default_credentials);
gpr_mu_lock(&g_state_mu);
gpr_mu_lock(&g_mu);
if (default_credentials != NULL) {
result = grpc_channel_credentials_ref(default_credentials);
@ -235,19 +230,19 @@ end:
gpr_log(GPR_ERROR, "Could not create google default credentials.");
}
}
gpr_mu_unlock(&g_state_mu);
gpr_mu_unlock(&g_mu);
return result;
}
void grpc_flush_cached_google_default_credentials(void) {
gpr_once_init(&g_once, init_default_credentials);
gpr_mu_lock(&g_state_mu);
gpr_mu_lock(&g_mu);
if (default_credentials != NULL) {
grpc_channel_credentials_unref(default_credentials);
default_credentials = NULL;
}
compute_engine_detection_done = 0;
gpr_mu_unlock(&g_state_mu);
gpr_mu_unlock(&g_mu);
}
/* -- Well known credentials path. -- */

@ -36,19 +36,18 @@
#include <stdio.h>
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset.h"
#include "src/core/iomgr/timer.h"
#include "src/core/profiling/timers.h"
#include "src/core/iomgr/pollset.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include "src/core/profiling/timers.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
typedef struct {
grpc_pollset_worker **worker;
@ -57,8 +56,6 @@ typedef struct {
/* Completion queue structure */
struct grpc_completion_queue {
/** owned by pollset */
gpr_mu *mu;
/** completed events */
grpc_cq_completion completed_head;
grpc_cq_completion *completed_tail;
@ -66,6 +63,8 @@ struct grpc_completion_queue {
gpr_refcount pending_events;
/** Once owning_refs drops to zero, we will destroy the cq */
gpr_refcount owning_refs;
/** the set of low level i/o things that concern this cq */
grpc_pollset pollset;
/** 0 initially, 1 once we've begun shutting down */
int shutdown;
int shutdown_called;
@ -83,8 +82,6 @@ struct grpc_completion_queue {
grpc_completion_queue *next_free;
};
#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))
static gpr_mu g_freelist_mu;
grpc_completion_queue *g_freelist;
@ -97,7 +94,7 @@ void grpc_cq_global_shutdown(void) {
gpr_mu_destroy(&g_freelist_mu);
while (g_freelist) {
grpc_completion_queue *next = g_freelist->next_free;
grpc_pollset_destroy(POLLSET_FROM_CQ(g_freelist));
grpc_pollset_destroy(&g_freelist->pollset);
#ifndef NDEBUG
gpr_free(g_freelist->outstanding_tags);
#endif
@ -127,8 +124,8 @@ grpc_completion_queue *grpc_completion_queue_create(void *reserved) {
if (g_freelist == NULL) {
gpr_mu_unlock(&g_freelist_mu);
cc = gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu);
cc = gpr_malloc(sizeof(grpc_completion_queue));
grpc_pollset_init(&cc->pollset);
#ifndef NDEBUG
cc->outstanding_tags = NULL;
cc->outstanding_tag_capacity = 0;
@ -187,7 +184,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
if (gpr_unref(&cc->owning_refs)) {
GPR_ASSERT(cc->completed_head.next == (uintptr_t)&cc->completed_head);
grpc_pollset_reset(POLLSET_FROM_CQ(cc));
grpc_pollset_reset(&cc->pollset);
gpr_mu_lock(&g_freelist_mu);
cc->next_free = g_freelist;
g_freelist = cc;
@ -197,7 +194,7 @@ void grpc_cq_internal_unref(grpc_completion_queue *cc) {
void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
#ifndef NDEBUG
gpr_mu_lock(cc->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
GPR_ASSERT(!cc->shutdown_called);
if (cc->outstanding_tag_count == cc->outstanding_tag_capacity) {
cc->outstanding_tag_capacity = GPR_MAX(4, 2 * cc->outstanding_tag_capacity);
@ -206,7 +203,7 @@ void grpc_cq_begin_op(grpc_completion_queue *cc, void *tag) {
cc->outstanding_tag_capacity);
}
cc->outstanding_tags[cc->outstanding_tag_count++] = tag;
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
#endif
gpr_ref(&cc->pending_events);
}
@ -234,7 +231,7 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
storage->next =
((uintptr_t)&cc->completed_head) | ((uintptr_t)(success != 0));
gpr_mu_lock(cc->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
#ifndef NDEBUG
for (i = 0; i < (int)cc->outstanding_tag_count; i++) {
if (cc->outstanding_tags[i] == tag) {
@ -259,8 +256,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
break;
}
}
grpc_pollset_kick(POLLSET_FROM_CQ(cc), pluck_worker);
gpr_mu_unlock(cc->mu);
grpc_pollset_kick(&cc->pollset, pluck_worker);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
} else {
cc->completed_tail->next =
((uintptr_t)storage) | (1u & (uintptr_t)cc->completed_tail->next);
@ -268,9 +265,8 @@ void grpc_cq_end_op(grpc_exec_ctx *exec_ctx, grpc_completion_queue *cc,
GPR_ASSERT(!cc->shutdown);
GPR_ASSERT(cc->shutdown_called);
cc->shutdown = 1;
grpc_pollset_shutdown(exec_ctx, POLLSET_FROM_CQ(cc),
&cc->pollset_shutdown_done);
gpr_mu_unlock(cc->mu);
grpc_pollset_shutdown(exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
GPR_TIMER_END("grpc_cq_end_op", 0);
@ -298,7 +294,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "next");
gpr_mu_lock(cc->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
if (cc->completed_tail != &cc->completed_head) {
grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
@ -306,7 +302,7 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
if (c == cc->completed_tail) {
cc->completed_tail = &cc->completed_head;
}
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
@ -314,14 +310,14 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
break;
}
if (cc->shutdown) {
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
break;
}
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
@ -334,12 +330,11 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
gpr_timespec iteration_deadline = deadline;
if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
GPR_TIMER_MARK("alarm_triggered", 0);
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(cc->mu);
continue;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
} else {
grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now,
iteration_deadline);
}
}
@ -400,7 +395,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
GRPC_CQ_INTERNAL_REF(cc, "pluck");
gpr_mu_lock(cc->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
for (;;) {
prev = &cc->completed_head;
while ((c = (grpc_cq_completion *)(prev->next & ~(uintptr_t)1)) !=
@ -410,7 +405,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
if (c == cc->completed_tail) {
cc->completed_tail = prev;
}
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
ret.type = GRPC_OP_COMPLETE;
ret.success = c->next & 1u;
ret.tag = c->tag;
@ -420,7 +415,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
prev = c;
}
if (cc->shutdown) {
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_SHUTDOWN;
break;
@ -430,7 +425,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
"Too many outstanding grpc_completion_queue_pluck calls: maximum "
"is %d",
GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
/* TODO(ctiller): should we use a different result here */
ret.type = GRPC_QUEUE_TIMEOUT;
@ -439,7 +434,7 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
now = gpr_now(GPR_CLOCK_MONOTONIC);
if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
del_plucker(cc, tag, &worker);
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
memset(&ret, 0, sizeof(ret));
ret.type = GRPC_QUEUE_TIMEOUT;
break;
@ -452,12 +447,11 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
gpr_timespec iteration_deadline = deadline;
if (grpc_timer_check(&exec_ctx, now, &iteration_deadline)) {
GPR_TIMER_MARK("alarm_triggered", 0);
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(cc->mu);
continue;
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
} else {
grpc_pollset_work(&exec_ctx, POLLSET_FROM_CQ(cc), &worker, now,
grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now,
iteration_deadline);
}
del_plucker(cc, tag, &worker);
@ -478,9 +472,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
GRPC_API_TRACE("grpc_completion_queue_shutdown(cc=%p)", 1, (cc));
gpr_mu_lock(cc->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
if (cc->shutdown_called) {
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
return;
}
@ -488,10 +482,9 @@ void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
if (gpr_unref(&cc->pending_events)) {
GPR_ASSERT(!cc->shutdown);
cc->shutdown = 1;
grpc_pollset_shutdown(&exec_ctx, POLLSET_FROM_CQ(cc),
&cc->pollset_shutdown_done);
grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_shutdown_done);
}
gpr_mu_unlock(cc->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
grpc_exec_ctx_finish(&exec_ctx);
GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
}
@ -505,7 +498,7 @@ void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
}
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
return POLLSET_FROM_CQ(cc);
return &cc->pollset;
}
void grpc_cq_mark_server_cq(grpc_completion_queue *cc) { cc->is_server_cq = 1; }
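
In completion_queue.c the same pattern shows up with an extra wrinkle: before the revert, the pollset was co-allocated in the same gpr_malloc block immediately after the grpc_completion_queue and reached through POLLSET_FROM_CQ, while the revert returns to a plain struct member. A simplified sketch of the two allocation paths, with the freelist, refcounting, and error handling omitted; the two functions belong to different revisions and are juxtaposed only to show the layout difference.

/* Sketch only: the two cq/pollset layouts, condensed from the hunks above. */

/* Pre-revert: pollset memory sits immediately after the cq struct. */
#define POLLSET_FROM_CQ(cq) ((grpc_pollset *)(cq + 1))

static grpc_completion_queue *cq_create_opaque(void) {
  grpc_completion_queue *cc =
      gpr_malloc(sizeof(grpc_completion_queue) + grpc_pollset_size());
  grpc_pollset_init(POLLSET_FROM_CQ(cc), &cc->mu); /* cc->mu caches the lock */
  return cc;
}

/* Reverted: the pollset is an ordinary member; lock via GRPC_POLLSET_MU. */
static grpc_completion_queue *cq_create_embedded(void) {
  grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
  grpc_pollset_init(&cc->pollset);
  return cc;
}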

@ -85,7 +85,7 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
gpr_slice_buffer_init(&state.incoming_buffer);
gpr_slice_buffer_init(&state.temp_incoming_buffer);
state.tcp = tcp;
grpc_endpoint_add_to_pollset(exec_ctx, tcp, server->pollset);
grpc_endpoint_add_to_pollset(exec_ctx, tcp, &server->pollset);
grpc_endpoint_read(exec_ctx, tcp, &state.temp_incoming_buffer, &on_read);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,23 +35,21 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "src/core/iomgr/wakeup_fd_posix.h"
typedef struct fullstack_fixture_data {
char *localaddr;

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,20 +35,18 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"

@ -41,7 +41,6 @@
#include <grpc/support/log.h>
#include "src/core/channel/channel_args.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/security/credentials.h"
#include "src/core/support/env.h"
#include "src/core/support/tmpfile.h"

@ -35,13 +35,6 @@
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/channel_args.h"
#include "src/core/channel/client_channel.h"
#include "src/core/channel/client_uchannel.h"
@ -53,6 +46,13 @@
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@ -238,12 +238,12 @@ static grpc_end2end_test_fixture chttp2_create_fixture_micro_fullstack(
}
grpc_connectivity_state g_state = GRPC_CHANNEL_IDLE;
grpc_pollset_set *g_interested_parties;
grpc_pollset_set g_interested_parties;
static void state_changed(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
if (g_state != GRPC_CHANNEL_READY) {
grpc_subchannel_notify_on_state_change(
exec_ctx, arg, g_interested_parties, &g_state,
exec_ctx, arg, &g_interested_parties, &g_state,
grpc_closure_create(state_changed, arg));
}
}
@ -253,31 +253,30 @@ static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
}
static grpc_connected_subchannel *connect_subchannel(grpc_subchannel *c) {
gpr_mu *mu;
grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset pollset;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_init(pollset, &mu);
g_interested_parties = grpc_pollset_set_create();
grpc_pollset_set_add_pollset(&exec_ctx, g_interested_parties, pollset);
grpc_subchannel_notify_on_state_change(&exec_ctx, c, g_interested_parties,
grpc_pollset_init(&pollset);
grpc_pollset_set_init(&g_interested_parties);
grpc_pollset_set_add_pollset(&exec_ctx, &g_interested_parties, &pollset);
grpc_subchannel_notify_on_state_change(&exec_ctx, c, &g_interested_parties,
&g_state,
grpc_closure_create(state_changed, c));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
while (g_state != GRPC_CHANNEL_READY) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
grpc_pollset_work(&exec_ctx, &pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
gpr_mu_unlock(mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
}
grpc_pollset_shutdown(&exec_ctx, pollset,
grpc_closure_create(destroy_pollset, pollset));
grpc_pollset_set_destroy(g_interested_parties);
gpr_mu_unlock(mu);
grpc_pollset_shutdown(&exec_ctx, &pollset,
grpc_closure_create(destroy_pollset, &pollset));
grpc_pollset_set_destroy(&g_interested_parties);
gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pollset);
return grpc_subchannel_get_connected_subchannel(c);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -37,22 +37,20 @@
#include <string.h>
#include <unistd.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/channel/client_channel.h"
#include "src/core/channel/connected_channel.h"
#include "src/core/channel/http_server_filter.h"
#include "src/core/iomgr/pollset_posix.h"
#include "src/core/support/string.h"
#include "src/core/surface/channel.h"
#include "src/core/surface/server.h"
#include "src/core/transport/chttp2_transport.h"
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"

@ -36,19 +36,18 @@
#include <string.h>
#include <grpc/grpc.h>
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/iomgr.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
static int g_done = 0;
static grpc_httpcli_context g_context;
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static gpr_timespec n_seconds_time(int seconds) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(seconds);
@ -64,10 +63,10 @@ static void on_finish(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(response->status == 200);
GPR_ASSERT(response->body_length == strlen(expect));
GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length));
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
g_done = 1;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void test_get(int port) {
@ -86,18 +85,18 @@ static void test_get(int port) {
req.path = "/get";
req.handshaker = &grpc_httpcli_plaintext;
grpc_httpcli_get(&exec_ctx, &g_context, g_pollset, &req, n_seconds_time(15),
grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15),
on_finish, (void *)42);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_free(host);
}
@ -117,18 +116,18 @@ static void test_post(int port) {
req.path = "/post";
req.handshaker = &grpc_httpcli_plaintext;
grpc_httpcli_post(&exec_ctx, &g_context, g_pollset, &req, "hello", 5,
grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5,
n_seconds_time(15), on_finish, (void *)42);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_free(host);
}
@ -176,20 +175,17 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_httpcli_context_init(&g_context);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
test_get(port);
test_post(port);
grpc_httpcli_context_destroy(&g_context);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_subprocess_destroy(server);
return 0;
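
The plaintext httpcli test above shows the wait-loop shape this revert reinstates across the tree: lock the pollset's own mutex, poll until a callback flips a flag, and drop the lock around grpc_exec_ctx_finish so queued callbacks can run. A condensed sketch of that loop, assuming the surrounding test owns the done flag (the helper name is illustrative):

    static void wait_until_done(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                int *done, gpr_timespec deadline) {
      gpr_mu_lock(GRPC_POLLSET_MU(pollset));
      while (!*done) {
        grpc_pollset_worker *worker = NULL;
        grpc_pollset_work(exec_ctx, pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline);
        /* Callbacks must run outside the pollset lock. */
        gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
        grpc_exec_ctx_finish(exec_ctx);
        gpr_mu_lock(GRPC_POLLSET_MU(pollset));
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
    }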

@ -36,19 +36,18 @@
#include <string.h>
#include <grpc/grpc.h>
#include "src/core/iomgr/iomgr.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/subprocess.h>
#include <grpc/support/sync.h>
#include "src/core/iomgr/iomgr.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
static int g_done = 0;
static grpc_httpcli_context g_context;
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static gpr_timespec n_seconds_time(int seconds) {
return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(seconds);
@ -64,10 +63,10 @@ static void on_finish(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(response->status == 200);
GPR_ASSERT(response->body_length == strlen(expect));
GPR_ASSERT(0 == memcmp(expect, response->body, response->body_length));
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
g_done = 1;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void test_get(int port) {
@ -87,18 +86,18 @@ static void test_get(int port) {
req.path = "/get";
req.handshaker = &grpc_httpcli_ssl;
grpc_httpcli_get(&exec_ctx, &g_context, g_pollset, &req, n_seconds_time(15),
grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15),
on_finish, (void *)42);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_free(host);
}
@ -119,18 +118,18 @@ static void test_post(int port) {
req.path = "/post";
req.handshaker = &grpc_httpcli_ssl;
grpc_httpcli_post(&exec_ctx, &g_context, g_pollset, &req, "hello", 5,
grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5,
n_seconds_time(15), on_finish, (void *)42);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!g_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_free(host);
}
@ -179,20 +178,17 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
grpc_httpcli_context_init(&g_context);
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
test_get(port);
test_post(port);
grpc_httpcli_context_destroy(&g_context);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
gpr_subprocess_destroy(server);
return 0;

@ -39,11 +39,10 @@
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "src/core/iomgr/endpoint_pair.h"
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/util/test_config.h"
#include "test/core/iomgr/endpoint_tests.h"
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static void clean_up(void) {}
@ -55,8 +54,8 @@ static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
f.client_ep = p.client;
f.server_ep = p.server;
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
return f;
@ -75,14 +74,12 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_pollset_init(&g_pollset);
grpc_endpoint_tests(configs[0], &g_pollset);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}

@ -36,8 +36,8 @@
#include <sys/types.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/util/test_config.h"
@ -58,7 +58,6 @@
*/
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
size_t count_slices(gpr_slice *slices, size_t nslices, int *current_data) {
@ -135,10 +134,10 @@ static void read_and_write_test_read_handler(grpc_exec_ctx *exec_ctx,
state->incoming.slices, state->incoming.count, &state->current_read_data);
if (state->bytes_read == state->target_bytes || !success) {
gpr_log(GPR_INFO, "Read handler done");
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
state->read_done = 1 + success;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
} else if (success) {
grpc_endpoint_read(exec_ctx, state->read_ep, &state->incoming,
&state->done_read);
@ -170,10 +169,10 @@ static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
}
gpr_log(GPR_INFO, "Write handler done");
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
state->write_done = 1 + success;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
/* Do both reading and writing using the grpc_endpoint API.
@ -233,14 +232,14 @@ static void read_and_write_test(grpc_endpoint_test_config config,
}
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
while (!state.read_done || !state.write_done) {
grpc_pollset_worker *worker = NULL;
GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
end_test(config);
@ -252,10 +251,9 @@ static void read_and_write_test(grpc_endpoint_test_config config,
}
void grpc_endpoint_tests(grpc_endpoint_test_config config,
grpc_pollset *pollset, gpr_mu *mu) {
grpc_pollset *pollset) {
size_t i;
g_pollset = pollset;
g_mu = mu;
read_and_write_test(config, 10000000, 100000, 8192, 0);
read_and_write_test(config, 1000000, 100000, 1, 0);
read_and_write_test(config, 100000000, 100000, 1, 1);

@ -1,6 +1,6 @@
/*
*
* Copyright 2015-2016, Google Inc.
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -53,6 +53,6 @@ struct grpc_endpoint_test_config {
};
void grpc_endpoint_tests(grpc_endpoint_test_config config,
grpc_pollset *pollset, gpr_mu *mu);
grpc_pollset *pollset);
#endif /* GRPC_TEST_CORE_IOMGR_ENDPOINT_TESTS_H */
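
The one-line header change above is the whole interface revert for the shared endpoint tests: with the mutex owned by the pollset again, callers no longer pass it separately. Shown side by side for clarity; the two prototypes are before/after and are not meant to coexist in one translation unit.

    /* before the revert: the pollset and its mutex travelled separately */
    void grpc_endpoint_tests(grpc_endpoint_test_config config,
                             grpc_pollset *pollset, gpr_mu *mu);

    /* after the revert: the mutex is reached via GRPC_POLLSET_MU(pollset) */
    void grpc_endpoint_tests(grpc_endpoint_test_config config,
                             grpc_pollset *pollset);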

@ -49,12 +49,9 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/pollset_posix.h"
#include "test/core/util/test_config.h"
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
/* buffer size used to send and receive data.
1024 is the minimal value to set TCP send and receive buffer. */
@ -182,10 +179,10 @@ static void listen_shutdown_cb(grpc_exec_ctx *exec_ctx, void *arg /*server */,
grpc_fd_orphan(exec_ctx, sv->em_fd, NULL, NULL, "b");
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
sv->done = 1;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
/* Called when a new TCP connection request arrives in the listening port. */
@ -212,7 +209,7 @@ static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
se = gpr_malloc(sizeof(*se));
se->sv = sv;
se->em_fd = grpc_fd_create(fd, "listener");
grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
grpc_pollset_add_fd(exec_ctx, &g_pollset, se->em_fd);
se->session_read_closure.cb = session_read_cb;
se->session_read_closure.cb_arg = se;
grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
@ -241,7 +238,7 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);
sv->em_fd = grpc_fd_create(fd, "server");
grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
grpc_pollset_add_fd(exec_ctx, &g_pollset, sv->em_fd);
/* Register to be interested in reading from listen_fd. */
sv->listen_closure.cb = listen_cb;
sv->listen_closure.cb_arg = sv;
@ -252,18 +249,18 @@ static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
/* Wait and shutdown a sever. */
static void server_wait_and_shutdown(server *sv) {
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!sv->done) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
/* ===An upload client to test notify_on_write=== */
@ -299,7 +296,7 @@ static void client_session_shutdown_cb(grpc_exec_ctx *exec_ctx,
client *cl = arg;
grpc_fd_orphan(exec_ctx, cl->em_fd, NULL, NULL, "c");
cl->done = 1;
grpc_pollset_kick(g_pollset, NULL);
grpc_pollset_kick(&g_pollset, NULL);
}
/* Write as much as possible, then register notify_on_write. */
@ -310,9 +307,9 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
ssize_t write_once = 0;
if (!success) {
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
client_session_shutdown_cb(exec_ctx, arg, 1);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
return;
}
@ -322,7 +319,7 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
} while (write_once > 0);
if (errno == EAGAIN) {
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
cl->write_closure.cb = client_session_write;
cl->write_closure.cb_arg = cl;
@ -331,7 +328,7 @@ static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
} else {
client_session_shutdown_cb(exec_ctx, arg, 1);
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
} else {
gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno));
abort();
@ -360,25 +357,25 @@ static void client_start(grpc_exec_ctx *exec_ctx, client *cl, int port) {
}
cl->em_fd = grpc_fd_create(fd, "client");
grpc_pollset_add_fd(exec_ctx, g_pollset, cl->em_fd);
grpc_pollset_add_fd(exec_ctx, &g_pollset, cl->em_fd);
client_session_write(exec_ctx, cl, 1);
}
/* Wait for the signal to shutdown a client. */
static void client_wait_and_shutdown(client *cl) {
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!cl->done) {
grpc_pollset_worker *worker = NULL;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
/* Test grpc_fd. Start an upload server and client, upload a stream of
@ -413,20 +410,20 @@ static void first_read_callback(grpc_exec_ctx *exec_ctx,
void *arg /* fd_change_data */, bool success) {
fd_change_data *fdc = arg;
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
fdc->cb_that_ran = first_read_callback;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void second_read_callback(grpc_exec_ctx *exec_ctx,
void *arg /* fd_change_data */, bool success) {
fd_change_data *fdc = arg;
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
fdc->cb_that_ran = second_read_callback;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
/* Test that changing the callback we use for notify_on_read actually works.
@ -459,7 +456,7 @@ static void test_grpc_fd_change(void) {
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
em_fd = grpc_fd_create(sv[0], "test_grpc_fd_change");
grpc_pollset_add_fd(&exec_ctx, g_pollset, em_fd);
grpc_pollset_add_fd(&exec_ctx, &g_pollset, em_fd);
/* Register the first callback, then make its FD readable */
grpc_fd_notify_on_read(&exec_ctx, em_fd, &first_closure);
@ -468,18 +465,18 @@ static void test_grpc_fd_change(void) {
GPR_ASSERT(result == 1);
/* And now wait for it to run. */
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (a.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
GPR_ASSERT(a.cb_that_ran == first_read_callback);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
/* And drain the socket so we can generate a new read edge */
result = read(sv[0], &data, 1);
@ -492,19 +489,19 @@ static void test_grpc_fd_change(void) {
result = write(sv[1], &data, 1);
GPR_ASSERT(result == 1);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (b.cb_that_ran == NULL) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
/* Except now we verify that second_read_callback ran instead */
GPR_ASSERT(b.cb_that_ran == second_read_callback);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_fd_orphan(&exec_ctx, em_fd, NULL, NULL, "d");
grpc_exec_ctx_finish(&exec_ctx);
@ -522,14 +519,12 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_iomgr_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
test_grpc_fd();
test_grpc_fd_change();
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(g_pollset);
grpc_iomgr_shutdown();
return 0;
}
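
fd_posix_test.c above exercises the same pattern at the fd level: wrap a raw descriptor, add it to the global value-typed pollset, and kick that pollset from callbacks. A small sketch of the registration step, using only calls that appear in the hunks (the wrapper function is hypothetical):

    static void register_with_pollset(grpc_exec_ctx *exec_ctx, int fd,
                                      const char *name) {
      /* grpc_fd_create wraps the descriptor; grpc_pollset_add_fd ties it to
         the global pollset exactly as listen_cb and server_start do above. */
      grpc_fd *em_fd = grpc_fd_create(fd, name);
      grpc_pollset_add_fd(exec_ctx, &g_pollset, em_fd);
    }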

@ -40,7 +40,6 @@
#include <unistd.h>
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@ -49,9 +48,8 @@
#include "src/core/iomgr/timer.h"
#include "test/core/util/test_config.h"
static grpc_pollset_set *g_pollset_set;
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset_set g_pollset_set;
static grpc_pollset g_pollset;
static int g_connections_complete = 0;
static grpc_endpoint *g_connecting = NULL;
@ -60,10 +58,10 @@ static gpr_timespec test_deadline(void) {
}
static void finish_connection() {
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
g_connections_complete++;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void must_succeed(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
@ -101,14 +99,14 @@ void test_succeeds(void) {
GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
GPR_ASSERT(0 == listen(svr_fd, 1));
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
connections_complete_before = g_connections_complete;
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
/* connect to it */
GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);
grpc_closure_init(&done, must_succeed, NULL);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set,
(struct sockaddr *)&addr, addr_len,
gpr_inf_future(GPR_CLOCK_REALTIME));
@ -120,19 +118,19 @@ void test_succeeds(void) {
GPR_ASSERT(r >= 0);
close(r);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (g_connections_complete == connections_complete_before) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
}
@ -149,17 +147,17 @@ void test_fails(void) {
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
connections_complete_before = g_connections_complete;
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
/* connect to a broken address */
grpc_closure_init(&done, must_fail, NULL);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set,
(struct sockaddr *)&addr, addr_len,
gpr_inf_future(GPR_CLOCK_REALTIME));
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
/* wait for the connection callback to finish */
while (g_connections_complete == connections_complete_before) {
@ -167,14 +165,14 @@ void test_fails(void) {
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec polling_deadline = test_deadline();
if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker, now, polling_deadline);
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
}
@ -219,16 +217,16 @@ void test_times_out(void) {
connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
connections_complete_before = g_connections_complete;
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_closure_init(&done, must_fail, NULL);
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set,
(struct sockaddr *)&addr, addr_len, connect_deadline);
/* Make sure the event doesn't trigger early */
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
for (;;) {
grpc_pollset_worker *worker = NULL;
gpr_timespec now = gpr_now(connect_deadline.clock_type);
@ -254,13 +252,13 @@ void test_times_out(void) {
}
gpr_timespec polling_deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker, now, polling_deadline);
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
@ -279,20 +277,18 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
g_pollset_set = grpc_pollset_set_create();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_set_add_pollset(&exec_ctx, g_pollset_set, g_pollset);
grpc_pollset_set_init(&g_pollset_set);
grpc_pollset_init(&g_pollset);
grpc_pollset_set_add_pollset(&exec_ctx, &g_pollset_set, &g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
test_succeeds();
gpr_log(GPR_ERROR, "End of first test");
test_fails();
test_times_out();
grpc_pollset_set_destroy(g_pollset_set);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_pollset_set_destroy(&g_pollset_set);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}
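
The TCP client test above also reverts the pollset_set API from an opaque, heap-created object back to an embedded value with init/destroy. A condensed setup/teardown sketch built from the calls in the hunks (function names are illustrative):

    static grpc_pollset_set g_pollset_set; /* a value again, not a pointer */
    static grpc_pollset g_pollset;

    static void setup_polling(grpc_exec_ctx *exec_ctx) {
      grpc_pollset_set_init(&g_pollset_set);
      grpc_pollset_init(&g_pollset);
      grpc_pollset_set_add_pollset(exec_ctx, &g_pollset_set, &g_pollset);
    }

    static void teardown_polling(void) {
      grpc_pollset_set_destroy(&g_pollset_set);
      /* the pollset itself still goes through shutdown + destroy, as in main() */
    }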

@ -36,8 +36,8 @@
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <grpc/grpc.h>
@ -45,11 +45,10 @@
#include <grpc/support/log.h>
#include <grpc/support/time.h>
#include <grpc/support/useful.h>
#include "test/core/iomgr/endpoint_tests.h"
#include "test/core/util/test_config.h"
#include "test/core/iomgr/endpoint_tests.h"
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
/*
General test notes:
@ -146,7 +145,7 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
GPR_ASSERT(success);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
current_data = state->read_bytes % 256;
read_bytes = count_slices(state->incoming.slices, state->incoming.count,
&current_data);
@ -154,10 +153,10 @@ static void read_cb(grpc_exec_ctx *exec_ctx, void *user_data, bool success) {
gpr_log(GPR_INFO, "Read %d bytes of %d", read_bytes,
state->target_read_bytes);
if (state->read_bytes >= state->target_read_bytes) {
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
} else {
grpc_endpoint_read(exec_ctx, state->ep, &state->incoming, &state->read_cb);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
}
@ -176,7 +175,7 @@ static void read_test(size_t num_bytes, size_t slice_size) {
create_sockets(sv);
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
written_bytes = fill_socket_partial(sv[0], num_bytes);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -189,17 +188,17 @@ static void read_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_slice_buffer_destroy(&state.incoming);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -222,7 +221,7 @@ static void large_read_test(size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
"test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
written_bytes = fill_socket(sv[0]);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -235,17 +234,17 @@ static void large_read_test(size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_slice_buffer_destroy(&state.incoming);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -284,11 +283,11 @@ static void write_done(grpc_exec_ctx *exec_ctx,
void *user_data /* write_socket_state */, bool success) {
struct write_socket_state *state = (struct write_socket_state *)user_data;
gpr_log(GPR_INFO, "Write done callback called");
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
gpr_log(GPR_INFO, "Signalling write done");
state->write_done = 1;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
@ -305,11 +304,11 @@ void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
for (;;) {
grpc_pollset_worker *worker = NULL;
gpr_mu_lock(g_mu);
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
do {
bytes_read =
@ -351,7 +350,7 @@ static void write_test(size_t num_bytes, size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
state.ep = ep;
state.write_done = 0;
@ -364,19 +363,19 @@ static void write_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
drain_socket_blocking(sv[0], num_bytes, num_bytes);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
for (;;) {
grpc_pollset_worker *worker = NULL;
if (state.write_done) {
break;
}
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_slice_buffer_destroy(&outgoing);
grpc_endpoint_destroy(&exec_ctx, ep);
@ -387,7 +386,7 @@ static void write_test(size_t num_bytes, size_t slice_size) {
void on_fd_released(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
int *done = arg;
*done = 1;
grpc_pollset_kick(g_pollset, NULL);
grpc_pollset_kick(&g_pollset, NULL);
}
/* Do a read_test, then release fd and try to read/write again. Verify that
@ -411,7 +410,7 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);
written_bytes = fill_socket_partial(sv[0], num_bytes);
gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);
@ -424,27 +423,27 @@ static void release_fd_test(size_t num_bytes, size_t slice_size) {
grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (state.read_bytes < state.target_read_bytes) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
GPR_ASSERT(state.read_bytes == state.target_read_bytes);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
gpr_slice_buffer_destroy(&state.incoming);
grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
while (!fd_released_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, g_pollset, &worker,
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
}
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
GPR_ASSERT(fd_released_done == 1);
GPR_ASSERT(fd == sv[1]);
grpc_exec_ctx_finish(&exec_ctx);
@ -492,8 +491,8 @@ static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
slice_size, "test");
f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
slice_size, "test");
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);
grpc_exec_ctx_finish(&exec_ctx);
@ -513,15 +512,13 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
run_tests();
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_endpoint_tests(configs[0], &g_pollset);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}

@ -32,28 +32,24 @@
*/
#include "src/core/iomgr/tcp_server.h"
#include <errno.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr_utils.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>
#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", #x)
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static int g_nconnects = 0;
typedef struct on_connect_result {
@ -117,11 +113,11 @@ static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
grpc_endpoint_shutdown(exec_ctx, tcp);
grpc_endpoint_destroy(exec_ctx, tcp);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
on_connect_result_set(&g_result, acceptor);
g_nconnects++;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void test_no_op(void) {
@ -178,7 +174,7 @@ static void tcp_connect(grpc_exec_ctx *exec_ctx, const struct sockaddr *remote,
int clifd = socket(remote->sa_family, SOCK_STREAM, 0);
int nconnects_before;
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
nconnects_before = g_nconnects;
on_connect_result_init(&g_result);
GPR_ASSERT(clifd >= 0);
@ -188,18 +184,18 @@ static void tcp_connect(grpc_exec_ctx *exec_ctx, const struct sockaddr *remote,
while (g_nconnects == nconnects_before &&
gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(exec_ctx, g_pollset, &worker,
grpc_pollset_work(exec_ctx, &g_pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(exec_ctx);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
}
gpr_log(GPR_DEBUG, "wait done");
GPR_ASSERT(g_nconnects == nconnects_before + 1);
close(clifd);
*result = g_result;
gpr_mu_unlock(g_mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
/* Tests a tcp server with multiple ports. TODO(daniel-j-born): Multiple fds for
@ -214,6 +210,7 @@ static void test_connect(unsigned n) {
unsigned svr1_fd_count;
int svr1_port;
grpc_tcp_server *s = grpc_tcp_server_create(NULL);
grpc_pollset *pollsets[1];
unsigned i;
server_weak_ref weak_ref;
server_weak_ref_init(&weak_ref);
@ -262,7 +259,8 @@ static void test_connect(unsigned n) {
}
}
grpc_tcp_server_start(&exec_ctx, s, &g_pollset, 1, on_connect, NULL);
pollsets[0] = &g_pollset;
grpc_tcp_server_start(&exec_ctx, s, pollsets, 1, on_connect, NULL);
for (i = 0; i < n; i++) {
on_connect_result result;
@ -314,8 +312,7 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
test_no_op();
test_no_op_with_start();
@ -324,10 +321,9 @@ int main(int argc, char **argv) {
test_connect(1);
test_connect(10);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}
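
grpc_tcp_server_start still takes an array of pollset pointers, so once the global pollset is a value the tests wrap it in a one-element array; that is what the new pollsets[1] variable in test_connect above is for. A sketch of the call shape, assuming on_connect is the test's own callback as in the hunks:

    static void start_on_global_pollset(grpc_exec_ctx *exec_ctx,
                                        grpc_tcp_server *s) {
      grpc_pollset *pollsets[1];
      pollsets[0] = &g_pollset;
      grpc_tcp_server_start(exec_ctx, s, pollsets, 1, on_connect, NULL);
    }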

@ -34,20 +34,18 @@
#include "src/core/iomgr/workqueue.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "test/core/util/test_config.h"
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, bool success) {
GPR_ASSERT(success == 1);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
*(int *)p = 1;
grpc_pollset_kick(g_pollset, NULL);
gpr_mu_unlock(g_mu);
grpc_pollset_kick(&g_pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
static void test_ref_unref(void) {
@ -69,13 +67,13 @@ static void test_add_closure(void) {
grpc_closure_init(&c, must_succeed, &done);
grpc_workqueue_push(wq, &c, 1);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, &g_pollset);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
GPR_ASSERT(!done);
grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type),
deadline);
gpr_mu_unlock(g_mu);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(deadline.clock_type), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
@ -94,13 +92,13 @@ static void test_flush(void) {
grpc_exec_ctx_enqueue(&exec_ctx, &c, true, NULL);
grpc_workqueue_flush(&exec_ctx, wq);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset);
grpc_workqueue_add_to_pollset(&exec_ctx, wq, &g_pollset);
gpr_mu_lock(g_mu);
gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
GPR_ASSERT(!done);
grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type),
deadline);
gpr_mu_unlock(g_mu);
grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
gpr_now(deadline.clock_type), deadline);
gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
grpc_exec_ctx_finish(&exec_ctx);
GPR_ASSERT(done);
@ -117,18 +115,15 @@ int main(int argc, char **argv) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_test_init(argc, argv);
grpc_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_pollset_init(&g_pollset);
test_ref_unref();
test_add_closure();
test_flush();
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}

@ -45,8 +45,7 @@
#include "src/core/security/credentials.h"
typedef struct {
gpr_mu *mu;
grpc_pollset *pollset;
grpc_pollset pollset;
int is_done;
char *token;
} oauth2_request;
@ -67,11 +66,11 @@ static void on_oauth2_response(grpc_exec_ctx *exec_ctx, void *user_data,
GPR_SLICE_LENGTH(token_slice));
token[GPR_SLICE_LENGTH(token_slice)] = '\0';
}
gpr_mu_lock(request->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&request->pollset));
request->is_done = 1;
request->token = token;
grpc_pollset_kick(request->pollset, NULL);
gpr_mu_unlock(request->mu);
grpc_pollset_kick(&request->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&request->pollset));
}
static void do_nothing(grpc_exec_ctx *exec_ctx, void *unused, bool success) {}
@ -83,30 +82,28 @@ char *grpc_test_fetch_oauth2_token_with_credentials(
grpc_closure do_nothing_closure;
grpc_auth_metadata_context null_ctx = {"", "", NULL, NULL};
request.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(request.pollset, &request.mu);
grpc_pollset_init(&request.pollset);
request.is_done = 0;
grpc_closure_init(&do_nothing_closure, do_nothing, NULL);
grpc_call_credentials_get_request_metadata(&exec_ctx, creds, request.pollset,
grpc_call_credentials_get_request_metadata(&exec_ctx, creds, &request.pollset,
null_ctx, on_oauth2_response,
&request);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(request.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
while (!request.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, request.pollset, &worker,
grpc_pollset_work(&exec_ctx, &request.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
}
gpr_mu_unlock(request.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));
grpc_pollset_shutdown(&exec_ctx, request.pollset, &do_nothing_closure);
grpc_pollset_shutdown(&exec_ctx, &request.pollset, &do_nothing_closure);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_destroy(request.pollset);
gpr_free(request.pollset);
grpc_pollset_destroy(&request.pollset);
return request.token;
}
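
oauth2_utils.c shows the struct-embedding side of the revert: the synchronization pollset lives inside the request object by value, and callbacks lock and kick it through GRPC_POLLSET_MU. A trimmed sketch of that shape (the struct and function names are illustrative):

    typedef struct {
      grpc_pollset pollset; /* embedded by value; no separate gpr_mu field */
      int is_done;
    } request_sync;

    static void signal_done(request_sync *req) {
      gpr_mu_lock(GRPC_POLLSET_MU(&req->pollset));
      req->is_done = 1;
      grpc_pollset_kick(&req->pollset, NULL);
      gpr_mu_unlock(GRPC_POLLSET_MU(&req->pollset));
    }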

@ -34,6 +34,8 @@
#include <stdio.h>
#include <string.h>
#include "src/core/security/credentials.h"
#include "src/core/support/string.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
@ -42,12 +44,8 @@
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include "src/core/security/credentials.h"
#include "src/core/support/string.h"
typedef struct {
gpr_mu *mu;
grpc_pollset *pollset;
grpc_pollset pollset;
int is_done;
} synchronizer;
@ -64,10 +62,10 @@ static void on_metadata_response(grpc_exec_ctx *exec_ctx, void *user_data,
printf("\nGot token: %s\n\n", token);
gpr_free(token);
}
gpr_mu_lock(sync->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
sync->is_done = 1;
grpc_pollset_kick(sync->pollset, NULL);
gpr_mu_unlock(sync->mu);
grpc_pollset_kick(&sync->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
}
int main(int argc, char **argv) {
@ -93,30 +91,26 @@ int main(int argc, char **argv) {
goto end;
}
sync.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(sync.pollset, &sync.mu);
grpc_pollset_init(&sync.pollset);
sync.is_done = 0;
grpc_call_credentials_get_request_metadata(
&exec_ctx, ((grpc_composite_channel_credentials *)creds)->call_creds,
sync.pollset, context, on_metadata_response, &sync);
&sync.pollset, context, on_metadata_response, &sync);
gpr_mu_lock(sync.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
while (!sync.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
grpc_pollset_work(&exec_ctx, &sync.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(sync.mu);
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(sync.mu);
}
gpr_mu_unlock(sync.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
}
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
grpc_channel_credentials_release(creds);
gpr_free(sync.pollset);
end:
gpr_cmdline_destroy(cl);

@ -36,17 +36,16 @@
#include <fcntl.h>
#include <sys/types.h>
#include "src/core/security/secure_endpoint.h"
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/iomgr.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/security/secure_endpoint.h"
#include "src/core/tsi/fake_transport_security.h"
#include "test/core/util/test_config.h"
#include "src/core/tsi/fake_transport_security.h"
static gpr_mu *g_mu;
static grpc_pollset *g_pollset;
static grpc_pollset g_pollset;
static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
size_t slice_size, gpr_slice *leftover_slices, size_t leftover_nslices) {
@ -57,8 +56,8 @@ static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
grpc_endpoint_pair tcp;
tcp = grpc_iomgr_create_endpoint_pair("fixture", slice_size);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, &g_pollset);
grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, &g_pollset);
if (leftover_nslices == 0) {
f.client_ep =
@ -182,16 +181,13 @@ int main(int argc, char **argv) {
grpc_test_init(argc, argv);
grpc_init();
g_pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(g_pollset, &g_mu);
grpc_endpoint_tests(configs[0], g_pollset, g_mu);
grpc_pollset_init(&g_pollset);
grpc_endpoint_tests(configs[0], &g_pollset);
test_leftover(configs[1], 1);
grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
grpc_exec_ctx_finish(&exec_ctx);
grpc_shutdown();
gpr_free(g_pollset);
return 0;
}

@ -34,6 +34,7 @@
#include <stdio.h>
#include <string.h>
#include "src/core/security/jwt_verifier.h"
#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/support/alloc.h>
@ -42,11 +43,8 @@
#include <grpc/support/slice.h>
#include <grpc/support/sync.h>
#include "src/core/security/jwt_verifier.h"
typedef struct {
grpc_pollset *pollset;
gpr_mu *mu;
grpc_pollset pollset;
int is_done;
int success;
} synchronizer;
@ -79,10 +77,10 @@ static void on_jwt_verification_done(void *user_data,
grpc_jwt_verifier_status_to_string(status));
}
gpr_mu_lock(sync->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
sync->is_done = 1;
grpc_pollset_kick(sync->pollset, NULL);
gpr_mu_unlock(sync->mu);
grpc_pollset_kick(&sync->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
}
int main(int argc, char **argv) {
@ -105,26 +103,23 @@ int main(int argc, char **argv) {
grpc_init();
sync.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(sync.pollset, &sync.mu);
grpc_pollset_init(&sync.pollset);
sync.is_done = 0;
grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud,
grpc_jwt_verifier_verify(&exec_ctx, verifier, &sync.pollset, jwt, aud,
on_jwt_verification_done, &sync);
gpr_mu_lock(sync.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
while (!sync.is_done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
grpc_pollset_work(&exec_ctx, &sync.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_inf_future(GPR_CLOCK_MONOTONIC));
gpr_mu_unlock(sync.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(sync.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
}
gpr_mu_unlock(sync.mu);
gpr_free(sync.pollset);
gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
grpc_jwt_verifier_destroy(verifier);
gpr_cmdline_destroy(cl);

@ -69,8 +69,7 @@ static int has_port_been_chosen(int port) {
}
typedef struct freereq {
gpr_mu *mu;
grpc_pollset *pollset;
grpc_pollset pollset;
int done;
} freereq;
@ -83,10 +82,10 @@ static void destroy_pollset_and_shutdown(grpc_exec_ctx *exec_ctx, void *p,
static void freed_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
const grpc_httpcli_response *response) {
freereq *pr = arg;
gpr_mu_lock(pr->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
pr->done = 1;
grpc_pollset_kick(pr->pollset, NULL);
gpr_mu_unlock(pr->mu);
grpc_pollset_kick(&pr->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
}
static void free_port_using_server(char *server, int port) {
@ -101,34 +100,31 @@ static void free_port_using_server(char *server, int port) {
memset(&pr, 0, sizeof(pr));
memset(&req, 0, sizeof(req));
pr.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(pr.pollset, &pr.mu);
grpc_pollset_init(&pr.pollset);
grpc_closure_init(&shutdown_closure, destroy_pollset_and_shutdown,
pr.pollset);
&pr.pollset);
req.host = server;
gpr_asprintf(&path, "/drop/%d", port);
req.path = path;
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&exec_ctx, &context, pr.pollset, &req,
grpc_httpcli_get(&exec_ctx, &context, &pr.pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), freed_port_from_server,
&pr);
gpr_mu_lock(pr.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
while (!pr.done) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, pr.pollset, &worker,
grpc_pollset_work(&exec_ctx, &pr.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
}
gpr_mu_unlock(pr.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
grpc_httpcli_context_destroy(&context);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_shutdown(&exec_ctx, pr.pollset, &shutdown_closure);
grpc_pollset_shutdown(&exec_ctx, &pr.pollset, &shutdown_closure);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pr.pollset);
gpr_free(path);
}
@ -206,8 +202,7 @@ static int is_port_available(int *port, int is_tcp) {
}
typedef struct portreq {
gpr_mu *mu;
grpc_pollset *pollset;
grpc_pollset pollset;
int port;
int retries;
char *server;
@ -239,7 +234,7 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
pr->retries++;
req.host = pr->server;
req.path = "/get";
grpc_httpcli_get(exec_ctx, pr->ctx, pr->pollset, &req,
grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
pr);
return;
@ -251,10 +246,10 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
port = port * 10 + response->body[i] - '0';
}
GPR_ASSERT(port > 1024);
gpr_mu_lock(pr->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
pr->port = port;
grpc_pollset_kick(pr->pollset, NULL);
gpr_mu_unlock(pr->mu);
grpc_pollset_kick(&pr->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
}
static int pick_port_using_server(char *server) {
@ -268,10 +263,9 @@ static int pick_port_using_server(char *server) {
memset(&pr, 0, sizeof(pr));
memset(&req, 0, sizeof(req));
pr.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(pr.pollset, &pr.mu);
grpc_pollset_init(&pr.pollset);
grpc_closure_init(&shutdown_closure, destroy_pollset_and_shutdown,
pr.pollset);
&pr.pollset);
pr.port = -1;
pr.server = server;
pr.ctx = &context;
@ -280,23 +274,22 @@ static int pick_port_using_server(char *server) {
req.path = "/get";
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&exec_ctx, &context, pr.pollset, &req,
grpc_httpcli_get(&exec_ctx, &context, &pr.pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
&pr);
grpc_exec_ctx_finish(&exec_ctx);
gpr_mu_lock(pr.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
while (pr.port == -1) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, pr.pollset, &worker,
grpc_pollset_work(&exec_ctx, &pr.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
}
gpr_mu_unlock(pr.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
grpc_httpcli_context_destroy(&context);
grpc_pollset_shutdown(&exec_ctx, pr.pollset, &shutdown_closure);
grpc_pollset_shutdown(&exec_ctx, &pr.pollset, &shutdown_closure);
grpc_exec_ctx_finish(&exec_ctx);
gpr_free(pr.pollset);
return pr.port;
}
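
In the port_posix.c helpers above, the shutdown closure now receives the address of the embedded pollset rather than a heap pointer it used to free. A condensed version of that tail sequence, assuming the portreq and destroy_pollset_and_shutdown definitions shown in the hunks:

    static void shutdown_port_pollset(portreq *pr) {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_closure shutdown_closure;
      grpc_closure_init(&shutdown_closure, destroy_pollset_and_shutdown,
                        &pr->pollset);
      grpc_pollset_shutdown(&exec_ctx, &pr->pollset, &shutdown_closure);
      /* finish the exec_ctx so the pending closure runs, as the original
         helpers do, before it leaves scope */
      grpc_exec_ctx_finish(&exec_ctx);
    }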

@ -129,8 +129,7 @@ static int is_port_available(int *port, int is_tcp) {
}
typedef struct portreq {
grpc_pollset *pollset;
gpr_mu *mu;
grpc_pollset pollset;
int port;
} portreq;
@ -146,10 +145,10 @@ static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
port = port * 10 + response->body[i] - '0';
}
GPR_ASSERT(port > 1024);
gpr_mu_lock(pr->mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
pr->port = port;
grpc_pollset_kick(pr->pollset, NULL);
gpr_mu_unlock(pr->mu);
grpc_pollset_kick(&pr->pollset, NULL);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
}
static void destroy_pollset_and_shutdown(grpc_exec_ctx *exec_ctx, void *p,
@ -169,34 +168,32 @@ static int pick_port_using_server(char *server) {
memset(&pr, 0, sizeof(pr));
memset(&req, 0, sizeof(req));
pr.pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(pr.pollset, &pr.mu);
grpc_pollset_init(&pr.pollset);
pr.port = -1;
req.host = server;
req.path = "/get";
grpc_httpcli_context_init(&context);
grpc_httpcli_get(&exec_ctx, &context, pr.pollset, &req,
grpc_httpcli_get(&exec_ctx, &context, &pr.pollset, &req,
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
&pr);
gpr_mu_lock(pr.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
while (pr.port == -1) {
grpc_pollset_worker *worker = NULL;
grpc_pollset_work(&exec_ctx, pr.pollset, &worker,
grpc_pollset_work(&exec_ctx, &pr.pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC),
GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
gpr_mu_unlock(pr.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
grpc_exec_ctx_flush(&exec_ctx);
gpr_mu_lock(pr.mu);
gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset));
}
gpr_mu_unlock(pr.mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset));
grpc_httpcli_context_destroy(&context);
grpc_closure_init(&destroy_pollset_closure, destroy_pollset_and_shutdown,
&pr.pollset);
grpc_pollset_shutdown(&exec_ctx, pr.pollset, &destroy_pollset_closure);
gpr_free(pr.pollset);
grpc_pollset_shutdown(&exec_ctx, &pr.pollset, &destroy_pollset_closure);
grpc_exec_ctx_finish(&exec_ctx);
return pr.port;

@ -57,8 +57,8 @@ void test_tcp_server_init(test_tcp_server *server,
server->tcp_server = NULL;
grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server);
server->shutdown = 0;
server->pollset = gpr_malloc(grpc_pollset_size());
grpc_pollset_init(server->pollset, &server->mu);
grpc_pollset_init(&server->pollset);
server->pollsets[0] = &server->pollset;
server->on_connect = on_connect;
server->cb_data = user_data;
}
@ -77,7 +77,7 @@ void test_tcp_server_start(test_tcp_server *server, int port) {
grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr));
GPR_ASSERT(port_added == port);
grpc_tcp_server_start(&exec_ctx, server->tcp_server, &server->pollset, 1,
grpc_tcp_server_start(&exec_ctx, server->tcp_server, server->pollsets, 1,
server->on_connect, server->cb_data);
gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);
@ -90,10 +90,10 @@ void test_tcp_server_poll(test_tcp_server *server, int seconds) {
gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(seconds, GPR_TIMESPAN));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
gpr_mu_lock(server->mu);
grpc_pollset_work(&exec_ctx, server->pollset, &worker,
gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
grpc_pollset_work(&exec_ctx, &server->pollset, &worker,
gpr_now(GPR_CLOCK_MONOTONIC), deadline);
gpr_mu_unlock(server->mu);
gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
grpc_exec_ctx_finish(&exec_ctx);
}
@ -111,9 +111,8 @@ void test_tcp_server_destroy(test_tcp_server *server) {
gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
test_tcp_server_poll(server, 1);
}
grpc_pollset_shutdown(&exec_ctx, server->pollset, &do_nothing_cb);
grpc_pollset_shutdown(&exec_ctx, &server->pollset, &do_nothing_cb);
grpc_exec_ctx_finish(&exec_ctx);
grpc_pollset_destroy(server->pollset);
gpr_free(server->pollset);
grpc_pollset_destroy(&server->pollset);
grpc_shutdown();
}

@ -41,8 +41,8 @@ typedef struct test_tcp_server {
grpc_tcp_server *tcp_server;
grpc_closure shutdown_complete;
int shutdown;
gpr_mu *mu;
grpc_pollset *pollset;
grpc_pollset pollset;
grpc_pollset *pollsets[1];
grpc_tcp_server_cb on_connect;
void *cb_data;
} test_tcp_server;
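
With the struct above, a test drives the helper through the embedded pollset without ever touching a mutex pointer. A hypothetical caller, using only the helper functions that appear in test_tcp_server.c above and assuming test_tcp_server_init takes the connect callback plus user data, as the assignments in its body suggest:

    static void run_test_server_briefly(test_tcp_server *server, int port) {
      /* hypothetical driver: the real tests interleave client connects
         between the poll calls */
      test_tcp_server_init(server, on_connect, NULL);
      test_tcp_server_start(server, port);
      test_tcp_server_poll(server, 1 /* seconds */);
      test_tcp_server_destroy(server);
    }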
