Merge github.com:grpc/grpc into sometimes-its-good-just-to-check-in-with-each-other

Conflicts:
	src/core/client_config/subchannel.c
	src/core/iomgr/alarm.c
	src/core/iomgr/iomgr.c
	src/core/iomgr/tcp_client_posix.c
pull/2477/head
Craig Tiller 9 years ago
commit 044d153141
  1. include/grpc/support/time.h (3 changes)
  2. src/core/channel/client_channel.c (28 changes)
  3. src/core/client_config/subchannel.c (4 changes)
  4. src/core/iomgr/alarm.c (10 changes)
  5. src/core/iomgr/iomgr.c (10 changes)
  6. src/core/iomgr/pollset_posix.c (4 changes)
  7. src/core/iomgr/pollset_windows.c (2 changes)
  8. src/core/iomgr/tcp_client_posix.c (4 changes)
  9. src/core/iomgr/tcp_client_windows.c (2 changes)
  10. src/core/support/sync_posix.c (3 changes)
  11. src/core/support/sync_win32.c (4 changes)
  12. src/core/support/time.c (27 changes)
  13. src/core/support/time_posix.c (2 changes)
  14. src/core/support/time_win32.c (2 changes)
  15. src/core/surface/call.c (6 changes)
  16. src/core/surface/completion_queue.c (4 changes)
  17. src/core/transport/chttp2/stream_encoder.c (9 changes)
  18. src/core/transport/transport_op_string.c (2 changes)
  19. test/core/iomgr/alarm_test.c (9 changes)
  20. test/core/iomgr/endpoint_tests.c (6 changes)
  21. test/core/iomgr/fd_posix_test.c (8 changes)
  22. test/core/iomgr/tcp_client_posix_test.c (6 changes)
  23. test/core/iomgr/tcp_server_posix_test.c (2 changes)
  24. test/core/util/test_config.h (4 changes)
  25. test/cpp/interop/client.cc (7 changes)
  26. test/cpp/interop/interop_client.cc (28 changes)
  27. test/cpp/interop/interop_client.h (3 changes)
  28. tools/jenkins/run_distribution.sh (95 changes)

@@ -83,6 +83,9 @@ void gpr_time_init(void);
 /* Return the current time measured from the given clocks epoch. */
 gpr_timespec gpr_now(gpr_clock_type clock);
+/* Convert a timespec from one clock to another */
+gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock);
 /* Return -ve, 0, or +ve according to whether a < b, a == b, or a > b
    respectively. */
 int gpr_time_cmp(gpr_timespec a, gpr_timespec b);

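The conversion helper above is the backbone of this merge: deadlines created on one clock can now be normalized before use. A minimal usage sketch (illustration only, not part of the diff; it assumes nothing beyond the gpr_* APIs visible in these hunks):

    #include <grpc/support/log.h>
    #include <grpc/support/time.h>

    void deadline_example(void) {
      /* A wall-clock deadline 30 seconds out... */
      gpr_timespec realtime_deadline =
          gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                       gpr_time_from_seconds(30, GPR_TIMESPAN));
      /* ...normalized to the monotonic clock, which the iomgr code below
         now expects everywhere. */
      gpr_timespec monotonic_deadline =
          gpr_convert_clock_type(realtime_deadline, GPR_CLOCK_MONOTONIC);
      GPR_ASSERT(monotonic_deadline.clock_type == GPR_CLOCK_MONOTONIC);
    }
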
@@ -249,21 +249,6 @@ static void picked_target(void *arg, int iomgr_success) {
   }
 }
-static void pick_target(grpc_lb_policy *lb_policy, call_data *calld) {
-  grpc_metadata_batch *initial_metadata;
-  grpc_transport_stream_op *op = &calld->waiting_op;
-  GPR_ASSERT(op->bind_pollset);
-  GPR_ASSERT(op->send_ops);
-  GPR_ASSERT(op->send_ops->nops >= 1);
-  GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
-  initial_metadata = &op->send_ops->ops[0].data.metadata;
-  grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
-  grpc_lb_policy_pick(lb_policy, op->bind_pollset, initial_metadata,
-                      &calld->picked_channel, &calld->async_setup_task);
-}
 static grpc_iomgr_closure *merge_into_waiting_op(
     grpc_call_element *elem, grpc_transport_stream_op *new_op) {
   call_data *calld = elem->call_data;
@@ -371,12 +356,23 @@ static void perform_transport_stream_op(grpc_call_element *elem,
       gpr_mu_lock(&chand->mu_config);
       lb_policy = chand->lb_policy;
       if (lb_policy) {
+        grpc_transport_stream_op *op = &calld->waiting_op;
+        grpc_pollset *bind_pollset = op->bind_pollset;
+        grpc_metadata_batch *initial_metadata = &op->send_ops->ops[0].data.metadata;
         GRPC_LB_POLICY_REF(lb_policy, "pick");
         gpr_mu_unlock(&chand->mu_config);
         calld->state = CALL_WAITING_FOR_PICK;
+        GPR_ASSERT(op->bind_pollset);
+        GPR_ASSERT(op->send_ops);
+        GPR_ASSERT(op->send_ops->nops >= 1);
+        GPR_ASSERT(op->send_ops->ops[0].type == GRPC_OP_METADATA);
         gpr_mu_unlock(&calld->mu_state);
-        pick_target(lb_policy, calld);
+        grpc_iomgr_closure_init(&calld->async_setup_task, picked_target, calld);
+        grpc_lb_policy_pick(lb_policy, bind_pollset, initial_metadata,
+                            &calld->picked_channel, &calld->async_setup_task);
         GRPC_LB_POLICY_UNREF(lb_policy, "pick");
       } else if (chand->resolver != NULL) {

@@ -307,7 +307,7 @@ static void continue_connect(grpc_subchannel *c) {
 }
 static void start_connect(grpc_subchannel *c) {
-  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   c->next_attempt = now;
   c->backoff_delta = gpr_time_from_seconds(1, GPR_TIMESPAN);
@@ -598,7 +598,7 @@ static void subchannel_connected(void *arg, int iomgr_success) {
   if (c->connecting_result.transport != NULL) {
     publish_transport(c);
   } else {
-    gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     gpr_mu_lock(&c->mu);
     GPR_ASSERT(!c->have_alarm);
     c->have_alarm = 1;

@@ -36,6 +36,7 @@
 #include "src/core/iomgr/alarm_heap.h"
 #include "src/core/iomgr/alarm_internal.h"
 #include "src/core/iomgr/time_averaged_stats.h"
+#include <grpc/support/log.h>
 #include <grpc/support/sync.h>
 #include <grpc/support/useful.h>
@@ -67,6 +68,7 @@ typedef struct {
 static gpr_mu g_mu;
 /* Allow only one run_some_expired_alarms at once */
 static gpr_mu g_checker_mu;
+static gpr_clock_type g_clock_type;
 static shard_type g_shards[NUM_SHARDS];
 /* Protected by g_mu */
 static shard_type *g_shard_queue[NUM_SHARDS];
@@ -85,6 +87,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
   gpr_mu_init(&g_mu);
   gpr_mu_init(&g_checker_mu);
+  g_clock_type = now.clock_type;
   for (i = 0; i < NUM_SHARDS; i++) {
     shard_type *shard = &g_shards[i];
@@ -102,7 +105,7 @@ void grpc_alarm_list_init(gpr_timespec now) {
 void grpc_alarm_list_shutdown(void) {
   int i;
-  while (run_some_expired_alarms(NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL,
+  while (run_some_expired_alarms(NULL, gpr_inf_future(g_clock_type), NULL,
                                  0))
     ;
   for (i = 0; i < NUM_SHARDS; i++) {
@@ -175,6 +178,8 @@ void grpc_alarm_init(grpc_alarm *alarm, gpr_timespec deadline,
                      gpr_timespec now) {
   int is_first_alarm = 0;
   shard_type *shard = &g_shards[shard_idx(alarm)];
+  GPR_ASSERT(deadline.clock_type == g_clock_type);
+  GPR_ASSERT(now.clock_type == g_clock_type);
   alarm->cb = alarm_cb;
   alarm->cb_arg = alarm_cb_arg;
   alarm->deadline = deadline;
@@ -355,9 +360,10 @@ static int run_some_expired_alarms(gpr_mu *drop_mu, gpr_timespec now,
 }
 int grpc_alarm_check(gpr_mu *drop_mu, gpr_timespec now, gpr_timespec *next) {
+  GPR_ASSERT(now.clock_type == g_clock_type);
   return run_some_expired_alarms(
       drop_mu, now, next,
-      gpr_time_cmp(now, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0);
+      gpr_time_cmp(now, gpr_inf_future(now.clock_type)) != 0);
 }
 gpr_timespec grpc_alarm_list_next_timeout(void) {

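The new asserts pin down an invariant: every alarm deadline must be expressed on the clock that grpc_alarm_list_init was given. A hedged sketch of a conforming call (alarm_cb and cb_arg are hypothetical names, not from this diff):

    grpc_alarm alarm;
    /* Deadline and "now" both on the monotonic clock, matching
       grpc_alarm_list_init(gpr_now(GPR_CLOCK_MONOTONIC)) in iomgr.c below. */
    grpc_alarm_init(&alarm,
                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                 gpr_time_from_millis(100, GPR_TIMESPAN)),
                    alarm_cb, cb_arg, gpr_now(GPR_CLOCK_MONOTONIC));
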
@@ -57,9 +57,9 @@ static grpc_iomgr_object g_root_object;
 static void background_callback_executor(void *ignored) {
   gpr_mu_lock(&g_mu);
   while (!g_shutdown) {
-    gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+    gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
     gpr_timespec short_deadline = gpr_time_add(
-        gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
+        gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
     if (g_cbs_head) {
       grpc_iomgr_closure *closure = g_cbs_head;
       g_cbs_head = closure->next;
@@ -67,7 +67,7 @@ static void background_callback_executor(void *ignored) {
       gpr_mu_unlock(&g_mu);
       closure->cb(closure->cb_arg, closure->success);
       gpr_mu_lock(&g_mu);
-    } else if (grpc_alarm_check(&g_mu, gpr_now(GPR_CLOCK_REALTIME),
+    } else if (grpc_alarm_check(&g_mu, gpr_now(GPR_CLOCK_MONOTONIC),
                                 &deadline)) {
     } else {
       gpr_mu_unlock(&g_mu);
@@ -90,7 +90,7 @@ void grpc_iomgr_init(void) {
   gpr_thd_id id;
   gpr_mu_init(&g_mu);
   gpr_cv_init(&g_rcv);
-  grpc_alarm_list_init(gpr_now(GPR_CLOCK_REALTIME));
+  grpc_alarm_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
   g_root_object.next = g_root_object.prev = &g_root_object;
   g_root_object.name = "root";
   grpc_iomgr_platform_init();
@@ -145,7 +145,7 @@ void grpc_iomgr_shutdown(void) {
       } while (g_cbs_head);
       continue;
     }
-    if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)) {
+    if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL)) {
      continue;
    }
    if (g_root_object.next != &g_root_object) {

@@ -136,7 +136,7 @@ static void finish_shutdown(grpc_pollset *pollset) {
 int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
   /* pollset->mu already held */
-  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   if (gpr_time_cmp(now, deadline) > 0) {
     return 0;
   }
@@ -205,7 +205,7 @@ int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                          gpr_timespec now) {
   gpr_timespec timeout;
   static const int max_spin_polling_us = 10;
-  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
+  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
     return -1;
   }
   if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(

@@ -70,7 +70,7 @@ void grpc_pollset_destroy(grpc_pollset *pollset) {
 int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
   gpr_timespec now;
-  now = gpr_now(GPR_CLOCK_REALTIME);
+  now = gpr_now(GPR_CLOCK_MONOTONIC);
   if (gpr_time_cmp(now, deadline) > 0) {
     return 0 /* GPR_FALSE */;
   }

@@ -259,8 +259,8 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
   ac->write_closure.cb_arg = ac;
   gpr_mu_lock(&ac->mu);
-  grpc_alarm_init(&ac->alarm, deadline, tc_on_alarm, ac,
-                  gpr_now(GPR_CLOCK_REALTIME));
+  grpc_alarm_init(&ac->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+                  tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
   grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
   gpr_mu_unlock(&ac->mu);

@@ -216,7 +216,7 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *tcp),
   ac->aborted = 0;
   grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac,
-                  gpr_now(GPR_CLOCK_REALTIME));
+                  gpr_now(GPR_CLOCK_MONOTONIC));
   socket->write_info.outstanding = 1;
   grpc_socket_notify_on_write(socket, on_connect, ac);
   return;

@@ -63,10 +63,11 @@ void gpr_cv_destroy(gpr_cv *cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
 int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
   int err = 0;
-  if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
+  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
     err = pthread_cond_wait(cv, mu);
   } else {
     struct timespec abs_deadline_ts;
+    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
     abs_deadline_ts.tv_sec = abs_deadline.tv_sec;
     abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
     err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);

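With the conversion added above, gpr_cv_wait can accept a deadline on any clock and convert it to GPR_CLOCK_REALTIME internally, since pthread_cond_timedwait expects a realtime timespec. A small sketch (not from the commit; assumes mu and cv are already initialized gpr_mu/gpr_cv objects):

    gpr_timespec deadline = gpr_time_add(
        gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
    gpr_mu_lock(&mu);
    /* err is non-zero (e.g. ETIMEDOUT from pthread_cond_timedwait, per the
       implementation above) if the deadline passes before cv is signaled. */
    int err = gpr_cv_wait(&cv, &mu, deadline);
    gpr_mu_unlock(&mu);
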
@@ -83,10 +83,10 @@ int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
   int timeout = 0;
   DWORD timeout_max_ms;
   mu->locked = 0;
-  if (gpr_time_cmp(abs_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) == 0) {
+  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) == 0) {
     SleepConditionVariableCS(cv, &mu->cs, INFINITE);
   } else {
-    gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
+    gpr_timespec now = gpr_now(abs_deadline.clock_type);
     gpr_int64 now_ms = now.tv_sec * 1000 + now.tv_nsec / 1000000;
     gpr_int64 deadline_ms =
         abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;

@@ -290,3 +290,30 @@ gpr_int32 gpr_time_to_millis(gpr_timespec t) {
 double gpr_timespec_to_micros(gpr_timespec t) {
   return (double)t.tv_sec * GPR_US_PER_SEC + t.tv_nsec * 1e-3;
 }
+gpr_timespec gpr_convert_clock_type(gpr_timespec t, gpr_clock_type clock_type) {
+  if (t.clock_type == clock_type) {
+    return t;
+  }
+  if (t.tv_nsec == 0) {
+    if (t.tv_sec == TYPE_MAX(time_t)) {
+      t.clock_type = clock_type;
+      return t;
+    }
+    if (t.tv_sec == TYPE_MIN(time_t)) {
+      t.clock_type = clock_type;
+      return t;
+    }
+  }
+  if (clock_type == GPR_TIMESPAN) {
+    return gpr_time_sub(t, gpr_now(t.clock_type));
+  }
+  if (t.clock_type == GPR_TIMESPAN) {
+    return gpr_time_add(gpr_now(clock_type), t);
+  }
+  return gpr_time_add(gpr_now(clock_type), gpr_time_sub(t, gpr_now(t.clock_type)));
+}

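To make the rules above concrete, an illustrative sketch (not from the commit) of the cases gpr_convert_clock_type handles: identity, infinities (tv_nsec == 0 with tv_sec at the type extremes, which only get relabeled), and relative spans versus absolute times, which convert via gpr_now on each clock:

    void conversion_cases(void) {
      /* Infinity survives conversion with only the clock label changed. */
      gpr_timespec inf = gpr_convert_clock_type(
          gpr_inf_future(GPR_CLOCK_REALTIME), GPR_CLOCK_MONOTONIC);
      GPR_ASSERT(gpr_time_cmp(inf, gpr_inf_future(GPR_CLOCK_MONOTONIC)) == 0);
      /* A 5-second span becomes "now + 5s" on an absolute clock. */
      gpr_timespec in_5s = gpr_convert_clock_type(
          gpr_time_from_seconds(5, GPR_TIMESPAN), GPR_CLOCK_MONOTONIC);
      GPR_ASSERT(in_5s.clock_type == GPR_CLOCK_MONOTONIC);
    }
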
@@ -120,7 +120,7 @@ void gpr_sleep_until(gpr_timespec until) {
   for (;;) {
     /* We could simplify by using clock_nanosleep instead, but it might be
      * slightly less portable. */
-    now = gpr_now(GPR_CLOCK_REALTIME);
+    now = gpr_now(until.clock_type);
     if (gpr_time_cmp(until, now) <= 0) {
       return;
     }

@@ -80,7 +80,7 @@ void gpr_sleep_until(gpr_timespec until) {
   for (;;) {
     /* We could simplify by using clock_nanosleep instead, but it might be
      * slightly less portable. */
-    now = gpr_now(GPR_CLOCK_REALTIME);
+    now = gpr_now(until.clock_type);
    if (gpr_time_cmp(until, now) <= 0) {
      return;
    }

@@ -348,7 +348,7 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
   }
   grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
                        CALL_STACK_FROM_CALL(call));
-  if (gpr_time_cmp(send_deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
+  if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) != 0) {
     set_deadline_alarm(call, send_deadline);
   }
   return call;
@@ -1278,8 +1278,8 @@ static void set_deadline_alarm(grpc_call *call, gpr_timespec deadline) {
   }
   GRPC_CALL_INTERNAL_REF(call, "alarm");
   call->have_alarm = 1;
-  grpc_alarm_init(&call->alarm, deadline, call_alarm, call,
-                  gpr_now(GPR_CLOCK_REALTIME));
+  grpc_alarm_init(&call->alarm, gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
+                  call_alarm, call, gpr_now(GPR_CLOCK_MONOTONIC));
 }
 /* we offset status by a small amount when storing it into transport metadata

@@ -148,6 +148,8 @@ grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
   grpc_event ret;
+  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "next");
   gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
   for (;;) {
@@ -188,6 +190,8 @@ grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
   grpc_cq_completion *c;
   grpc_cq_completion *prev;
+  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
   GRPC_CQ_INTERNAL_REF(cc, "pluck");
   gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
   for (;;) {

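Because next/pluck now normalize their deadline argument at the API boundary, callers can keep passing wall-clock deadlines unchanged. A sketch (cq is assumed to be an existing completion queue; grpc_completion_queue_next takes (cc, deadline) per this diff):

    gpr_timespec deadline = gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(1, GPR_TIMESPAN));
    /* Converted to GPR_CLOCK_MONOTONIC inside grpc_completion_queue_next. */
    grpc_event ev = grpc_completion_queue_next(cq, deadline);
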
@@ -438,7 +438,7 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
   char timeout_str[GRPC_CHTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
   grpc_mdelem *mdelem;
   grpc_chttp2_encode_timeout(
-      gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)), timeout_str);
+      gpr_time_sub(deadline, gpr_now(deadline.clock_type)), timeout_str);
   mdelem = grpc_mdelem_from_metadata_strings(
       c->mdctx, GRPC_MDSTR_REF(c->timeout_key_str),
       grpc_mdstr_from_string(c->mdctx, timeout_str));
@@ -560,6 +560,7 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
   grpc_mdctx *mdctx = compressor->mdctx;
   grpc_linked_mdelem *l;
   int need_unref = 0;
+  gpr_timespec deadline;
   GPR_ASSERT(stream_id != 0);
@@ -589,9 +590,9 @@ void grpc_chttp2_encode(grpc_stream_op *ops, size_t ops_count, int eof,
         l->md = hpack_enc(compressor, l->md, &st);
         need_unref |= l->md != NULL;
       }
-      if (gpr_time_cmp(op->data.metadata.deadline,
-                       gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
-        deadline_enc(compressor, op->data.metadata.deadline, &st);
+      deadline = op->data.metadata.deadline;
+      if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) != 0) {
+        deadline_enc(compressor, deadline, &st);
       }
       curop++;
       break;

@@ -61,7 +61,7 @@ static void put_metadata_list(gpr_strvec *b, grpc_metadata_batch md) {
     if (m != md.list.head) gpr_strvec_add(b, gpr_strdup(", "));
     put_metadata(b, m->md);
   }
-  if (gpr_time_cmp(md.deadline, gpr_inf_future(GPR_CLOCK_REALTIME)) != 0) {
+  if (gpr_time_cmp(md.deadline, gpr_inf_future(md.deadline.clock_type)) != 0) {
     char *tmp;
     gpr_asprintf(&tmp, " deadline=%d.%09d", md.deadline.tv_sec,
                  md.deadline.tv_nsec);

@@ -41,6 +41,7 @@
 #include <stdlib.h>
 #include <string.h>
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
@@ -100,7 +101,7 @@ static void test_grpc_alarm(void) {
   alarm_arg arg2;
   void *fdone;
-  grpc_iomgr_init();
+  grpc_init();
   arg.counter = 0;
   arg.success = SUCCESS_NOT_SET;
@@ -113,7 +114,7 @@ static void test_grpc_alarm(void) {
   gpr_event_init(&arg.fcb_arg);
   grpc_alarm_init(&alarm, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100), alarm_cb, &arg,
-                  gpr_now(GPR_CLOCK_REALTIME));
+                  gpr_now(GPR_CLOCK_MONOTONIC));
   alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
   gpr_mu_lock(&arg.mu);
@@ -165,7 +166,7 @@ static void test_grpc_alarm(void) {
   gpr_event_init(&arg2.fcb_arg);
   grpc_alarm_init(&alarm_to_cancel, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100),
-                  alarm_cb, &arg2, gpr_now(GPR_CLOCK_REALTIME));
+                  alarm_cb, &arg2, gpr_now(GPR_CLOCK_MONOTONIC));
   grpc_alarm_cancel(&alarm_to_cancel);
   alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
@@ -214,7 +215,7 @@ static void test_grpc_alarm(void) {
   gpr_mu_destroy(&arg2.mu);
   gpr_free(arg2.followup_closure);
-  grpc_iomgr_shutdown();
+  grpc_shutdown();
 }
 int main(int argc, char **argv) {

@@ -254,7 +254,7 @@ static void read_and_write_test(grpc_endpoint_test_config config,
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!state.read_done || !state.write_done) {
-    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
     grpc_pollset_work(g_pollset, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
@@ -350,14 +350,14 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config,
   deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!write_st.done) {
-    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+    GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
     grpc_pollset_work(g_pollset, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
   grpc_endpoint_destroy(write_st.ep);
   gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
   while (!read_st.done) {
-    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
+    GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
     grpc_pollset_work(g_pollset, deadline);
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));

@@ -249,7 +249,7 @@ static int server_start(server *sv) {
 static void server_wait_and_shutdown(server *sv) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!sv->done) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -356,7 +356,7 @@ static void client_start(client *cl, int port) {
 static void client_wait_and_shutdown(client *cl) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (!cl->done) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
 }
@@ -445,7 +445,7 @@ static void test_grpc_fd_change(void) {
   /* And now wait for it to run. */
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (a.cb_that_ran == NULL) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   GPR_ASSERT(a.cb_that_ran == first_read_callback);
   gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
@@ -463,7 +463,7 @@ static void test_grpc_fd_change(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (b.cb_that_ran == NULL) {
-    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
+    grpc_pollset_work(&g_pollset, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   /* Except now we verify that second_read_callback ran instead */
   GPR_ASSERT(b.cb_that_ran == second_read_callback);

@@ -196,13 +196,13 @@ void test_times_out(void) {
   gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
   while (gpr_time_cmp(gpr_time_add(connect_deadline,
                                    gpr_time_from_seconds(2, GPR_TIMESPAN)),
-                      gpr_now(GPR_CLOCK_REALTIME)) > 0) {
+                      gpr_now(connect_deadline.clock_type)) > 0) {
     int is_after_deadline =
-        gpr_time_cmp(connect_deadline, gpr_now(GPR_CLOCK_REALTIME)) <= 0;
+        gpr_time_cmp(connect_deadline, gpr_now(GPR_CLOCK_MONOTONIC)) <= 0;
     if (is_after_deadline &&
         gpr_time_cmp(gpr_time_add(connect_deadline,
                                   gpr_time_from_seconds(1, GPR_TIMESPAN)),
-                     gpr_now(GPR_CLOCK_REALTIME)) > 0) {
+                     gpr_now(GPR_CLOCK_MONOTONIC)) > 0) {
       /* allow some slack before insisting that things be done */
     } else {
       GPR_ASSERT(g_connections_complete ==

@@ -135,7 +135,7 @@ static void test_connect(int n) {
   gpr_log(GPR_DEBUG, "wait");
   while (g_nconnects == nconnects_before &&
-         gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) {
+         gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
     grpc_pollset_work(&g_pollset, deadline);
   }
   gpr_log(GPR_DEBUG, "wait done");

@@ -52,12 +52,12 @@ extern "C" {
   (GRPC_TEST_SLOWDOWN_BUILD_FACTOR * GRPC_TEST_SLOWDOWN_MACHINE_FACTOR)
 #define GRPC_TIMEOUT_SECONDS_TO_DEADLINE(x) \
-  gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), \
+  gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), \
                gpr_time_from_micros(GRPC_TEST_SLOWDOWN_FACTOR * 1e6 * (x), \
                                     GPR_TIMESPAN))
 #define GRPC_TIMEOUT_MILLIS_TO_DEADLINE(x) \
-  gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), \
+  gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), \
                gpr_time_from_micros(GRPC_TEST_SLOWDOWN_FACTOR * 1e3 * (x), \
                                     GPR_TIMESPAN))

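After this change the test deadline macros produce monotonic-clock timespecs, so polling loops should compare against the matching clock (or simply deadline.clock_type), as the test diffs above do. A sketch of the resulting idiom (done and g_pollset stand in for the per-test state seen above):

    gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
    while (!done && gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0) {
      grpc_pollset_work(&g_pollset, deadline);
    }
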
@@ -69,6 +69,7 @@ DEFINE_string(test_case, "large_unary",
               "compute_engine_creds: large_unary with compute engine auth; "
               "jwt_token_creds: large_unary with JWT token auth; "
               "oauth2_auth_token: raw oauth2 access token auth; "
+              "per_rpc_creds: raw oauth2 access token on a single rpc; "
               "all : all of above.");
 DEFINE_string(default_service_account, "",
               "Email of GCE default service account");
@@ -117,6 +118,9 @@ int main(int argc, char** argv) {
   } else if (FLAGS_test_case == "oauth2_auth_token") {
     grpc::string json_key = GetServiceAccountJsonKey();
     client.DoOauth2AuthToken(json_key, FLAGS_oauth_scope);
+  } else if (FLAGS_test_case == "per_rpc_creds") {
+    grpc::string json_key = GetServiceAccountJsonKey();
+    client.DoPerRpcCreds(json_key, FLAGS_oauth_scope);
   } else if (FLAGS_test_case == "all") {
     client.DoEmpty();
     client.DoLargeUnary();
@@ -133,6 +137,7 @@ int main(int argc, char** argv) {
       client.DoServiceAccountCreds(json_key, FLAGS_oauth_scope);
      client.DoJwtTokenCreds(json_key);
      client.DoOauth2AuthToken(json_key, FLAGS_oauth_scope);
+      client.DoPerRpcCreds(json_key, FLAGS_oauth_scope);
     }
     // compute_engine_creds only runs in GCE.
   } else {
@@ -142,7 +147,7 @@ int main(int argc, char** argv) {
         "large_unary|client_streaming|server_streaming|half_duplex|ping_pong|"
         "cancel_after_begin|cancel_after_first_response|"
         "timeout_on_sleeping_server|service_account_creds|compute_engine_creds|"
-        "jwt_token_creds|oauth2_auth_token",
+        "jwt_token_creds|oauth2_auth_token|per_rpc_creds",
         FLAGS_test_case.c_str());
     ret = 1;
   }

@@ -41,8 +41,10 @@
 #include <grpc/support/log.h>
 #include <grpc++/channel_interface.h>
 #include <grpc++/client_context.h>
+#include <grpc++/credentials.h>
 #include <grpc++/status.h>
 #include <grpc++/stream.h>
+#include "test/cpp/interop/client_helper.h"
 #include "test/proto/test.grpc.pb.h"
 #include "test/proto/empty.grpc.pb.h"
 #include "test/proto/messages.grpc.pb.h"
@@ -166,6 +168,32 @@ void InteropClient::DoOauth2AuthToken(const grpc::string& username,
   gpr_log(GPR_INFO, "Unary with oauth2 access token credentials done.");
 }
+void InteropClient::DoPerRpcCreds(const grpc::string& username,
+                                  const grpc::string& oauth_scope) {
+  gpr_log(GPR_INFO,
+          "Sending a unary rpc with per-rpc raw oauth2 access token ...");
+  SimpleRequest request;
+  SimpleResponse response;
+  request.set_fill_username(true);
+  request.set_fill_oauth_scope(true);
+  std::unique_ptr<TestService::Stub> stub(TestService::NewStub(channel_));
+  ClientContext context;
+  grpc::string access_token = GetOauth2AccessToken();
+  std::shared_ptr<Credentials> creds = AccessTokenCredentials(access_token);
+  context.set_credentials(creds);
+  Status s = stub->UnaryCall(&context, request, &response);
+  AssertOkOrPrintErrorStatus(s);
+  GPR_ASSERT(!response.username().empty());
+  GPR_ASSERT(!response.oauth_scope().empty());
+  GPR_ASSERT(username.find(response.username()) != grpc::string::npos);
+  const char* oauth_scope_str = response.oauth_scope().c_str();
+  GPR_ASSERT(oauth_scope.find(oauth_scope_str) != grpc::string::npos);
+  gpr_log(GPR_INFO, "Unary with per-rpc oauth2 access token done.");
+}
 void InteropClient::DoJwtTokenCreds(const grpc::string& username) {
   gpr_log(GPR_INFO, "Sending a large unary rpc with JWT token credentials ...");
   SimpleRequest request;

@@ -71,6 +71,9 @@ class InteropClient {
   // username is a string containing the user email
   void DoOauth2AuthToken(const grpc::string& username,
                          const grpc::string& oauth_scope);
+  // username is a string containing the user email
+  void DoPerRpcCreds(const grpc::string& username,
+                     const grpc::string& oauth_scope);
 private:
   void PerformLargeUnary(SimpleRequest* request, SimpleResponse* response);

@@ -39,19 +39,19 @@ if [ "$platform" == "linux" ]; then
   sha1=$(sha1sum tools/jenkins/grpc_linuxbrew/Dockerfile | cut -f1 -d\ )
   DOCKER_IMAGE_NAME=grpc_linuxbrew_$sha1
+  # build docker image, contains all pre-requisites
   docker build -t $DOCKER_IMAGE_NAME tools/jenkins/grpc_linuxbrew
-  supported="python nodejs ruby php"
   if [ "$language" == "core" ]; then
     command="curl -fsSL https://goo.gl/getgrpc | bash -"
-  elif [[ "$supported" =~ "$language" ]]; then
+  elif [[ "python nodejs ruby php" =~ "$language" ]]; then
     command="curl -fsSL https://goo.gl/getgrpc | bash -s $language"
   else
     echo "unsupported language $language"
     exit 1
   fi
+  # run per-language homebrew installation script
   docker run $DOCKER_IMAGE_NAME bash -l \
     -c "nvm use 0.12; \
         npm set unsafe-perm true; \
@@ -66,26 +66,81 @@ if [ "$platform" == "linux" ]; then
 elif [ "$platform" == "macos" ]; then
   if [ "$dist_channel" == "homebrew" ]; then
     which brew # TODO: for debug, can be removed later
     # system installed homebrew, don't interfere
     brew list -l
-    dir=/tmp/homebrew-test-$language
-    rm -rf $dir
-    mkdir -p $dir
-    git clone https://github.com/Homebrew/homebrew.git $dir
-    cd $dir
-    # TODO: Uncomment these when the general structure of the script is verified
-    # PATH=$dir/bin:$PATH brew tap homebrew/dupes
-    # PATH=$dir/bin:$PATH brew install zlib
-    # PATH=$dir/bin:$PATH brew install openssl
-    # PATH=$dir/bin:$PATH brew tap grpc/grpc
-    # PATH=$dir/bin:$PATH brew install --without-python google-protobuf
-    # PATH=$dir/bin:$PATH brew install grpc
-    PATH=$dir/bin:$PATH brew list -l
+    # Set up temp directories for test installation of homebrew
+    brew_root=/tmp/homebrew-test-$language
+    rm -rf $brew_root
+    mkdir -p $brew_root
+    git clone https://github.com/Homebrew/homebrew.git $brew_root
+    # Install grpc via homebrew
+    #
+    # The temp $PATH env variable makes sure we are operating at the right copy of
+    # temp homebrew installation, and do not interfere with the system's main brew
+    # installation.
+    #
+    # TODO: replace the next section with the actual homebrew installation script
+    # i.e. curl -fsSL https://goo.gl/getgrpc | bash -s $language
+    # need to resolve a bunch of environment and privilege issue on the jenkins
+    # mac machine itself
+    export OLD_PATH=$PATH
+    export PATH=$brew_root/bin:$PATH
+    cd $brew_root
+    brew tap homebrew/dupes
+    brew install zlib
+    brew install openssl
+    brew tap grpc/grpc
+    brew install --without-python google-protobuf
+    brew install grpc
+    brew list -l
+    # Install per-language modules/extensions on top of core grpc
+    #
+    # If a command below needs root access, the binary had been added to
+    # /etc/sudoers. This step needs to be repeated if we add more mac instances
+    # to our jenkins project.
+    #
+    # Examples (lines that needed to be added to /etc/sudoers):
+    # + Defaults env_keep += "CFLAGS CXXFLAGS LDFLAGS enable_grpc"
+    # + jenkinsnode1 ALL=(ALL) NOPASSWD: /usr/bin/pecl, /usr/local/bin/pip,
+    # +   /usr/local/bin/npm
+    case $language in
+      *core*) ;;
+      *python*)
+        sudo CFLAGS=-I$brew_root/include LDFLAGS=-L$brew_root/lib pip install grpcio
+        pip list | grep grpcio
+        echo 'y' | sudo pip uninstall grpcio
+        ;;
+      *nodejs*)
+        sudo CXXFLAGS=-I$brew_root/include LDFLAGS=-L$brew_root/lib npm install grpc
+        npm list | grep grpc
+        sudo npm uninstall grpc
+        ;;
+      *ruby*)
+        gem install grpc -- --with-grpc-dir=$brew_root
+        gem list | grep grpc
+        gem uninstall grpc
+        ;;
+      *php*)
+        sudo enable_grpc=$brew_root CFLAGS="-Wno-parentheses-equality" pecl install grpc-alpha
+        pecl list | grep grpc
+        sudo pecl uninstall grpc
+        ;;
+      *)
+        echo "Unsupported language $language"
+        exit 1
+        ;;
+    esac
     # clean up
     cd ~/
-    rm -rf $dir
-    echo $PATH # TODO: for debug, can be removed later
-    brew list -l # TODO: for debug, can be removed later
+    rm -rf $brew_root
+    # Make sure the system brew installation is still unaffected
+    export PATH=$OLD_PATH
+    brew list -l
   else
     echo "Unsupported $platform dist_channel $dist_channel"
