Merge master

pull/13659/head
Yash Tibrewal 7 years ago
commit d6c292f177
  1. src/core/ext/filters/client_channel/backup_poller.cc (4 changed lines)
  2. src/core/ext/filters/client_channel/client_channel.cc (16 changed lines)
  3. src/core/ext/filters/client_channel/lb_policy.cc (5 changed lines)
  4. src/core/ext/filters/client_channel/lb_policy.h (6 changed lines)
  5. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (65 changed lines)
  6. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (10 changed lines)
  7. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (33 changed lines)
  8. src/core/ext/filters/client_channel/subchannel.cc (6 changed lines)
  9. src/core/ext/filters/client_channel/subchannel.h (3 changed lines)
  10. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (4 changed lines)
  11. src/core/ext/transport/chttp2/transport/writing.cc (15 changed lines)
  12. src/core/ext/transport/cronet/transport/cronet_transport.cc (2 changed lines)
  13. src/core/lib/surface/channel_ping.cc (2 changed lines)
  14. src/core/lib/surface/lame_client.cc (12 changed lines)
  15. src/core/lib/transport/transport.h (10 changed lines)
  16. src/core/lib/transport/transport_op_string.cc (2 changed lines)
  17. src/ruby/end2end/multiple_killed_watching_threads_driver.rb (23 changed lines)
  18. test/cpp/end2end/client_lb_end2end_test.cc (21 changed lines)
  19. tools/interop_matrix/client_matrix.py (139 changed lines)
  20. tools/interop_matrix/create_matrix_images.py (39 changed lines)
  21. tools/interop_matrix/patches/README.md (38 changed lines)
  22. tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch (34 changed lines)
  23. tools/interop_matrix/run_interop_matrix_tests.py (6 changed lines)
  24. tools/interop_matrix/testcases/ruby__v1.0.1 (20 changed lines)

@@ -82,8 +82,8 @@ static void done_poller(void* arg, grpc_error* error) {
}
static void g_poller_unref() {
gpr_mu_lock(&g_poller_mu);
if (gpr_unref(&g_poller->refs)) {
gpr_mu_lock(&g_poller_mu);
backup_poller* p = g_poller;
g_poller = nullptr;
gpr_mu_unlock(&g_poller_mu);
@@ -94,6 +94,8 @@ static void g_poller_unref() {
grpc_schedule_on_exec_ctx));
gpr_mu_unlock(p->pollset_mu);
grpc_timer_cancel(&p->polling_timer);
} else {
gpr_mu_unlock(&g_poller_mu);
}
}
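The hunk above fixes a lock-ordering race: g_poller is now read only while g_poller_mu is held, and the mutex is released on both branches. A minimal sketch of the corrected pattern, with hypothetical parameter names standing in for the file-static state:
```
/* Sketch only: take the mutex before dereferencing the shared pointer and
   unlock on every path. gpr_unref() returns nonzero once the count drops
   to zero. */
static void unref_shared_poller(gpr_mu* mu, backup_poller** shared) {
  gpr_mu_lock(mu);
  if (gpr_unref(&(*shared)->refs)) {
    backup_poller* p = *shared;
    *shared = nullptr;
    gpr_mu_unlock(mu);
    /* ... shut down and destroy p outside the lock ... */
  } else {
    gpr_mu_unlock(mu);
  }
}
```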

@@ -625,15 +625,21 @@ static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
op->connectivity_state = nullptr;
}
if (op->send_ping != nullptr) {
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
if (chand->lb_policy == nullptr) {
GRPC_CLOSURE_SCHED(op->send_ping, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Ping with no load balancing"));
GRPC_CLOSURE_SCHED(
op->send_ping.on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
GRPC_CLOSURE_SCHED(
op->send_ping.on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
} else {
grpc_lb_policy_ping_one_locked(chand->lb_policy, op->send_ping);
grpc_lb_policy_ping_one_locked(
chand->lb_policy, op->send_ping.on_initiate, op->send_ping.on_ack);
op->bind_pollset = nullptr;
}
op->send_ping = nullptr;
op->send_ping.on_initiate = nullptr;
op->send_ping.on_ack = nullptr;
}
if (op->disconnect_with_error != GRPC_ERROR_NONE) {

@@ -128,8 +128,9 @@ void grpc_lb_policy_exit_idle_locked(grpc_lb_policy* policy) {
}
void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* closure) {
policy->vtable->ping_one_locked(policy, closure);
grpc_closure* on_initiate,
grpc_closure* on_ack) {
policy->vtable->ping_one_locked(policy, on_initiate, on_ack);
}
void grpc_lb_policy_notify_on_state_change_locked(

@@ -77,7 +77,8 @@ struct grpc_lb_policy_vtable {
grpc_error* error);
/** \see grpc_lb_policy_ping_one */
void (*ping_one_locked)(grpc_lb_policy* policy, grpc_closure* closure);
void (*ping_one_locked)(grpc_lb_policy* policy, grpc_closure* on_initiate,
grpc_closure* on_ack);
/** Try to enter a READY connectivity state */
void (*exit_idle_locked)(grpc_lb_policy* policy);
@@ -166,7 +167,8 @@ int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
/** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
against one of the connected subchannels managed by \a policy. */
void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
grpc_closure* closure);
grpc_closure* on_initiate,
grpc_closure* on_ack);
/** Cancel picks for \a target.
The \a on_complete callback of the pending picks will be invoked with \a

@@ -274,18 +274,30 @@ static void add_pending_pick(pending_pick** root,
typedef struct pending_ping {
struct pending_ping* next;
/* args for wrapped_notify */
wrapped_rr_closure_arg wrapped_notify_arg;
/* args for sending the ping */
wrapped_rr_closure_arg* on_initiate;
wrapped_rr_closure_arg* on_ack;
} pending_ping;
static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
grpc_closure* on_ack) {
pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
pping->wrapped_notify_arg.wrapped_closure = notify;
pping->wrapped_notify_arg.free_when_done = pping;
if (on_initiate != nullptr) {
pping->on_initiate =
(wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
pping->on_initiate->wrapped_closure = on_initiate;
pping->on_initiate->free_when_done = pping->on_initiate;
GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
pping->on_initiate, grpc_schedule_on_exec_ctx);
}
if (on_ack != nullptr) {
pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
pping->on_ack->wrapped_closure = on_ack;
pping->on_ack->free_when_done = pping->on_ack;
GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
pping->on_ack, grpc_schedule_on_exec_ctx);
}
pping->next = *root;
GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
wrapped_rr_closure, &pping->wrapped_notify_arg,
grpc_schedule_on_exec_ctx);
*root = pping;
}
@@ -815,14 +827,24 @@ static void create_rr_locked(glb_lb_policy* glb_policy,
pending_ping* pping;
while ((pping = glb_policy->pending_pings)) {
glb_policy->pending_pings = pping->next;
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
grpc_closure* on_initiate = nullptr;
grpc_closure* on_ack = nullptr;
if (pping->on_initiate != nullptr) {
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->on_initiate->rr_policy = glb_policy->rr_policy;
on_initiate = &pping->on_initiate->wrapper_closure;
}
if (pping->on_ack != nullptr) {
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
pping->on_ack->rr_policy = glb_policy->rr_policy;
on_ack = &pping->on_ack->wrapper_closure;
}
if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
glb_policy, glb_policy->rr_policy);
}
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy,
&pping->wrapped_notify_arg.wrapper_closure);
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
gpr_free(pping);
}
}
@@ -1037,8 +1059,16 @@ static void glb_shutdown_locked(grpc_lb_policy* pol) {
while (pping != nullptr) {
pending_ping* next = pping->next;
GRPC_CLOSURE_SCHED(&pping->wrapped_notify_arg.wrapper_closure,
GRPC_ERROR_REF(error));
if (pping->on_initiate != nullptr) {
GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure,
GRPC_ERROR_REF(error));
gpr_free(pping->on_initiate);
}
if (pping->on_ack != nullptr) {
GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
GRPC_ERROR_REF(error));
gpr_free(pping->on_ack);
}
gpr_free(pping);
pping = next;
}
@@ -1229,12 +1259,13 @@ static grpc_connectivity_state glb_check_connectivity_locked(
connectivity_error);
}
static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
if (glb_policy->rr_policy) {
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, closure);
grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
if (!glb_policy->started_picking) {
start_picking_locked(glb_policy);
}

@@ -221,12 +221,16 @@ static void pf_notify_on_state_change_locked(grpc_lb_policy* pol,
notify);
}
static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
if (p->selected) {
grpc_connected_subchannel_ping(p->selected->connected_subchannel, closure);
grpc_connected_subchannel_ping(p->selected->connected_subchannel,
on_initiate, on_ack);
} else {
GRPC_CLOSURE_SCHED(closure,
GRPC_CLOSURE_SCHED(on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
GRPC_CLOSURE_SCHED(on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
}
}

@@ -361,21 +361,16 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
* CHECK: subchannel_list->num_shutdown ==
* subchannel_list->num_subchannels.
*
* 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
* 4) RULE: ALL subchannels are SHUTDOWN or TRANSIENT_FAILURE => policy is
* TRANSIENT_FAILURE.
* CHECK: subchannel_list->num_transient_failures ==
* CHECK: subchannel_list->num_shutdown +
* subchannel_list->num_transient_failures ==
* subchannel_list->num_subchannels.
*
* 5) RULE: ALL subchannels are IDLE => policy is IDLE.
* CHECK: subchannel_list->num_idle == subchannel_list->num_subchannels.
* (Note that all the subchannels will transition from IDLE to CONNECTING
* in batch when we start trying to connect.)
*/
// TODO(juanlishen): if the subchannel states are mixed by {SHUTDOWN,
// TRANSIENT_FAILURE}, we don't change the state. We may want to improve on
// this.
// TODO(juanlishen): For rule 4, we may want to re-resolve instead.
grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
if (subchannel_list->num_ready > 0) {
/* 1) READY */
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
@@ -393,16 +388,13 @@ static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
p->started_picking = false;
grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
GRPC_ERROR_NONE);
} else if (subchannel_list->num_transient_failures ==
} else if (subchannel_list->num_shutdown +
subchannel_list->num_transient_failures ==
subchannel_list->num_subchannels) {
/* 4) TRANSIENT_FAILURE */
grpc_connectivity_state_set(&p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
GRPC_ERROR_REF(error), "rr_transient_failure");
} else if (subchannel_list->num_idle == subchannel_list->num_subchannels) {
/* 5) IDLE */
grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
GRPC_ERROR_NONE, "rr_idle");
}
GRPC_ERROR_UNREF(error);
}
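As a reading aid, here is the aggregation above restated as a hedged standalone helper over hypothetical counters. Rules 2 and 3 sit outside the visible hunk, so their mappings (any CONNECTING subchannel implies CONNECTING; all SHUTDOWN implies SHUTDOWN) are assumed; the real function also emits notifications, triggers re-resolution, and unrefs the error. Rule 5 (IDLE) no longer applies, since subchannels are asserted not to be IDLE here.
```
/* Sketch only: returns false for mixed {SHUTDOWN, TRANSIENT_FAILURE} states,
   in which case the policy state is left unchanged (see the TODO above). */
static bool aggregate_rr_connectivity(size_t num_ready, size_t num_connecting,
                                      size_t num_shutdown,
                                      size_t num_transient_failures,
                                      size_t num_subchannels,
                                      grpc_connectivity_state* out) {
  if (num_ready > 0) { /* rule 1 */
    *out = GRPC_CHANNEL_READY;
  } else if (num_connecting > 0) { /* rule 2 (assumed) */
    *out = GRPC_CHANNEL_CONNECTING;
  } else if (num_shutdown == num_subchannels) { /* rule 3 (assumed) */
    *out = GRPC_CHANNEL_SHUTDOWN;
  } else if (num_shutdown + num_transient_failures == num_subchannels) {
    *out = GRPC_CHANNEL_TRANSIENT_FAILURE; /* rule 4 */
  } else {
    return false;
  }
  return true;
}
```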
@@ -539,7 +531,8 @@ static void rr_notify_on_state_change_locked(grpc_lb_policy* pol,
notify);
}
static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
grpc_closure* on_ack) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
@@ -547,11 +540,13 @@ static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
&p->subchannel_list->subchannels[next_ready_index];
grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
selected->connected_subchannel, "rr_ping");
grpc_connected_subchannel_ping(target, closure);
grpc_connected_subchannel_ping(target, on_initiate, on_ack);
GRPC_CONNECTED_SUBCHANNEL_UNREF(target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
}
}

@@ -562,10 +562,12 @@ void grpc_connected_subchannel_notify_on_state_change(
}
void grpc_connected_subchannel_ping(grpc_connected_subchannel* con,
grpc_closure* closure) {
grpc_closure* on_initiate,
grpc_closure* on_ack) {
grpc_transport_op* op = grpc_make_transport_op(nullptr);
grpc_channel_element* elem;
op->send_ping = closure;
op->send_ping.on_initiate = on_initiate;
op->send_ping.on_ack = on_ack;
elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
elem->filter->start_transport_op(elem, op);
}
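A hedged sketch of driving the new two-callback subchannel ping; ping_state, ping_initiated, and ping_acked are hypothetical names. The closures must live in storage that outlives the ping; the failure paths shown elsewhere in this diff schedule both callbacks with an error, so freeing in the ack callback is safe under that assumption:
```
typedef struct ping_state {
  grpc_closure on_initiate;
  grpc_closure on_ack;
} ping_state;

static void ping_initiated(void* arg, grpc_error* error) {
  gpr_log(GPR_INFO, "ping written to the wire: %s", grpc_error_string(error));
}

static void ping_acked(void* arg, grpc_error* error) {
  gpr_log(GPR_INFO, "ping ack received: %s", grpc_error_string(error));
  gpr_free(arg); /* the ping_state allocated below */
}

static void ping_subchannel(grpc_connected_subchannel* con) {
  ping_state* st = (ping_state*)gpr_zalloc(sizeof(*st));
  GRPC_CLOSURE_INIT(&st->on_initiate, ping_initiated, st,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&st->on_ack, ping_acked, st, grpc_schedule_on_exec_ctx);
  grpc_connected_subchannel_ping(con, &st->on_initiate, &st->on_ack);
}
```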

@@ -125,7 +125,8 @@ void grpc_connected_subchannel_notify_on_state_change(
grpc_connected_subchannel* channel, grpc_pollset_set* interested_parties,
grpc_connectivity_state* state, grpc_closure* notify);
void grpc_connected_subchannel_ping(grpc_connected_subchannel* channel,
grpc_closure* notify);
grpc_closure* on_initiate,
grpc_closure* on_ack);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */

@@ -1725,8 +1725,8 @@ static void perform_transport_op_locked(void* stream_op,
grpc_endpoint_add_to_pollset_set(t->ep, op->bind_pollset_set);
}
if (op->send_ping) {
send_ping_locked(t, nullptr, op->send_ping);
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
send_ping_locked(t, op->send_ping.on_initiate, op->send_ping.on_ack);
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
}

@@ -67,14 +67,15 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
return;
}
grpc_millis now = grpc_core::ExecCtx::Get()->Now();
grpc_millis next_allowed_ping_interval =
(t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0)
? 7200 * GPR_MS_PER_SEC
: t->ping_policy.min_sent_ping_interval_without_data;
grpc_millis next_allowed_ping =
t->ping_state.last_ping_sent_time + next_allowed_ping_interval;
grpc_millis next_allowed_ping =
t->ping_state.last_ping_sent_time +
t->ping_policy.min_sent_ping_interval_without_data;
if (t->keepalive_permit_without_calls == 0 &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
next_allowed_ping =
t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
}
if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
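Restated as a hedged standalone helper (illustrative only; field names mirror the transport state above): when there are no active streams and keepalive is not permitted without calls, the next ping is now gated on the last ping received plus two hours, rather than on the last ping sent:
```
static grpc_millis next_allowed_ping_time(grpc_chttp2_transport* t) {
  grpc_millis next = t->ping_state.last_ping_sent_time +
                     t->ping_policy.min_sent_ping_interval_without_data;
  if (t->keepalive_permit_without_calls == 0 &&
      grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
    /* idle connection: wait 2 hours from the last ping we received */
    next = t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
  }
  return next;
}
```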

@@ -397,7 +397,7 @@ static void execute_from_storage(stream_obj* s) {
Cronet callback
*/
static void on_failed(bidirectional_stream* stream, int net_error) {
CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
gpr_log(GPR_ERROR, "on_failed(%p, %d)", stream, net_error);
grpc_core::ExecCtx exec_ctx;
stream_obj* s = (stream_obj*)stream->annotation;

@@ -56,7 +56,7 @@ void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
pr->tag = tag;
pr->cq = cq;
GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
op->send_ping = &pr->closure;
op->send_ping.on_ack = &pr->closure;
op->bind_pollset = grpc_cq_pollset(cq);
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
top_elem->filter->start_transport_op(top_elem, op);
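For context, a hedged sketch of the public entry point this hunk feeds, using the existing C-core APIs grpc_channel_ping and grpc_completion_queue_next. Only the ack is surfaced to applications, which is why just on_ack is set above:
```
static void ping_and_wait(grpc_channel* channel, grpc_completion_queue* cq) {
  void* tag = (void*)1; /* arbitrary application-chosen tag */
  grpc_channel_ping(channel, cq, tag, nullptr /* reserved */);
  grpc_event ev = grpc_completion_queue_next(
      cq, gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE && ev.tag == tag);
}
```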

@@ -99,9 +99,15 @@ static void lame_start_transport_op(grpc_channel_element* elem,
*op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
GRPC_CLOSURE_SCHED(op->on_connectivity_state_change, GRPC_ERROR_NONE);
}
if (op->send_ping != nullptr) {
GRPC_CLOSURE_SCHED(op->send_ping, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"lame client channel"));
if (op->send_ping.on_initiate != nullptr) {
GRPC_CLOSURE_SCHED(
op->send_ping.on_initiate,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
if (op->send_ping.on_ack != nullptr) {
GRPC_CLOSURE_SCHED(
op->send_ping.on_ack,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
if (op->on_consumed != nullptr) {

@@ -243,8 +243,14 @@ typedef struct grpc_transport_op {
grpc_pollset* bind_pollset;
/** add this transport to a pollset_set */
grpc_pollset_set* bind_pollset_set;
/** send a ping, call this back if not NULL */
grpc_closure* send_ping;
/** send a ping, if either on_initiate or on_ack is not NULL */
struct {
/** Ping may be delayed by the transport, on_initiate callback will be
called when the ping is actually being sent. */
grpc_closure* on_initiate;
/** Called when the ping ack is received */
grpc_closure* on_ack;
} send_ping;
/***************************************************************************
* remaining fields are initialized and used at the discretion of the

@@ -187,7 +187,7 @@ char* grpc_transport_op_string(grpc_transport_op* op) {
gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
}
if (op->send_ping != nullptr) {
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
// first = false;
gpr_strvec_add(&b, gpr_strdup("SEND_PING"));

@@ -20,7 +20,7 @@ Thread.abort_on_exception = true
include GRPC::Core::ConnectivityStates
def watch_state(ch)
def watch_state(ch, sleep_time)
thd = Thread.new do
state = ch.connectivity_state(false)
fail "non-idle state: #{state}" unless state == IDLE
@@ -28,23 +28,34 @@ def watch_state(ch)
end
# sleep to get the thread into the middle of a
# "watch connectivity state" call
sleep 0.1
sleep sleep_time
thd.kill
end
def main
def run_multiple_killed_watches(num_threads, sleep_time)
channels = []
10.times do
num_threads.times do
ch = GRPC::Core::Channel.new('dummy_host',
nil, :this_channel_is_insecure)
watch_state(ch)
watch_state(ch, sleep_time)
channels << ch
end
# checking state should still be safe to call
channels.each do |c|
fail unless c.connectivity_state(false) == FATAL_FAILURE
connectivity_state = c.connectivity_state(false)
# The state should be FATAL_FAILURE in the case that it was interrupted
# while watching connectivity state, and IDLE if we never started
# watching the channel's connectivity state
unless [FATAL_FAILURE, IDLE].include?(connectivity_state)
fail "unexpected connectivity state: #{connectivity_state}"
end
end
end
def main
run_multiple_killed_watches(10, 0.1)
run_multiple_killed_watches(1000, 0.001)
end
main

@@ -572,15 +572,28 @@ TEST_F(ClientLbEnd2endTest, RoundRobinReresolve) {
CheckRpcSendOk();
}
// Kill all servers
gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown(false);
}
// Client request should fail.
CheckRpcSendFailure();
gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
// Client requests should fail. Send enough to tickle all subchannels.
for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure();
gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
// Bring servers back up on the same port (we aren't recreating the channel).
gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
StartServers(kNumServers, ports);
// Client request should succeed.
CheckRpcSendOk();
gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
// Client request should eventually (but still fairly soon) succeed.
const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
while (gpr_time_cmp(deadline, now) > 0) {
if (SendRpc().ok()) break;
now = gpr_now(GPR_CLOCK_MONOTONIC);
}
GPR_ASSERT(gpr_time_cmp(deadline, now) > 0);
}
} // namespace

@@ -23,6 +23,18 @@ def get_github_repo(lang):
# all other languages use the grpc.git repo.
}.get(lang, 'git@github.com:grpc/grpc.git')
def get_release_tags(lang):
return map(lambda r: get_release_tag_name(r), LANG_RELEASE_MATRIX[lang])
def get_release_tag_name(release_info):
assert len(release_info.keys()) == 1
return release_info.keys()[0]
def should_build_docker_interop_image_from_release_tag(lang):
if lang in ['go', 'java', 'node']:
return False
return True
# Dictionary of runtimes per language
LANG_RUNTIME_MATRIX = {
'cxx': ['cxx'], # This is actually debian8.
@@ -39,81 +51,84 @@ LANG_RUNTIME_MATRIX = {
# a release tag pointing to the latest build of the branch.
LANG_RELEASE_MATRIX = {
'cxx': [
'v1.0.1',
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
'v1.7.2',
{'v1.0.1': None},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
{'v1.7.2': None},
],
'go': [
'v1.0.5',
'v1.2.1',
'v1.3.0',
'v1.4.2',
'v1.5.2',
'v1.6.0',
'v1.7.0',
'v1.7.1',
'v1.7.2',
'v1.7.3',
'v1.8.0',
{'v1.0.5': None},
{'v1.2.1': None},
{'v1.3.0': None},
{'v1.4.2': None},
{'v1.5.2': None},
{'v1.6.0': None},
{'v1.7.0': None},
{'v1.7.1': None},
{'v1.7.2': None},
{'v1.7.3': None},
{'v1.8.0': None},
],
'java': [
'v1.0.3',
'v1.1.2',
'v1.2.0',
'v1.3.1',
'v1.4.0',
'v1.5.0',
'v1.6.1',
'v1.7.0',
'v1.8.0',
{'v1.0.3': None},
{'v1.1.2': None},
{'v1.2.0': None},
{'v1.3.1': None},
{'v1.4.0': None},
{'v1.5.0': None},
{'v1.6.1': None},
{'v1.7.0': None},
{'v1.8.0': None},
],
'python': [
'v1.0.x',
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
'v1.7.2',
{'v1.0.x': None},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
{'v1.7.2': None},
],
'node': [
'v1.0.1',
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
#'v1.7.1', Failing tests.
{'v1.0.1': None},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
#{'v1.7.1': None}, Failing tests
],
'ruby': [
# Ruby v1.0.x doesn't have the fix #8914, therefore not supported.
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
'v1.7.2',
{'v1.0.1': {'patch': [
'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
]}},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
{'v1.7.2': None},
],
'php': [
'v1.0.1',
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
'v1.7.2',
{'v1.0.1': None},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
{'v1.7.2': None},
],
'csharp': [
#'v1.0.1',
'v1.1.4',
'v1.2.5',
'v1.3.9',
'v1.4.2',
'v1.6.6',
'v1.7.2',
#{'v1.0.1': None},
{'v1.1.4': None},
{'v1.2.5': None},
{'v1.3.9': None},
{'v1.4.2': None},
{'v1.6.6': None},
{'v1.7.2': None},
],
}

@@ -39,7 +39,7 @@ _IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(list(set(
i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
# Destination directory inside docker image to keep extra info from build time.
_BUILD_INFO = '/var/local/build_info'
@@ -141,8 +141,11 @@ def build_image_jobspec(runtime, env, gcr_tag, stack_base):
'TTY_FLAG': '-t'
}
build_env.update(env)
image_builder_path = _IMAGE_BUILDER
if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
build_job = jobset.JobSpec(
cmdline=[_IMAGE_BUILDER],
cmdline=[image_builder_path],
environ=build_env,
shortname='build_docker_%s' % runtime,
timeout_seconds=30*60)
@@ -157,10 +160,10 @@ def build_all_images_for_lang(lang):
releases = ['master']
else:
if args.release == 'all':
releases = client_matrix.LANG_RELEASE_MATRIX[lang]
releases = client_matrix.get_release_tags(lang)
else:
# Build a particular release.
if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
if args.release not in ['master'] + client_matrix.get_release_tags(lang):
jobset.message('SKIPPED',
'%s for %s is not defined' % (args.release, lang),
do_newline=True)
@@ -223,6 +226,33 @@ def cleanup():
docker_images_cleanup = []
atexit.register(cleanup)
def maybe_apply_patches_on_git_tag(stack_base, lang, release):
files_to_patch = []
for release_info in client_matrix.LANG_RELEASE_MATRIX[lang]:
if client_matrix.get_release_tag_name(release_info) == release:
files_to_patch = release_info[release].get('patch')
break
if not files_to_patch:
return
patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
patch_file = os.path.abspath(os.path.join(os.path.dirname(__file__),
patch_file_relative_path))
if not os.path.exists(patch_file):
jobset.message('FAILED', 'expected patch file |%s| to exist' % patch_file)
sys.exit(1)
subprocess.check_output(
['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
for repo_relative_path in files_to_patch:
subprocess.check_output(
['git', 'add', repo_relative_path],
cwd=stack_base,
stderr=subprocess.STDOUT)
subprocess.check_output(
['git', 'commit', '-m', ('Hack performed on top of %s git '
'tag in order to build and run the %s '
'interop tests on that tag.' % (lang, release))],
cwd=stack_base, stderr=subprocess.STDOUT)
def checkout_grpc_stack(lang, release):
"""Invokes 'git check' for the lang/release and returns directory created."""
assert args.git_checkout and args.git_checkout_root
@@ -252,6 +282,7 @@ def checkout_grpc_stack(lang, release):
assert not os.path.dirname(__file__).startswith(stack_base)
output = subprocess.check_output(
['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
maybe_apply_patches_on_git_tag(stack_base, lang, release)
commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
jobset.message('SUCCESS', 'git checkout',
'%s: %s' % (str(output), commit_log),

@@ -0,0 +1,38 @@
# Patches to grpc repo tags for the backwards compatibility interop tests
This directory has patch files that can be applied to different tags
of the grpc git repo in order to run the interop tests for a specific
language based on that tag.
For example, the ruby interop tests do not run on the v1.0.1 tag out
of the box, but we still want to test compatibility of the 1.0.1 ruby release
with other versions, so we can apply a patch to the v1.0.1 tag from this
directory that makes the changes needed to run the ruby interop tests
from that tag. We can then use that patch to build the docker image for the
ruby v1.0.1 interop tests.
## How to add a new patch to this directory
Patch files in this directory are meant to be applied to a git tag
with a `git apply` command.
1. Under the `patches` directory, create a new subdirectory
titled `<language>_<git_tag>` for the git tag being modified.
2. `git checkout <git_tag>`
3. Make necessary modifications to the git repo at that tag.
4.
```
git diff > ~/git_repo.patch
git checkout <current working branch>
cp ~/git_repo.patch tools/interop_matrix/patches/<language>_<git_tag>/
```
5. Edit `LANG_RELEASE_MATRIX` in `client_matrix.py` for your language/tag
and add a `'patch': [<files>, ...]` entry to its dictionary.
After doing this, the interop image creation script can apply that patch to the
tag with `git apply` before uploading to the test image repo.

@@ -0,0 +1,34 @@
diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
index 88b5130..7ae9f7d 100644
--- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
@@ -70,12 +70,12 @@ RUN apt-get update && apt-get install -y time && apt-get clean
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
RUN \curl -sSL https://get.rvm.io | bash -s stable
-# Install Ruby 2.1
-RUN /bin/bash -l -c "rvm install ruby-2.1"
-RUN /bin/bash -l -c "rvm use --default ruby-2.1"
+# Install Ruby 2.1.8
+RUN /bin/bash -l -c "rvm install ruby-2.1.8"
+RUN /bin/bash -l -c "rvm use --default ruby-2.1.8"
RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
-RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
+RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1.8' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
# Prepare ccache
diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
index 97b3860..cec046d 100755
--- a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
+++ b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
@@ -38,7 +38,7 @@ git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
-rvm --default use ruby-2.1
+rvm --default use ruby-2.1.8
# build Ruby interop client and server
(cd src/ruby && gem update bundler && bundle && rake compile)

@@ -41,7 +41,7 @@ import upload_test_results
_LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
# All gRPC release tags, flattened, deduped and sorted.
_RELEASES = sorted(list(set(
i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
client_matrix.get_release_tag_name(info) for lang in client_matrix.LANG_RELEASE_MATRIX.values() for info in lang)))
_TEST_TIMEOUT = 30
argp = argparse.ArgumentParser(description='Run interop tests.')
@@ -93,10 +93,10 @@ def find_all_images_for_lang(lang):
"""
# Find all defined releases.
if args.release == 'all':
releases = ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]
releases = ['master'] + client_matrix.get_release_tags(lang)
else:
# Look for a particular release.
if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
if args.release not in ['master'] + client_matrix.get_release_tags(lang):
jobset.message('SKIPPED',
'%s for %s is not defined' % (args.release, lang),
do_newline=True)

@@ -0,0 +1,20 @@
#!/bin/bash
echo "Testing ${docker_image:=grpc_interop_ruby:6bd1f0eb-51a4-4ad8-861c-1cbd7a929f33}"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"