Merge branch 'master' into handshaker

pull/10925/head
jiangtaoli2016 8 years ago
commit 876f290545
65 changed files (number of changed lines in parentheses):

  1. .github/ISSUE_TEMPLATE.md (31)
  2. src/core/ext/transport/chttp2/transport/chttp2_transport.c (41)
  3. src/core/ext/transport/chttp2/transport/internal.h (11)
  4. src/core/ext/transport/chttp2/transport/parsing.c (7)
  5. src/core/ext/transport/chttp2/transport/writing.c (34)
  6. src/core/lib/transport/bdp_estimator.c (19)
  7. src/core/lib/transport/bdp_estimator.h (3)
  8. src/python/grpcio/grpc/__init__.py (27)
  9. templates/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile.template (8)
  10. templates/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile.template (10)
  11. templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template (42)
  12. templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template (41)
  13. templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template (41)
  14. templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template (46)
  15. templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template (45)
  16. templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template (42)
  17. test/core/transport/bdp_estimator_test.c (15)
  18. test/core/util/trickle_endpoint.c (27)
  19. test/core/util/trickle_endpoint.h (2)
  20. test/cpp/microbenchmarks/BUILD (2)
  21. test/cpp/microbenchmarks/bm_fullstack_trickle.cc (148)
  22. tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile (13)
  23. tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile (24)
  24. tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile (117)
  25. tools/dockerfile/stress_test/grpc_interop_stress_csharp/build_interop_stress.sh (51)
  26. tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile (132)
  27. tools/dockerfile/stress_test/grpc_interop_stress_cxx/build_interop_stress.sh (51)
  28. tools/dockerfile/stress_test/grpc_interop_stress_go/build_interop_stress.sh (62)
  29. tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile (117)
  30. tools/dockerfile/stress_test/grpc_interop_stress_java/build_interop_stress.sh (55)
  31. tools/dockerfile/stress_test/grpc_interop_stress_node/Dockerfile (109)
  32. tools/dockerfile/stress_test/grpc_interop_stress_node/build_interop_stress.sh (48)
  33. tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile (125)
  34. tools/dockerfile/stress_test/grpc_interop_stress_php/build_interop_stress.sh (57)
  35. tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile (103)
  36. tools/dockerfile/stress_test/grpc_interop_stress_python/build_interop_stress.sh (49)
  37. tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile (114)
  38. tools/dockerfile/stress_test/grpc_interop_stress_ruby/build_interop_stress.sh (52)
  39. tools/gcp/stress_test/run_client.py (206)
  40. tools/gcp/stress_test/run_node.sh (37)
  41. tools/gcp/stress_test/run_ruby.sh (37)
  42. tools/gcp/stress_test/run_server.py (138)
  43. tools/gcp/stress_test/stress_test_utils.py (217)
  44. tools/gcp/utils/kubernetes_api.py (269)
  45. tools/jenkins/run_interop_stress.sh (37)
  46. tools/profiling/microbenchmarks/bm_diff.py (4)
  47. tools/run_tests/generated/tests.json (8)
  48. tools/run_tests/performance/scenario_config.py (5)
  49. tools/run_tests/run_stress_tests.py (331)
  50. tools/run_tests/stress_test/README.md (76)
  51. tools/run_tests/stress_test/STRESS_CLIENT_SPEC.md (25)
  52. tools/run_tests/stress_test/configs/asan.json (85)
  53. tools/run_tests/stress_test/configs/csharp.json (91)
  54. tools/run_tests/stress_test/configs/go.json (96)
  55. tools/run_tests/stress_test/configs/java.json (98)
  56. tools/run_tests/stress_test/configs/node-cxx.json (97)
  57. tools/run_tests/stress_test/configs/node.json (96)
  58. tools/run_tests/stress_test/configs/opt-tsan-asan.json (134)
  59. tools/run_tests/stress_test/configs/opt.json (85)
  60. tools/run_tests/stress_test/configs/php-cxx.json (93)
  61. tools/run_tests/stress_test/configs/python.json (98)
  62. tools/run_tests/stress_test/configs/ruby.json (92)
  63. tools/run_tests/stress_test/configs/tsan.json (85)
  64. tools/run_tests/stress_test/print_summary.py (59)
  65. tools/run_tests/stress_test/run_on_gke.py (674)

.github/ISSUE_TEMPLATE.md (new file)
@@ -0,0 +1,31 @@
Please answer these questions before submitting your issue.

### Should this be an issue in the gRPC issue tracker?

Create new issues for bugs and feature requests. An issue needs to be
actionable. General gRPC discussions and usage questions belong on:
- [grpc.io mailing list](https://groups.google.com/forum/#!forum/grpc-io)
- [StackOverflow, with `grpc` tag](http://stackoverflow.com/questions/tagged/grpc)

*Please don't double-post your question in multiple locations; we monitor both
channels, and the time spent de-duplicating questions is better spent answering
more user questions.*

### What version of gRPC and what language are you using?

### What operating system (Linux, Windows, …) and version?

### What runtime / compiler are you using (e.g. python version or version of gcc)?

### What did you do?

If possible, provide a recipe for reproducing the error. Try being specific and
include code snippets if helpful.

### What did you expect to see?

### What did you see instead?

Make sure you include information that can help us debug (full error message,
exception listing, stack trace, logs).

### Anything else we should know about your project / environment?

src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -884,14 +884,23 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
   GPR_TIMER_BEGIN("write_action_begin_locked", 0);
   grpc_chttp2_transport *t = gt;
   GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
-  if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
-    set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
-                    "begin writing");
-    grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
-  } else {
-    set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
-                    "begin writing nothing");
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
+  switch (t->closed ? GRPC_CHTTP2_NOTHING_TO_WRITE
+                    : grpc_chttp2_begin_write(exec_ctx, t)) {
+    case GRPC_CHTTP2_NOTHING_TO_WRITE:
+      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
+                      "begin writing nothing");
+      GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
+      break;
+    case GRPC_CHTTP2_PARTIAL_WRITE:
+      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+                      "begin writing partial");
+      grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
+      break;
+    case GRPC_CHTTP2_FULL_WRITE:
+      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+                      "begin writing");
+      grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
+      break;
   }
   GPR_TIMER_END("write_action_begin_locked", 0);
 }
@@ -2130,27 +2139,29 @@ static void end_all_the_calls(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
 static void update_bdp(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                        double bdp_dbl) {
-  uint32_t bdp;
-  if (bdp_dbl <= 0) {
-    bdp = 0;
-  } else if (bdp_dbl > UINT32_MAX) {
-    bdp = UINT32_MAX;
+  int32_t bdp;
+  const int32_t kMinBDP = 128;
+  if (bdp_dbl <= kMinBDP) {
+    bdp = kMinBDP;
+  } else if (bdp_dbl > INT32_MAX) {
+    bdp = INT32_MAX;
   } else {
-    bdp = (uint32_t)(bdp_dbl);
+    bdp = (int32_t)(bdp_dbl);
   }
   int64_t delta =
       (int64_t)bdp -
       (int64_t)t->settings[GRPC_LOCAL_SETTINGS]
                           [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
-  if (delta == 0 || (bdp != 0 && delta > -1024 && delta < 1024)) {
+  if (delta == 0 || (delta > -bdp / 10 && delta < bdp / 10)) {
     return;
   }
   if (grpc_bdp_estimator_trace) {
     gpr_log(GPR_DEBUG, "%s: update initial window size to %d", t->peer_string,
             (int)bdp);
   }
-  push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, bdp);
-  push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, bdp);
+  push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
+               (uint32_t)bdp);
+  push_setting(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE, (uint32_t)bdp);
 }
 
 static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
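
Note: the threshold change above replaces a fixed dead zone of plus or minus 1024 bytes with a relative one of plus or minus 10%, and adds a 128-byte floor. A standalone sketch of the same gating logic (the helper name and the standalone framing are illustrative, not part of this commit):

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if a SETTINGS update should be pushed for the new
       BDP-derived target window, mirroring update_bdp above. With a current
       window of 65536, moves of up to 6553 bytes (10%) are now suppressed,
       where the old code suppressed only moves smaller than 1024 bytes. */
    static bool should_push_window_update(double bdp_dbl, int32_t current) {
      const int32_t kMinBDP = 128;
      int32_t bdp;
      if (bdp_dbl <= kMinBDP) {
        bdp = kMinBDP;
      } else if (bdp_dbl > INT32_MAX) {
        bdp = INT32_MAX;
      } else {
        bdp = (int32_t)bdp_dbl;
      }
      int64_t delta = (int64_t)bdp - (int64_t)current;
      return !(delta == 0 || (delta > -bdp / 10 && delta < bdp / 10));
    }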
@@ -2543,7 +2554,7 @@ static void incoming_byte_stream_update_flow_control(grpc_exec_ctx *exec_ctx,
                                    add_max_recv_bytes);
     if ((int64_t)s->incoming_window_delta + (int64_t)initial_window_size -
             (int64_t)s->announce_window >
-        (int64_t)initial_window_size / 2) {
+        2 * (int64_t)initial_window_size) {
       write_type = GRPC_CHTTP2_STREAM_WRITE_PIGGYBACK;
     }
     grpc_chttp2_become_writable(exec_ctx, t, s, write_type,

src/core/ext/transport/chttp2/transport/internal.h
@@ -552,9 +552,14 @@ void grpc_chttp2_initiate_write(grpc_exec_ctx *exec_ctx,
                                 grpc_chttp2_transport *t,
                                 bool covered_by_poller, const char *reason);
 
-/** Someone is unlocking the transport mutex: check to see if writes
-    are required, and frame them if so */
-bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
+typedef enum {
+  GRPC_CHTTP2_NOTHING_TO_WRITE,
+  GRPC_CHTTP2_PARTIAL_WRITE,
+  GRPC_CHTTP2_FULL_WRITE,
+} grpc_chttp2_begin_write_result;
+
+grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
+    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t);
 void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                            grpc_error *error);

src/core/ext/transport/chttp2/transport/parsing.c
@@ -418,12 +418,7 @@ static grpc_error *update_incoming_window(grpc_exec_ctx *exec_ctx,
     GRPC_CHTTP2_FLOW_DEBIT_STREAM_INCOMING_WINDOW_DELTA("parse", t, s,
                                                         incoming_frame_size);
-    if ((int64_t)t->settings[GRPC_SENT_SETTINGS]
-                            [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] +
-            (int64_t)s->incoming_window_delta - (int64_t)s->announce_window <=
-        (int64_t)t->settings[GRPC_SENT_SETTINGS]
-                            [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] /
-            2) {
+    if ((int64_t)s->incoming_window_delta - (int64_t)s->announce_window <= 0) {
       grpc_chttp2_become_writable(exec_ctx, t, s,
                                   GRPC_CHTTP2_STREAM_WRITE_INITIATE_UNCOVERED,
                                   "window-update-required");

src/core/ext/transport/chttp2/transport/writing.c
@@ -160,19 +160,22 @@ static bool stream_ref_if_not_destroyed(gpr_refcount *r) {
   return true;
 }
 
+/* How many bytes of incoming flow control would we like to advertise */
 uint32_t grpc_chttp2_target_incoming_window(grpc_chttp2_transport *t) {
-  return (uint32_t)GPR_MAX(
+  return (uint32_t)GPR_MIN(
       (int64_t)((1u << 31) - 1),
       t->stream_total_over_incoming_window +
-          (int64_t)GPR_MAX(
-              t->settings[GRPC_SENT_SETTINGS]
-                         [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE] -
-                  t->stream_total_under_incoming_window,
-              0));
+          t->settings[GRPC_SENT_SETTINGS]
+                     [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE]);
+}
+
+/* How many bytes would we like to put on the wire during a single syscall */
+static uint32_t target_write_size(grpc_chttp2_transport *t) {
+  return 1024 * 1024;
 }
 
-bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
-                             grpc_chttp2_transport *t) {
+grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
+    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t) {
   grpc_chttp2_stream *s;
 
   GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
@@ -206,9 +209,20 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
     }
   }
 
+  bool partial_write = false;
+
   /* for each grpc_chttp2_stream that's become writable, frame it's data
      (according to available window sizes) and add to the output buffer */
-  while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
+  while (true) {
+    if (t->outbuf.length > target_write_size(t)) {
+      partial_write = true;
+      break;
+    }
+
+    if (!grpc_chttp2_list_pop_writable_stream(t, &s)) {
+      break;
+    }
+
     bool sent_initial_metadata = s->sent_initial_metadata;
     bool now_writing = false;
 
@@ -395,7 +409,9 @@ bool grpc_chttp2_begin_write(grpc_exec_ctx *exec_ctx,
   GPR_TIMER_END("grpc_chttp2_begin_write", 0);
 
-  return t->outbuf.count > 0;
+  return t->outbuf.count > 0 ? (partial_write ? GRPC_CHTTP2_PARTIAL_WRITE
+                                              : GRPC_CHTTP2_FULL_WRITE)
+                             : GRPC_CHTTP2_NOTHING_TO_WRITE;
 }
 
 void grpc_chttp2_end_write(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,

src/core/lib/transport/bdp_estimator.c
@@ -44,6 +44,7 @@ void grpc_bdp_estimator_init(grpc_bdp_estimator *estimator, const char *name) {
   estimator->estimate = 65536;
   estimator->ping_state = GRPC_BDP_PING_UNSCHEDULED;
   estimator->name = name;
+  estimator->bw_est = 0;
 }
 
 bool grpc_bdp_estimator_get_estimate(grpc_bdp_estimator *estimator,
@@ -84,16 +85,26 @@ void grpc_bdp_estimator_start_ping(grpc_bdp_estimator *estimator) {
   GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_SCHEDULED);
   estimator->ping_state = GRPC_BDP_PING_STARTED;
   estimator->accumulator = 0;
+  estimator->ping_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
 }
 
 void grpc_bdp_estimator_complete_ping(grpc_bdp_estimator *estimator) {
+  gpr_timespec dt_ts =
+      gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), estimator->ping_start_time);
+  double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
+  double bw = dt > 0 ? ((double)estimator->accumulator / dt) : 0;
   if (grpc_bdp_estimator_trace) {
-    gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64,
-            estimator->name, estimator->accumulator, estimator->estimate);
+    gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
+                       " dt=%lf bw=%lfMbs bw_est=%lfMbs",
+            estimator->name, estimator->accumulator, estimator->estimate, dt,
+            bw / 125000.0, estimator->bw_est / 125000.0);
   }
   GPR_ASSERT(estimator->ping_state == GRPC_BDP_PING_STARTED);
-  if (estimator->accumulator > 2 * estimator->estimate / 3) {
-    estimator->estimate *= 2;
+  if (estimator->accumulator > 2 * estimator->estimate / 3 &&
+      bw > estimator->bw_est) {
+    estimator->estimate =
+        GPR_MAX(estimator->accumulator, estimator->estimate * 2);
+    estimator->bw_est = bw;
     if (grpc_bdp_estimator_trace) {
       gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64,
               estimator->name, estimator->estimate);
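
Note: the estimator now doubles only when a ping both filled more than 2/3 of the current estimate and measured a bandwidth above the best seen so far. As a worked example with assumed numbers (not from this commit): a ping that accumulates 100000 bytes over dt = 0.01 s measures bw = 1e7 bytes/s, printed as 80 Mbs in the trace since the log divides by 125000 bytes/s per megabit. With estimate = 65536, the fill condition holds (100000 > 2 * 65536 / 3 = 43690), so if bw also exceeds bw_est the new estimate is GPR_MAX(100000, 2 * 65536) = 131072 and bw_est rises to 1e7.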

src/core/lib/transport/bdp_estimator.h
@@ -34,6 +34,7 @@
 #ifndef GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H
 #define GRPC_CORE_LIB_TRANSPORT_BDP_ESTIMATOR_H
 
+#include <grpc/support/time.h>
 #include <stdbool.h>
 #include <stdint.h>
@@ -52,6 +53,8 @@ typedef struct grpc_bdp_estimator {
   grpc_bdp_estimator_ping_state ping_state;
   int64_t accumulator;
   int64_t estimate;
+  gpr_timespec ping_start_time;
+  double bw_est;
   const char *name;
 } grpc_bdp_estimator;

src/python/grpcio/grpc/__init__.py
@@ -66,7 +66,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
     Returns False under all other circumstances, for example:
     1. computation has begun and could not be canceled.
     2. computation has finished
-    3. computation is scheduled for execution and it is impossible to determine its state without blocking.
+    3. computation is scheduled for execution and it is impossible to
+       determine its state without blocking.
     """
     raise NotImplementedError()
@@ -123,8 +124,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
     Args:
       timeout: The length of time in seconds to wait for the computation to
-        finish or be cancelled. If None, the call will block until the computations's
-        termination.
+        finish or be cancelled. If None, the call will block until the
+        computations's termination.
 
     Returns:
       The return value of the computation.
@@ -146,8 +147,8 @@ class Future(six.with_metaclass(abc.ABCMeta)):
     Args:
       timeout: The length of time in seconds to wait for the computation to
-        terminate or be cancelled. If None, the call will block until the computations's
-        termination.
+        terminate or be cancelled. If None, the call will block until the
+        computations's termination.
 
     Returns:
       The exception raised by the computation, or None if the computation did
@@ -363,9 +364,9 @@ class ChannelCredentials(object):
   """An encapsulation of the data required to create a secure Channel.
 
   This class has no supported interface - it exists to define the type of its
-  instances and its instances exist to be passed to other functions. For example,
-  ssl_channel_credentials returns an instance, and secure_channel consumes an
-  instance of this class.
+  instances and its instances exist to be passed to other functions. For
+  example, ssl_channel_credentials returns an instance, and secure_channel
+  consumes an instance of this class.
   """
 
   def __init__(self, credentials):
@@ -373,7 +374,8 @@
 class CallCredentials(object):
-  """An encapsulation of the data required to assert an identity over a channel.
+  """An encapsulation of the data required to assert an identity over a
+  channel.
 
   A CallCredentials may be composed with ChannelCredentials to always assert
   identity for every call over that Channel.
@@ -399,7 +401,8 @@ class AuthMetadataPluginCallback(six.with_metaclass(abc.ABCMeta)):
   """Callback object received by a metadata plugin."""
 
   def __call__(self, metadata, error):
-    """Inform the gRPC runtime of the metadata to construct a CallCredentials.
+    """Inform the gRPC runtime of the metadata to construct a
+    CallCredentials.
 
     Args:
       metadata: The :term:`metadata` used to construct the CallCredentials.
@@ -879,8 +882,8 @@ class GenericRpcHandler(six.with_metaclass(abc.ABCMeta)):
       handler_call_details: A HandlerCallDetails describing the RPC.
 
     Returns:
-      An RpcMethodHandler with which the RPC may be serviced if the implementation
-      chooses to service this RPC, or None otherwise.
+      An RpcMethodHandler with which the RPC may be serviced if the
+      implementation chooses to service this RPC, or None otherwise.
     """
     raise NotImplementedError()

templates/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile.template
@@ -1,6 +1,6 @@
 %YAML 1.2
 --- |
-  # Copyright 2016, Google Inc.
+  # Copyright 2017, Google Inc.
   # All rights reserved.
   #
   # Redistribution and use in source and binary forms, with or without
@@ -29,10 +29,10 @@
   # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-  FROM golang:latest
+  FROM golang:1.7
 
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../python_deps.include"/>
   <%include file="../../go_path.include"/>
+  <%include file="../../python_deps.include"/>
   # Define the default command.
   CMD ["bash"]

templates/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile.template
@@ -1,6 +1,6 @@
 %YAML 1.2
 --- |
-  # Copyright 2016, Google Inc.
+  # Copyright 2017, Google Inc.
   # All rights reserved.
   #
   # Redistribution and use in source and binary forms, with or without
@@ -29,12 +29,10 @@
   # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-  FROM debian:jessie
+  FROM golang:1.8
 
-  <%include file="../../apt_get_basic.include"/>
+  <%include file="../../go_path.include"/>
   <%include file="../../python_deps.include"/>
-  <%include file="../../node_deps.include"/>
-  <%include file="../../gcp_api_libraries.include"/>
-  <%include file="../../run_tests_addons.include"/>
   # Define the default command.
   CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile.template (deleted)
@@ -1,42 +0,0 @@
%YAML 1.2
--- |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../csharp_deps.include"/>
# Define the default command.
CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_cxx/Dockerfile.template (deleted)
@@ -1,41 +0,0 @@
%YAML 1.2
--- |
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../clang_update.include"/>
# Define the default command.
CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_java/Dockerfile.template (deleted)
@@ -1,41 +0,0 @@
%YAML 1.2
--- |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../java_deps.include"/>
# Define the default command.
CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template (deleted)
@@ -1,46 +0,0 @@
%YAML 1.2
--- |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../php_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# Define the default command.
CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_python/Dockerfile.template (deleted)
@@ -1,45 +0,0 @@
%YAML 1.2
--- |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../python_deps.include"/>
RUN pip install coverage
RUN pip install oauth2client
# Define the default command.
CMD ["bash"]

templates/tools/dockerfile/stress_test/grpc_interop_stress_ruby/Dockerfile.template (deleted)
@@ -1,42 +0,0 @@
%YAML 1.2
--- |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../python_deps.include"/>
<%include file="../../ccache_setup.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../ruby_deps.include"/>
# Define the default command.
CMD ["bash"]

test/core/transport/bdp_estimator_test.c
@@ -33,6 +33,7 @@
 
 #include "src/core/lib/transport/bdp_estimator.h"
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
@@ -64,6 +65,8 @@ static void add_samples(grpc_bdp_estimator *estimator, int64_t *samples,
     GPR_ASSERT(grpc_bdp_estimator_add_incoming_bytes(estimator, samples[i]) ==
                false);
   }
+  gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                               gpr_time_from_millis(1, GPR_TIMESPAN)));
   grpc_bdp_estimator_complete_ping(estimator);
 }
@@ -123,24 +126,25 @@ static void test_get_estimate_random_values(size_t n) {
   gpr_log(GPR_INFO, "test_get_estimate_random_values(%" PRIdPTR ")", n);
   grpc_bdp_estimator est;
   grpc_bdp_estimator_init(&est, "test");
-  int min = INT_MAX;
-  int max = 65535;  // Windows rand() has limited range, make sure the ASSERT
-                    // passes
+  const int kMaxSample = 65535;
+  int min = kMaxSample;
+  int max = 0;
   for (size_t i = 0; i < n; i++) {
-    int sample = rand();
+    int sample = rand() % (kMaxSample + 1);
     if (sample < min) min = sample;
     if (sample > max) max = sample;
     add_sample(&est, sample);
     if (i >= 3) {
       gpr_log(GPR_DEBUG, "est:%" PRId64 " min:%d max:%d", get_estimate(&est),
               min, max);
-      GPR_ASSERT(get_estimate(&est) <= 2 * next_pow_2(max));
+      GPR_ASSERT(get_estimate(&est) <= GPR_MAX(65536, 2 * next_pow_2(max)));
     }
   }
 }
 
 int main(int argc, char **argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_noop();
   test_get_estimate_no_samples();
   test_get_estimate_1_sample();
@@ -149,5 +153,6 @@ int main(int argc, char **argv) {
   for (size_t i = 3; i < 1000; i = i * 3 / 2) {
     test_get_estimate_random_values(i);
   }
+  grpc_shutdown();
   return 0;
 }

test/core/util/trickle_endpoint.c
@@ -44,6 +44,8 @@
 #include <grpc/support/useful.h>
 #include "src/core/lib/slice/slice_internal.h"
 
+#define WRITE_BUFFER_SIZE (2 * 1024 * 1024)
+
 typedef struct {
   grpc_endpoint base;
   double bytes_per_second;
@@ -55,6 +57,7 @@ typedef struct {
   grpc_slice_buffer writing_buffer;
   grpc_error *error;
   bool writing;
+  grpc_closure *write_cb;
 } trickle_endpoint;
 
 static void te_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -63,10 +66,20 @@ static void te_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
   grpc_endpoint_read(exec_ctx, te->wrapped, slices, cb);
 }
 
+static void maybe_call_write_cb_locked(grpc_exec_ctx *exec_ctx,
+                                       trickle_endpoint *te) {
+  if (te->write_cb != NULL && (te->error != GRPC_ERROR_NONE ||
+                               te->write_buffer.length <= WRITE_BUFFER_SIZE)) {
+    grpc_closure_sched(exec_ctx, te->write_cb, GRPC_ERROR_REF(te->error));
+    te->write_cb = NULL;
+  }
+}
+
 static void te_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                      grpc_slice_buffer *slices, grpc_closure *cb) {
   trickle_endpoint *te = (trickle_endpoint *)ep;
   gpr_mu_lock(&te->mu);
+  GPR_ASSERT(te->write_cb == NULL);
   if (te->write_buffer.length == 0) {
     te->last_write = gpr_now(GPR_CLOCK_MONOTONIC);
   }
@@ -74,7 +87,8 @@ static void te_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
     grpc_slice_buffer_add(&te->write_buffer,
                           grpc_slice_copy(slices->slices[i]));
   }
-  grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_REF(te->error));
+  te->write_cb = cb;
+  maybe_call_write_cb_locked(exec_ctx, te);
   gpr_mu_unlock(&te->mu);
 }
@@ -102,6 +116,7 @@ static void te_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
   if (te->error == GRPC_ERROR_NONE) {
     te->error = GRPC_ERROR_REF(why);
   }
+  maybe_call_write_cb_locked(exec_ctx, te);
   gpr_mu_unlock(&te->mu);
   grpc_endpoint_shutdown(exec_ctx, te->wrapped, why);
 }
@@ -157,6 +172,7 @@ grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap,
   te->base.vtable = &vtable;
   te->wrapped = wrap;
   te->bytes_per_second = bytes_per_second;
+  te->write_cb = NULL;
   gpr_mu_init(&te->mu);
   grpc_slice_buffer_init(&te->write_buffer);
   grpc_slice_buffer_init(&te->writing_buffer);
@@ -187,9 +203,18 @@ size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx *exec_ctx,
       grpc_endpoint_write(
           exec_ctx, te->wrapped, &te->writing_buffer,
          grpc_closure_create(te_finish_write, te, grpc_schedule_on_exec_ctx));
+      maybe_call_write_cb_locked(exec_ctx, te);
     }
   }
   size_t backlog = te->write_buffer.length;
   gpr_mu_unlock(&te->mu);
   return backlog;
 }
+
+size_t grpc_trickle_get_backlog(grpc_endpoint *ep) {
+  trickle_endpoint *te = (trickle_endpoint *)ep;
+  gpr_mu_lock(&te->mu);
+  size_t backlog = te->write_buffer.length;
+  gpr_mu_unlock(&te->mu);
+  return backlog;
+}
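
Note on the design: te_write previously completed the caller's closure immediately, so writers never felt backpressure from the trickle buffer. It now parks the closure in write_cb, and maybe_call_write_cb_locked releases it only once the backlog drops to WRITE_BUFFER_SIZE (2 MB) or the endpoint fails. A hypothetical usage sketch built only from the functions shown here (a real caller would interleave completion-queue polling, as the benchmark below does):

    #include "test/core/util/trickle_endpoint.h"

    /* Pump a trickle endpoint until its write-side backlog drains; each
       trickle() call forwards at most the bytes allowed by bytes_per_second,
       and the parked write callback fires once the backlog falls below the
       2MB watermark. */
    static void drain_trickle(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
      while (grpc_trickle_get_backlog(ep) > 0) {
        grpc_trickle_endpoint_trickle(exec_ctx, ep);
      }
    }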

test/core/util/trickle_endpoint.h
@@ -43,4 +43,6 @@ grpc_endpoint *grpc_trickle_endpoint_create(grpc_endpoint *wrap,
 size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx *exec_ctx,
                                      grpc_endpoint *endpoint);
 
+size_t grpc_trickle_get_backlog(grpc_endpoint *endpoint);
+
 #endif

test/cpp/microbenchmarks/BUILD
@@ -92,7 +92,7 @@ cc_test(
 cc_test(
     name = "bm_fullstack_trickle",
     srcs = ["bm_fullstack_trickle.cc"],
-    deps = [":helpers"],
+    deps = [":helpers", "//external:gflags"],
 )
 
 cc_test(

test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -34,6 +34,8 @@
 /* Benchmark gRPC end2end in various configurations */
 
 #include <benchmark/benchmark.h>
+#include <gflags/gflags.h>
+#include <fstream>
 #include "src/core/lib/profiling/timers.h"
 #include "src/cpp/client/create_channel_internal.h"
 #include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -45,16 +47,57 @@ extern "C" {
 #include "test/core/util/trickle_endpoint.h"
 }
 
+DEFINE_bool(log, false, "Log state to CSV files");
+DEFINE_int32(
+    warmup_megabytes, 1,
+    "Number of megabytes to pump before collecting flow control stats");
+DEFINE_int32(
+    warmup_iterations, 100,
+    "Number of megabytes to pump before collecting flow control stats");
+DEFINE_int32(warmup_max_time_seconds, 10,
+             "Maximum number of seconds to run warmup loop");
+
 namespace grpc {
 namespace testing {
 
 static void* tag(intptr_t x) { return reinterpret_cast<void*>(x); }
 
+template <class A0>
+static void write_csv(std::ostream* out, A0&& a0) {
+  if (!out) return;
+  (*out) << a0 << "\n";
+}
+
+template <class A0, class... Arg>
+static void write_csv(std::ostream* out, A0&& a0, Arg&&... arg) {
+  if (!out) return;
+  (*out) << a0 << ",";
+  write_csv(out, std::forward<Arg>(arg)...);
+}
+
 class TrickledCHTTP2 : public EndpointPairFixture {
  public:
-  TrickledCHTTP2(Service* service, size_t megabits_per_second)
-      : EndpointPairFixture(service, MakeEndpoints(megabits_per_second),
-                            FixtureConfiguration()) {}
+  TrickledCHTTP2(Service* service, size_t message_size,
+                 size_t kilobits_per_second)
+      : EndpointPairFixture(service, MakeEndpoints(kilobits_per_second),
+                            FixtureConfiguration()) {
+    if (FLAGS_log) {
+      std::ostringstream fn;
+      fn << "trickle." << message_size << "." << kilobits_per_second << ".csv";
+      log_.reset(new std::ofstream(fn.str().c_str()));
+      write_csv(log_.get(), "t", "iteration", "client_backlog",
+                "server_backlog", "client_t_stall", "client_s_stall",
+                "server_t_stall", "server_s_stall", "client_t_outgoing",
+                "server_t_outgoing", "client_t_incoming", "server_t_incoming",
+                "client_s_outgoing_delta", "server_s_outgoing_delta",
+                "client_s_incoming_delta", "server_s_incoming_delta",
+                "client_s_announce_window", "server_s_announce_window",
+                "client_peer_iws", "client_local_iws", "client_sent_iws",
+                "client_acked_iws", "server_peer_iws", "server_local_iws",
+                "server_sent_iws", "server_acked_iws", "client_queued_bytes",
+                "server_queued_bytes");
+    }
+  }
 
   void AddToLabel(std::ostream& out, benchmark::State& state) {
     out << " writes/iter:"
@@ -75,7 +118,58 @@ class TrickledCHTTP2 : public EndpointPairFixture {
             (double)state.iterations());
   }
 
-  void Step() {
+  void Log(int64_t iteration) {
+    auto now = gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), start_);
+    grpc_chttp2_transport* client =
+        reinterpret_cast<grpc_chttp2_transport*>(client_transport_);
+    grpc_chttp2_transport* server =
+        reinterpret_cast<grpc_chttp2_transport*>(server_transport_);
+    grpc_chttp2_stream* client_stream =
+        client->stream_map.count == 1
+            ? static_cast<grpc_chttp2_stream*>(client->stream_map.values[0])
+            : nullptr;
+    grpc_chttp2_stream* server_stream =
+        server->stream_map.count == 1
+            ? static_cast<grpc_chttp2_stream*>(server->stream_map.values[0])
+            : nullptr;
+    write_csv(
+        log_.get(), static_cast<double>(now.tv_sec) +
+                        1e-9 * static_cast<double>(now.tv_nsec),
+        iteration, grpc_trickle_get_backlog(endpoint_pair_.client),
+        grpc_trickle_get_backlog(endpoint_pair_.server),
+        client->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr,
+        client->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
+        server->lists[GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT].head != nullptr,
+        server->lists[GRPC_CHTTP2_LIST_STALLED_BY_STREAM].head != nullptr,
+        client->outgoing_window, server->outgoing_window,
+        client->incoming_window, server->incoming_window,
+        client_stream ? client_stream->outgoing_window_delta : -1,
+        server_stream ? server_stream->outgoing_window_delta : -1,
+        client_stream ? client_stream->incoming_window_delta : -1,
+        server_stream ? server_stream->incoming_window_delta : -1,
+        client_stream ? client_stream->announce_window : -1,
+        server_stream ? server_stream->announce_window : -1,
+        client->settings[GRPC_PEER_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        client->settings[GRPC_LOCAL_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        client->settings[GRPC_SENT_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        client->settings[GRPC_ACKED_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        server->settings[GRPC_PEER_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        server->settings[GRPC_LOCAL_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        server->settings[GRPC_SENT_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        server->settings[GRPC_ACKED_SETTINGS]
+                        [GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE],
+        client_stream ? client_stream->flow_controlled_buffer.length : 0,
+        server_stream ? server_stream->flow_controlled_buffer.length : 0);
+  }
+
+  void Step(bool update_stats) {
     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
     size_t client_backlog =
         grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client);
@@ -83,11 +177,13 @@ class TrickledCHTTP2 : public EndpointPairFixture {
         grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.server);
     grpc_exec_ctx_finish(&exec_ctx);
 
+    if (update_stats) {
       UpdateStats((grpc_chttp2_transport*)client_transport_, &client_stats_,
                   client_backlog);
       UpdateStats((grpc_chttp2_transport*)server_transport_, &server_stats_,
                   server_backlog);
+    }
   }
 
  private:
  grpc_passthru_endpoint_stats stats_;
@@ -97,6 +193,8 @@ class TrickledCHTTP2 : public EndpointPairFixture {
   };
   Stats client_stats_;
   Stats server_stats_;
+  std::unique_ptr<std::ofstream> log_;
+  gpr_timespec start_ = gpr_now(GPR_CLOCK_MONOTONIC);
 
   grpc_endpoint_pair MakeEndpoints(size_t kilobits) {
     grpc_endpoint_pair p;
@@ -123,13 +221,15 @@ class TrickledCHTTP2 : public EndpointPairFixture {
 // force library initialization
 auto& force_library_initialization = Library::get();
 
-static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok) {
+static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok,
+                          int64_t iteration) {
   while (true) {
+    fixture->Log(iteration);
     switch (fixture->cq()->AsyncNext(
         t, ok, gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                             gpr_time_from_micros(100, GPR_TIMESPAN)))) {
       case CompletionQueue::TIMEOUT:
-        fixture->Step();
+        fixture->Step(iteration != -1);
         break;
       case CompletionQueue::SHUTDOWN:
         GPR_ASSERT(false);
@@ -143,7 +243,7 @@ static void TrickleCQNext(TrickledCHTTP2* fixture, void** t, bool* ok) {
 static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
   EchoTestService::AsyncService service;
   std::unique_ptr<TrickledCHTTP2> fixture(
-      new TrickledCHTTP2(&service, state.range(1)));
+      new TrickledCHTTP2(&service, state.range(0), state.range(1)));
   {
     EchoResponse send_response;
     EchoResponse recv_response;
@@ -163,18 +263,19 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
     void* t;
     bool ok;
     while (need_tags) {
-      TrickleCQNext(fixture.get(), &t, &ok);
+      TrickleCQNext(fixture.get(), &t, &ok, -1);
       GPR_ASSERT(ok);
       int i = (int)(intptr_t)t;
       GPR_ASSERT(need_tags & (1 << i));
       need_tags &= ~(1 << i);
     }
     request_rw->Read(&recv_response, tag(0));
-    while (state.KeepRunning()) {
+    auto inner_loop = [&](bool in_warmup) {
       GPR_TIMER_SCOPE("BenchmarkCycle", 0);
       response_rw.Write(send_response, tag(1));
       while (true) {
-        TrickleCQNext(fixture.get(), &t, &ok);
+        TrickleCQNext(fixture.get(), &t, &ok,
+                      in_warmup ? -1 : state.iterations());
         if (t == tag(0)) {
          request_rw->Read(&recv_response, tag(0));
         } else if (t == tag(1)) {
@@ -183,11 +284,26 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
           GPR_ASSERT(false);
         }
       }
+    };
+    gpr_timespec warmup_start = gpr_now(GPR_CLOCK_MONOTONIC);
+    for (int i = 0;
+         i < GPR_MAX(FLAGS_warmup_iterations, FLAGS_warmup_megabytes * 1024 *
+                                                  1024 / (14 + state.range(0)));
+         i++) {
+      inner_loop(true);
+      if (gpr_time_cmp(gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), warmup_start),
+                       gpr_time_from_seconds(FLAGS_warmup_max_time_seconds,
+                                             GPR_TIMESPAN)) > 0) {
+        break;
+      }
+    }
+    while (state.KeepRunning()) {
+      inner_loop(false);
     }
     response_rw.Finish(Status::OK, tag(1));
     need_tags = (1 << 0) | (1 << 1);
     while (need_tags) {
-      TrickleCQNext(fixture.get(), &t, &ok);
+      TrickleCQNext(fixture.get(), &t, &ok, -1);
       int i = (int)(intptr_t)t;
       GPR_ASSERT(need_tags & (1 << i));
       need_tags &= ~(1 << i);
@@ -204,10 +320,10 @@ static void BM_PumpStreamServerToClient_Trickle(benchmark::State& state) {
 static void TrickleArgs(benchmark::internal::Benchmark* b) {
   for (int i = 1; i <= 128 * 1024 * 1024; i *= 8) {
-    for (int j = 1; j <= 128 * 1024 * 1024; j *= 8) {
+    for (int j = 64; j <= 128 * 1024 * 1024; j *= 8) {
       double expected_time =
           static_cast<double>(14 + i) / (125.0 * static_cast<double>(j));
-      if (expected_time > 0.01) continue;
+      if (expected_time > 2.0) continue;
       b->Args({i, j});
     }
   }
@@ -217,4 +333,8 @@ BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(TrickleArgs);
 }
 }
 
-BENCHMARK_MAIN();
+int main(int argc, char** argv) {
+  ::benchmark::Initialize(&argc, argv);
+  ::google::ParseCommandLineFlags(&argc, &argv, false);
+  ::benchmark::RunSpecifiedBenchmarks();
+}
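
Note: the TrickleArgs filter derives expected seconds per message as (14 + i) / (125 * j), where i is the message size in bytes plus 14 bytes of HTTP/2 framing, j is the link rate in kilobits per second, and 125 * j converts that rate to bytes per second; pairs slower than 2 s per message are skipped (the old cutoff was 0.01 s). A standalone sketch of the same grid (illustrative, not part of the commit):

    #include <stdio.h>

    int main(void) {
      for (long i = 1; i <= 128L * 1024 * 1024; i *= 8) {    /* message bytes */
        for (long j = 64; j <= 128L * 1024 * 1024; j *= 8) { /* link kbit/s */
          /* 125 * j converts kilobits/s to bytes/s */
          double expected_time = (double)(14 + i) / (125.0 * (double)j);
          if (expected_time > 2.0) continue; /* e.g. 1 MiB at 64 kbit/s needs
                                                ~131s/message and is skipped */
          printf("i=%ld j=%ld expected=%.4fs\n", i, j, expected_time);
        }
      }
      return 0;
    }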

tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile
@@ -1,4 +1,4 @@
-# Copyright 2016, Google Inc.
+# Copyright 2017, Google Inc.
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -27,12 +27,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-FROM golang:latest
+FROM golang:1.7
 
-# Google Cloud platform API libraries
-RUN apt-get update && apt-get install -y python-pip && apt-get clean
-RUN pip install --upgrade google-api-python-client
+# Using login shell removes Go from path, so we add it.
+RUN ln -s /usr/local/go/bin/go /usr/local/bin
 
 #====================
 # Python dependencies
@@ -49,8 +47,5 @@ RUN pip install pip --upgrade
 RUN pip install virtualenv
 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
 
-# Using login shell removes Go from path, so we add it.
-RUN ln -s /usr/local/go/bin/go /usr/local/bin
-
 # Define the default command.
 CMD ["bash"]

tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile
@@ -1,4 +1,3 @@
-#!/bin/bash
 # Copyright 2017, Google Inc.
 # All rights reserved.
 #
@@ -27,5 +26,26 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-for img in `docker images | grep \<none\> | awk '{print $3 }'` ; do docker rmi -f $img; done
+
+FROM golang:1.8
+
+# Using login shell removes Go from path, so we add it.
+RUN ln -s /usr/local/go/bin/go /usr/local/bin
+
+#====================
+# Python dependencies
+
+# Install dependencies
+RUN apt-get update && apt-get install -y \
+    python-all-dev \
+    python3-all-dev \
+    python-pip
+
+# Install Python packages from PyPI
+RUN pip install pip --upgrade
+RUN pip install virtualenv
+RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
+
+# Define the default command.
+CMD ["bash"]

tools/dockerfile/stress_test/grpc_interop_stress_csharp/Dockerfile (deleted)
@@ -1,117 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
# Prepare ccache
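# (symlinking the compiler names onto ccache puts the cache wrapper ahead of the real compilers on PATH, so compilations are cached transparently)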
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#================
# C# dependencies
# Update to a newer version of mono
RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list
# Install dependencies
RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \
mono-devel \
ca-certificates-mono \
nuget \
&& apt-get clean
RUN nuget update -self
# Define the default command.
CMD ["bash"]

@ -1,51 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds C# interop server and client in a base image.
set -e
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
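# (--reference borrows objects from the locally cloned submodule instead of re-fetching them from the network)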
# Copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
# Build C++ metrics client (to query the metrics from csharp stress client)
make metrics_client -j
# Build C# interop client & server
tools/run_tests/run_tests.py -l csharp -c dbg --build_only

@ -1,132 +0,0 @@
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#=================
# Update clang to a version with improved tsan and fuzzing capabilities
RUN apt-get update && apt-get -y install python cmake && apt-get clean
RUN git clone -n -b release_38 http://llvm.org/git/llvm.git && \
cd llvm && git checkout ad57503 && cd ..
RUN git clone -n -b release_38 http://llvm.org/git/clang.git && \
cd clang && git checkout ad2c56e && cd ..
RUN git clone -n -b release_38 http://llvm.org/git/compiler-rt.git && \
cd compiler-rt && git checkout 3176922 && cd ..
RUN git clone -n -b release_38 \
http://llvm.org/git/clang-tools-extra.git && cd clang-tools-extra && \
git checkout c288525 && cd ..
RUN git clone -n -b release_38 http://llvm.org/git/libcxx.git && \
cd libcxx && git checkout fda3549 && cd ..
RUN git clone -n -b release_38 http://llvm.org/git/libcxxabi.git && \
cd libcxxabi && git checkout 8d4e51d && cd ..
RUN mv clang llvm/tools
RUN mv compiler-rt llvm/projects
RUN mv clang-tools-extra llvm/tools/clang/tools
RUN mv libcxx llvm/projects
RUN mv libcxxabi llvm/projects
RUN mkdir llvm-build
RUN cd llvm-build && cmake \
-DCMAKE_BUILD_TYPE:STRING=Release \
-DCMAKE_INSTALL_PREFIX:STRING=/usr \
-DLLVM_TARGETS_TO_BUILD:STRING=X86 \
../llvm
RUN make -C llvm-build -j 12 && make -C llvm-build install && rm -rf llvm-build
# Define the default command.
CMD ["bash"]

@ -1,51 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds C++ interop server and client in a base image.
set -e
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
make install-certs
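# Use BUILD_TYPE from the environment if it is set; otherwise default to the "opt" config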
BUILD_TYPE=${BUILD_TYPE:=opt}
# build C++ interop stress client, interop client and server
make CONFIG=$BUILD_TYPE stress_test metrics_client interop_client interop_server

@ -1,62 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds Go interop server, Stress client and metrics client in a base image.
set -e
# Clone just the grpc-go source code without any dependencies.
# We are cloning from a local git repo that contains the right revision
# to test instead of using "go get" to download from Github directly.
git clone --recursive /var/local/jenkins/grpc-go src/google.golang.org/grpc
# Clone the 'grpc' repo. We just need this for the wrapper scripts under
# grpc/tools/gcp/stress_tests
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
# Get dependencies from GitHub
# NOTE: once grpc-go dependencies change, this needs to be updated manually
# but we don't expect this to happen any time soon.
go get github.com/golang/protobuf/proto
go get golang.org/x/net/context
go get golang.org/x/net/trace
go get golang.org/x/oauth2
go get google.golang.org/cloud
# Build the interop server, stress client and stress metrics client
(cd src/google.golang.org/grpc/interop/server && go install)
(cd src/google.golang.org/grpc/stress/client && go install)
(cd src/google.golang.org/grpc/stress/metrics_client && go install)

@ -1,117 +0,0 @@
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
# Install JDK 8 and Git
#
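# (pre-seeding the license acceptance via debconf-set-selections lets the oracle-java8-installer package install non-interactively)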
RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections && \
echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list && \
echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list && \
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
RUN apt-get update && apt-get -y install \
git \
libapr1 \
oracle-java8-installer \
&& \
apt-get clean && rm -r /var/cache/oracle-jdk8-installer/
ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
ENV PATH $PATH:$JAVA_HOME/bin
# Define the default command.
CMD ["bash"]

@ -1,55 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds C++ interop server and client in a base image.
set -e
mkdir -p /var/local/git
# grpc-java repo
git clone --recursive --depth 1 /var/local/jenkins/grpc-java /var/local/git/grpc-java
# grpc repo (for metrics client and for the stress test wrapper scripts)
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# Copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
# First build the metrics client in grpc repo
cd /var/local/git/grpc
make metrics_client
# Build all interop test targets (which includes interop server and stress test
# client) in grpc-java repo
cd /var/local/git/grpc-java
./gradlew :grpc-interop-testing:installDist -PskipCodegen=true

@ -1,109 +0,0 @@
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
#==================
# Node dependencies
# Install nvm
RUN touch .profile
RUN curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.25.4/install.sh | bash
# Install all versions of node that we want to test
RUN /bin/bash -l -c "nvm install 0.12 && npm config set cache /tmp/npm-cache"
RUN /bin/bash -l -c "nvm install 4 && npm config set cache /tmp/npm-cache"
RUN /bin/bash -l -c "nvm install 5 && npm config set cache /tmp/npm-cache"
RUN /bin/bash -l -c "nvm alias default 4"
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
RUN mkdir /var/local/jenkins
# Define the default command.
CMD ["bash"]

@ -1,48 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds Node interop server and client in a base image.
set -e
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
# build Node interop client & server
npm install -g node-gyp
npm install --unsafe-perm --build-from-source

@ -1,125 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
#==================
# Ruby dependencies
# Install rvm
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
RUN \curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.1
RUN /bin/bash -l -c "rvm install ruby-2.1"
RUN /bin/bash -l -c "rvm use --default ruby-2.1"
RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#=================
# PHP dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
git php5 php5-dev phpunit unzip
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
RUN mkdir /var/local/jenkins
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# Define the default command.
CMD ["bash"]

@ -1,57 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds PHP interop server and client in a base image.
set -ex
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
make install-certs
# gRPC core and protobuf need to be installed
make install
(cd src/php/ext/grpc && phpize && ./configure && make)
(cd third_party/protobuf && make install)
(cd src/php && php -d extension=ext/grpc/modules/grpc.so /usr/local/bin/composer install)
(cd src/php && ./bin/generate_proto_php.sh)

@ -1,103 +0,0 @@
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
RUN pip install coverage
RUN pip install oauth2client
# Define the default command.
CMD ["bash"]

@ -1,49 +0,0 @@
#!/bin/bash
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds Python interop server and client in a base image.
set -e
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
tools/run_tests/run_tests.py -l python -c opt --build_only
# Build c++ interop client
make metrics_client -j

@ -1,114 +0,0 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#====================
# Python dependencies
# Install dependencies
RUN apt-get update && apt-get install -y \
python-all-dev \
python3-all-dev \
python-pip
# Install Python packages from PyPI
RUN pip install pip --upgrade
RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#=================
# C++ dependencies
RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#==================
# Ruby dependencies
# Install rvm
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
RUN \curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.1
RUN /bin/bash -l -c "rvm install ruby-2.1"
RUN /bin/bash -l -c "rvm use --default ruby-2.1"
RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
# Define the default command.
CMD ["bash"]

@ -1,52 +0,0 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds Ruby interop server and client in a base image.
set -e
mkdir -p /var/local/git
git clone /var/local/jenkins/grpc /var/local/git/grpc
# clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc/ && git submodule foreach 'cd /var/local/git/grpc \
&& git submodule update --init --reference /var/local/jenkins/grpc/${name} \
${name}')
# Copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
rvm --default use ruby-2.1
# Build Ruby interop client and server
(cd src/ruby && gem update bundler && bundle && rake compile)
# Build c++ metrics client to query the metrics from ruby stress client
make metrics_client -j

@ -1,206 +0,0 @@
#!/usr/bin/env python2.7
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import re
import resource
import select
import subprocess
import sys
import time
from stress_test_utils import EventType
from stress_test_utils import BigQueryHelper
# TODO (sree): Write a python grpc client to directly query the metrics instead
# of calling metrics_client
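# A direct query could look roughly like the following sketch (hypothetical;
# assumes stubs generated from src/proto/grpc/testing/metrics.proto and the
# grpcio package are importable, and that the metrics server listens locally):
#   channel = grpc.insecure_channel('localhost:8081')  # address is an assumption
#   stub = metrics_pb2_grpc.MetricsServiceStub(channel)
#   for gauge in stub.GetAllGauges(metrics_pb2.EmptyMessage()):
#     print gauge  # each GaugeResponse carries a gauge name and value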
def _get_qps(metrics_cmd):
qps = 0
try:
# Note: gpr_log() writes even non-error messages to stderr stream. So it is
# important that we set stderr=subprocess.STDOUT
p = subprocess.Popen(args=metrics_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# communicate() waits for the process while draining its pipes, avoiding
# the deadlock that p.wait() can hit when the child fills the stdout PIPE
(out_str, err_str) = p.communicate()
retcode = p.returncode
if retcode != 0:
print 'Error in reading metrics information'
print 'Output: ', out_str
else:
# The overall qps is printed at the end of the line
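# (for example, a hypothetical trailing "Total qps: 2504" would yield 2504)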
m = re.search(r'\d+$', out_str)
qps = int(m.group()) if m else 0
except Exception as ex:
print 'Exception while reading metrics information: ' + str(ex)
return qps
def run_client():
"""This is a wrapper around the stress test client and performs the following:
1) Create the following two tables in Big Query:
(i) Summary table: To record events like the test started, completed
successfully or failed
(ii) Qps table: To periodically record the QPS sent by this client
2) Start the stress test client and add a row in the Big Query summary
table
3) Once every few seconds (as specified by the poll_interval_secs) poll
the status of the stress test client process and perform the
following:
3.1) If the process is still running, get the current qps by invoking
the metrics client program and add a row in the Big Query
Qps table. Sleep for a duration specified by poll_interval_secs
3.2) If the process exited successfully, add a row in the Big Query
Summary table and exit
3.3) If the process failed, add a row in Big Query summary table and
wait forever.
NOTE: This script typically runs inside a GKE pod which means
that the pod gets destroyed when the script exits. However, in
case the stress test client fails, we would not want the pod to
be destroyed (since we might want to connect to the pod for
examining logs). This is the reason why the script waits forever
in case of failures
"""
# Set the 'core file' size to 'unlimited' so that 'core' files are generated
# if the client crashes (Note: This is not relevant for Java and Go clients)
resource.setrlimit(resource.RLIMIT_CORE,
(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
env = dict(os.environ)
image_type = env['STRESS_TEST_IMAGE_TYPE']
stress_client_cmd = env['STRESS_TEST_CMD'].split()
args_str = env['STRESS_TEST_ARGS_STR']
metrics_client_cmd = env['METRICS_CLIENT_CMD'].split()
metrics_client_args_str = env['METRICS_CLIENT_ARGS_STR']
run_id = env['RUN_ID']
pod_name = env['POD_NAME']
logfile_name = env.get('LOGFILE_NAME')
poll_interval_secs = float(env['POLL_INTERVAL_SECS'])
project_id = env['GCP_PROJECT_ID']
dataset_id = env['DATASET_ID']
summary_table_id = env['SUMMARY_TABLE_ID']
qps_table_id = env['QPS_TABLE_ID']
# The following parameter tells us whether the stress client runs forever
# until forcefully stopped, or will naturally stop after some time. If the
# client is expected to run forever, any termination (even with a success
# exit code) is flagged as a failure.
will_run_forever = env.get('WILL_RUN_FOREVER', '1')
bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
dataset_id, summary_table_id, qps_table_id)
bq_helper.initialize()
# Create BigQuery Dataset and Tables: Summary Table and Metrics Table
if not bq_helper.setup_tables():
print 'Error in creating BigQuery tables'
return
start_time = datetime.datetime.now()
logfile = None
details = 'Logging to stdout'
if logfile_name is not None:
print 'Opening logfile: %s ...' % logfile_name
details = 'Logfile: %s' % logfile_name
logfile = open(logfile_name, 'w')
metrics_cmd = metrics_client_cmd + [x
for x in metrics_client_args_str.split()]
stress_cmd = stress_client_cmd + [x for x in args_str.split()]
details = '%s, Metrics command: %s, Stress client command: %s' % (
details, str(metrics_cmd), str(stress_cmd))
# Update status that the test is starting (in the status table)
bq_helper.insert_summary_row(EventType.STARTING, details)
print 'Launching process %s ...' % stress_cmd
stress_p = subprocess.Popen(args=stress_cmd,
stdout=logfile,
stderr=subprocess.STDOUT)
qps_history = [1, 1, 1] # Maintain the last 3 qps readings
qps_history_idx = 0 # Index into the qps_history list
is_running_status_written = False
is_error = False
while True:
# Check if stress_client is still running. If so, collect metrics and upload
# to BigQuery status table
# If stress_p.poll() is not None, it means that the stress client terminated
if stress_p.poll() is not None:
end_time = datetime.datetime.now().isoformat()
event_type = EventType.SUCCESS
details = 'End time: %s' % end_time
if will_run_forever == '1' or stress_p.returncode != 0:
event_type = EventType.FAILURE
details = 'Return code = %d. End time: %s' % (stress_p.returncode,
end_time)
is_error = True
bq_helper.insert_summary_row(event_type, details)
print details
break
if not is_running_status_written:
bq_helper.insert_summary_row(EventType.RUNNING, '')
is_running_status_written = True
# Stress client still running. Get metrics
qps = _get_qps(metrics_cmd)
qps_recorded_at = datetime.datetime.now().isoformat()
print 'qps: %d at %s' % (qps, qps_recorded_at)
# If QPS has been zero for the last 3 iterations, flag it as an error and exit
qps_history[qps_history_idx] = qps
qps_history_idx = (qps_history_idx + 1) % len(qps_history)
if sum(qps_history) == 0:
details = 'QPS has been zero for the last %d seconds - as of: %s' % (
poll_interval_secs * 3, qps_recorded_at)
is_error = True
bq_helper.insert_summary_row(EventType.FAILURE, details)
print details
break
# Upload qps metrics to BigQuery
bq_helper.insert_qps_row(qps, qps_recorded_at)
time.sleep(poll_interval_secs)
if is_error:
print 'Waiting indefinitely..'
select.select([], [], [])
print 'Completed'
return
if __name__ == '__main__':
run_client()

@ -1,37 +0,0 @@
#!/bin/bash
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a wrapper script that helps run_server.py and run_client.py
# launch the Node.js stress clients and stress servers
source ~/.nvm/nvm.sh
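# nvm is sourced above so that 'node' is on PATH in this non-login shell;
# the wrapper then runs whatever command line it was given.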
set -ex
"$@"

@ -1,37 +0,0 @@
#!/bin/bash
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a wrapper script that helps run_server.py and run_client.py
# launch the Ruby stress clients and stress servers
source /etc/profile.d/rvm.sh
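# rvm is loaded above so the configured ruby is on PATH; the wrapper then
# runs whatever command line it was given.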
set -ex
"$@"

@ -1,138 +0,0 @@
#!/usr/bin/env python2.7
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import resource
import select
import subprocess
import sys
import time
from stress_test_utils import BigQueryHelper
from stress_test_utils import EventType
def run_server():
"""This is a wrapper around the interop server and performs the following:
1) Create a 'Summary table' in Big Query to record events like the server
started, completed successfully or failed. NOTE: This also creates
another table called the QPS table which is currently NOT needed on the
server (it is needed on the stress test clients)
2) Start the server process and add a row in Big Query summary table
3) Wait for the server process to terminate. The server process does not
terminate unless there is an error.
If the server process terminated with a failure, add a row in Big Query
and wait forever.
NOTE: This script typically runs inside a GKE pod which means that the
pod gets destroyed when the script exits. However, in case the server
process fails, we would not want the pod to be destroyed (since we
might want to connect to the pod for examining logs). This is the
reason why the script waits forever in case of failures.
"""
# Set the 'core file' size to 'unlimited' so that 'core' files are generated
# if the server crashes (Note: This is not relevant for Java and Go servers)
resource.setrlimit(resource.RLIMIT_CORE,
(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
# Read the parameters from environment variables
env = dict(os.environ)
run_id = env['RUN_ID'] # The unique run id for this test
image_type = env['STRESS_TEST_IMAGE_TYPE']
stress_server_cmd = env['STRESS_TEST_CMD'].split()
args_str = env['STRESS_TEST_ARGS_STR']
pod_name = env['POD_NAME']
project_id = env['GCP_PROJECT_ID']
dataset_id = env['DATASET_ID']
summary_table_id = env['SUMMARY_TABLE_ID']
qps_table_id = env['QPS_TABLE_ID']
# The following parameter tells us whether the server runs forever until
# forcefully stopped, or will naturally stop after some time. If the server
# is expected to run forever, any termination (even with a success exit
# code) is flagged as a failure.
will_run_forever = env.get('WILL_RUN_FOREVER', '1')
logfile_name = env.get('LOGFILE_NAME')
print('pod_name: %s, project_id: %s, run_id: %s, dataset_id: %s, '
'summary_table_id: %s, qps_table_id: %s' % (pod_name, project_id,
run_id, dataset_id,
summary_table_id,
qps_table_id))
bq_helper = BigQueryHelper(run_id, image_type, pod_name, project_id,
dataset_id, summary_table_id, qps_table_id)
bq_helper.initialize()
# Create BigQuery Dataset and Tables: Summary Table and Metrics Table
if not bq_helper.setup_tables():
print 'Error in creating BigQuery tables'
return
start_time = datetime.datetime.now()
logfile = None
details = 'Logging to stdout'
if logfile_name is not None:
print 'Opening log file: ', logfile_name
logfile = open(logfile_name, 'w')
details = 'Logfile: %s' % logfile_name
stress_cmd = stress_server_cmd + [x for x in args_str.split()]
details = '%s, Stress server command: %s' % (details, str(stress_cmd))
# Update status that the test is starting (in the status table)
bq_helper.insert_summary_row(EventType.STARTING, details)
print 'Launching process %s ...' % stress_cmd
stress_p = subprocess.Popen(args=stress_cmd,
stdout=logfile,
stderr=subprocess.STDOUT)
# Update the status to running if subprocess.Popen launched the server
if stress_p.poll() is None:
bq_helper.insert_summary_row(EventType.RUNNING, '')
# Wait for the server process to terminate
returncode = stress_p.wait()
if will_run_forever == '1' or returncode != 0:
end_time = datetime.datetime.now().isoformat()
event_type = EventType.FAILURE
details = 'Returncode: %d; End time: %s' % (returncode, end_time)
bq_helper.insert_summary_row(event_type, details)
print 'Waiting indefinitely..'
select.select([], [], [])
return returncode
if __name__ == '__main__':
run_server()
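For reference, a minimal sketch of how this wrapper might be invoked outside GKE; all values below are hypothetical and simply mirror the environment variables the script reads:

```python
import os
import subprocess

# Hypothetical values mirroring the env vars read by run_server.py above.
env = dict(os.environ)
env.update({
    'RUN_ID': 'run-20160101',
    'STRESS_TEST_IMAGE_TYPE': 'SERVER',
    'STRESS_TEST_CMD': '/var/local/git/grpc/bins/opt/interop_server',
    'STRESS_TEST_ARGS_STR': '--port=8080',
    'POD_NAME': 'stress-server-0',
    'GCP_PROJECT_ID': 'my-gcp-project',   # hypothetical project
    'DATASET_ID': 'stress_test_dataset',
    'SUMMARY_TABLE_ID': 'summary',
    'QPS_TABLE_ID': 'qps',
})
subprocess.check_call(['python', 'tools/gcp/stress_test/run_server.py'], env=env)
```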

@ -1,217 +0,0 @@
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import json
import os
import re
import select
import subprocess
import sys
import time
# Import big_query_utils module
bq_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../utils'))
sys.path.append(bq_utils_dir)
import big_query_utils as bq_utils
class EventType:
STARTING = 'STARTING'
RUNNING = 'RUNNING'
SUCCESS = 'SUCCESS'
FAILURE = 'FAILURE'
class BigQueryHelper:
"""Helper class for the stress test wrappers to interact with BigQuery.
"""
def __init__(self, run_id, image_type, pod_name, project_id, dataset_id,
summary_table_id, qps_table_id):
self.run_id = run_id
self.image_type = image_type
self.pod_name = pod_name
self.project_id = project_id
self.dataset_id = dataset_id
self.summary_table_id = summary_table_id
self.qps_table_id = qps_table_id
def initialize(self):
self.bq = bq_utils.create_big_query()
def setup_tables(self):
return bq_utils.create_dataset(self.bq, self.project_id, self.dataset_id) \
and self.__create_summary_table() \
and self.__create_qps_table()
def insert_summary_row(self, event_type, details):
row_values_dict = {
'run_id': self.run_id,
'image_type': self.image_type,
'pod_name': self.pod_name,
'event_date': datetime.datetime.now().isoformat(),
'event_type': event_type,
'details': details
}
# row_unique_id uniquely identifies the row (BigQuery uses it for duplicate
# detection).
row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, event_type)
row = bq_utils.make_row(row_unique_id, row_values_dict)
return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
self.summary_table_id, [row])
def insert_qps_row(self, qps, recorded_at):
row_values_dict = {
'run_id': self.run_id,
'pod_name': self.pod_name,
'recorded_at': recorded_at,
'qps': qps
}
# row_unique_id uniquely identifies the row (BigQuery uses it for duplicate
# detection).
row_unique_id = '%s_%s_%s' % (self.run_id, self.pod_name, recorded_at)
row = bq_utils.make_row(row_unique_id, row_values_dict)
return bq_utils.insert_rows(self.bq, self.project_id, self.dataset_id,
self.qps_table_id, [row])
def check_if_any_tests_failed(self, num_query_retries=3, timeout_msec=30000):
query = ('SELECT event_type FROM %s.%s WHERE run_id = \'%s\' AND '
'event_type="%s"') % (self.dataset_id, self.summary_table_id,
self.run_id, EventType.FAILURE)
page = None
try:
query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
job_id = query_job['jobReference']['jobId']
project_id = query_job['jobReference']['projectId']
page = self.bq.jobs().getQueryResults(
projectId=project_id,
jobId=job_id,
timeoutMs=timeout_msec).execute(num_retries=num_query_retries)
if not page['jobComplete']:
print(('TIMEOUT ERROR: The query %s timed out. Current timeout value is'
' %d msec. Returning False (i.e. assuming there are no failures)')
% (query, timeout_msec))
return False
num_failures = int(page['totalRows'])
print 'num rows: ', num_failures
return num_failures > 0
except Exception:
print 'Exception in check_if_any_tests_failed(). Info: ', sys.exc_info()
print 'Query: ', query
return False
def print_summary_records(self, num_query_retries=3):
line = '-' * 120
print line
print 'Summary records'
print 'Run Id: ', self.run_id
print 'Dataset Id: ', self.dataset_id
print line
query = ('SELECT pod_name, image_type, event_type, event_date, details'
' FROM %s.%s WHERE run_id = \'%s\' ORDER by event_date;') % (
self.dataset_id, self.summary_table_id, self.run_id)
query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
print '{:<25} {:<12} {:<12} {:<30} {}'.format('Pod name', 'Image type',
'Event type', 'Date',
'Details')
print line
page_token = None
while True:
page = self.bq.jobs().getQueryResults(
pageToken=page_token,
**query_job['jobReference']).execute(num_retries=num_query_retries)
rows = page.get('rows', [])
for row in rows:
print '{:<25} {:<12} {:<12} {:<30} {}'.format(row['f'][0]['v'],
row['f'][1]['v'],
row['f'][2]['v'],
row['f'][3]['v'],
row['f'][4]['v'])
page_token = page.get('pageToken')
if not page_token:
break
def print_qps_records(self, num_query_retries=3):
line = '-' * 80
print line
print 'QPS Summary'
print 'Run Id: ', self.run_id
print 'Dataset Id: ', self.dataset_id
print line
query = (
'SELECT pod_name, recorded_at, qps FROM %s.%s WHERE run_id = \'%s\' '
'ORDER by recorded_at;') % (self.dataset_id, self.qps_table_id,
self.run_id)
query_job = bq_utils.sync_query_job(self.bq, self.project_id, query)
print '{:<25} {:30} {}'.format('Pod name', 'Recorded at', 'Qps')
print line
page_token = None
while True:
page = self.bq.jobs().getQueryResults(
pageToken=page_token,
**query_job['jobReference']).execute(num_retries=num_query_retries)
rows = page.get('rows', [])
for row in rows:
print '{:<25} {:30} {}'.format(row['f'][0]['v'], row['f'][1]['v'],
row['f'][2]['v'])
page_token = page.get('pageToken')
if not page_token:
break
def __create_summary_table(self):
summary_table_schema = [
('run_id', 'STRING', 'Test run id'),
('image_type', 'STRING', 'Client or Server?'),
('pod_name', 'STRING', 'GKE pod hosting this image'),
('event_date', 'STRING', 'The date of this event'),
('event_type', 'STRING', 'STARTING/RUNNING/SUCCESS/FAILURE'),
('details', 'STRING', 'Any other relevant details')
]
desc = ('The table that contains STARTING/RUNNING/SUCCESS/FAILURE events '
'for the stress test clients and servers')
return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
self.summary_table_id, summary_table_schema,
desc)
def __create_qps_table(self):
qps_table_schema = [
('run_id', 'STRING', 'Test run id'),
('pod_name', 'STRING', 'GKE pod hosting this image'),
('recorded_at', 'STRING', 'Metrics recorded at time'),
('qps', 'INTEGER', 'Queries per second')
]
desc = 'The table that contains the QPS recorded at various intervals'
return bq_utils.create_table(self.bq, self.project_id, self.dataset_id,
self.qps_table_id, qps_table_schema, desc)
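A short usage sketch of the helper above; the identifiers are hypothetical and GCP credentials plus an importable `big_query_utils` are assumed:

```python
import datetime
from stress_test_utils import BigQueryHelper, EventType

# Hypothetical identifiers for illustration.
bq_helper = BigQueryHelper(run_id='run-20160101', image_type='client',
                           pod_name='stress-client-0',
                           project_id='my-gcp-project',
                           dataset_id='stress_test_dataset',
                           summary_table_id='summary', qps_table_id='qps')
bq_helper.initialize()
if bq_helper.setup_tables():
  bq_helper.insert_summary_row(EventType.STARTING, 'Client starting')
  bq_helper.insert_qps_row(qps=1200,
                           recorded_at=datetime.datetime.now().isoformat())
```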

@ -1,269 +0,0 @@
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
import json
_REQUEST_TIMEOUT_SECS = 10
def _make_pod_config(pod_name, image_name, container_port_list, cmd_list,
arg_list, env_dict):
"""Creates a string containing the Pod defintion as required by the Kubernetes API"""
body = {
'kind': 'Pod',
'apiVersion': 'v1',
'metadata': {
'name': pod_name,
'labels': {'name': pod_name}
},
'spec': {
'containers': [
{
'name': pod_name,
'image': image_name,
'ports': [{'containerPort': port,
'protocol': 'TCP'}
for port in container_port_list],
'imagePullPolicy': 'Always'
}
]
}
}
env_list = [{'name': k, 'value': v} for (k, v) in env_dict.iteritems()]
if len(env_list) > 0:
body['spec']['containers'][0]['env'] = env_list
# Add the 'command' and 'args' attributes if they are passed.
# Note:
# - 'command' overrides the ENTRYPOINT in the Docker image
# - 'args' overrides the CMD in the Docker image (yes, it is confusing!)
if len(cmd_list) > 0:
body['spec']['containers'][0]['command'] = cmd_list
if len(arg_list) > 0:
body['spec']['containers'][0]['args'] = arg_list
return json.dumps(body)
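# Illustration (hypothetical values, not from the original module): for a pod
# with one exposed port, a non-empty env_dict and empty cmd_list/arg_list,
# the helper above produces JSON along these lines:
#
#   _make_pod_config('stress-client-0', 'gcr.io/my-project/grpc_stress_cxx_opt',
#                    [8081], [], [], {'RUN_ID': 'run-20160101'})
#   -> '{"kind": "Pod", "apiVersion": "v1",
#        "metadata": {"name": "stress-client-0", "labels": {"name": "stress-client-0"}},
#        "spec": {"containers": [{"name": "stress-client-0", ...,
#                 "env": [{"name": "RUN_ID", "value": "run-20160101"}]}]}}'
#   (no "command"/"args" keys, since cmd_list and arg_list are empty)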
def _make_service_config(service_name, pod_name, service_port_list,
container_port_list, is_headless):
"""Creates a string containing the Service definition as required by the Kubernetes API.
NOTE:
This creates either a Headless Service or 'LoadBalancer' service depending on
the is_headless parameter. For Headless services, there is no 'type' attribute
and the 'clusterIP' attribute is set to 'None'. Also, if the service is
Headless, Kubernetes creates DNS entries for Pods - i.e creates DNS A-records
mapping the service's name to the Pods' IPs
"""
if len(container_port_list) != len(service_port_list):
print(
'ERROR: container_port_list and service_port_list must be of same size')
return ''
body = {
'kind': 'Service',
'apiVersion': 'v1',
'metadata': {
'name': service_name,
'labels': {
'name': service_name
}
},
'spec': {
'ports': [],
'selector': {
'name': pod_name
}
}
}
# Populate the 'ports' list in the 'spec' section. This maps service ports
# (port numbers that are exposed by Kubernetes) to container ports (i.e. port
# numbers that are exposed by your Docker image)
for idx in range(len(container_port_list)):
port_entry = {
'port': service_port_list[idx],
'targetPort': container_port_list[idx],
'protocol': 'TCP'
}
body['spec']['ports'].append(port_entry)
# Make this either a LoadBalancer service or a headless service depending on
# the is_headless parameter
if is_headless:
body['spec']['clusterIP'] = 'None'
else:
body['spec']['type'] = 'LoadBalancer'
return json.dumps(body)
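# Illustration (hypothetical names, not from the original module): a headless
# service exposing container port 8080 directly:
#
#   _make_service_config('stress-server-demo', 'stress-server-demo',
#                        [8080], [8080], is_headless=True)
#
# yields a Service with no 'type' attribute and '"clusterIP": "None"', while
# is_headless=False instead yields '"type": "LoadBalancer"'.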
def _print_connection_error(msg):
print('ERROR: Connection failed. Did you remember to run the Kubernetes proxy '
'on localhost (i.e. kubectl proxy --port=<proxy_port>)? Error: %s' % msg)
def _do_post(post_url, api_name, request_body):
"""Helper to do HTTP POST.
Note:
1) On success, Kubernetes returns a success code of 201 (CREATED), not 200 (OK)
2) A response code of 409 (CONFLICT) is interpreted as a success code (since
the error is most likely due to the resource already existing). This makes
_do_post() idempotent, which is semantically desirable.
"""
is_success = True
try:
r = requests.post(post_url,
data=request_body,
timeout=_REQUEST_TIMEOUT_SECS)
if r.status_code == requests.codes.conflict:
print('WARN: Looks like the resource already exists. Api: %s, url: %s' %
(api_name, post_url))
elif r.status_code != requests.codes.created:
print('ERROR: %s API returned error. HTTP response: (%d) %s' %
(api_name, r.status_code, r.text))
is_success = False
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError) as e:
is_success = False
_print_connection_error(str(e))
return is_success
def _do_delete(del_url, api_name):
"""Helper to do HTTP DELETE.
Note: A response code of 404(NOT_FOUND) is treated as success to keep
_do_delete() idempotent.
"""
is_success = True
try:
r = requests.delete(del_url, timeout=_REQUEST_TIMEOUT_SECS)
if r.status_code == requests.codes.not_found:
print('WARN: The resource does not exist. Api: %s, url: %s' %
(api_name, del_url))
elif r.status_code != requests.codes.ok:
print('ERROR: %s API returned error. HTTP response: %s' %
(api_name, r.text))
is_success = False
except (requests.exceptions.Timeout,
requests.exceptions.ConnectionError) as e:
is_success = False
_print_connection_error(str(e))
return is_success
def create_service(kube_host, kube_port, namespace, service_name, pod_name,
service_port_list, container_port_list, is_headless):
"""Creates either a Headless Service or a LoadBalancer Service depending
on the is_headless parameter.
"""
post_url = 'http://%s:%d/api/v1/namespaces/%s/services' % (
kube_host, kube_port, namespace)
request_body = _make_service_config(service_name, pod_name, service_port_list,
container_port_list, is_headless)
return _do_post(post_url, 'Create Service', request_body)
def create_pod(kube_host, kube_port, namespace, pod_name, image_name,
container_port_list, cmd_list, arg_list, env_dict):
"""Creates a Kubernetes Pod.
Note that it is generally NOT considered a good practice to directly create
Pods. Typically, the recommendation is to create 'Controllers' to create and
manage Pods' lifecycle. Currently Kubernetes only supports the 'Replication
Controller', which creates a configurable number of identical replicas of
Pods and automatically restarts any Pods in case of failures (e.g. machine
failures in Kubernetes). This makes it less flexible for our test use cases,
where we might want a slightly different set of args for each Pod. Hence we
directly create Pods and do not worry much about Kubernetes failures, since
those are very rare.
"""
post_url = 'http://%s:%d/api/v1/namespaces/%s/pods' % (kube_host, kube_port,
namespace)
request_body = _make_pod_config(pod_name, image_name, container_port_list,
cmd_list, arg_list, env_dict)
return _do_post(post_url, 'Create Pod', request_body)
def delete_service(kube_host, kube_port, namespace, service_name):
del_url = 'http://%s:%d/api/v1/namespaces/%s/services/%s' % (
kube_host, kube_port, namespace, service_name)
return _do_delete(del_url, 'Delete Service')
def delete_pod(kube_host, kube_port, namespace, pod_name):
del_url = 'http://%s:%d/api/v1/namespaces/%s/pods/%s' % (kube_host, kube_port,
namespace, pod_name)
return _do_delete(del_url, 'Delete Pod')
def create_pod_and_service(kube_host, kube_port, namespace, pod_name,
image_name, container_port_list, cmd_list, arg_list,
env_dict, is_headless_service):
"""A helper function that creates a pod and a service (if pod creation was successful)."""
is_success = create_pod(kube_host, kube_port, namespace, pod_name, image_name,
container_port_list, cmd_list, arg_list, env_dict)
if not is_success:
print 'Error in creating Pod'
return False
is_success = create_service(
kube_host,
kube_port,
namespace,
pod_name, # Use pod_name for service
pod_name,
container_port_list, # Service port list same as container port list
container_port_list,
is_headless_service)
if not is_success:
print 'Error in creating Service'
return False
print 'Successfully created the pod/service %s' % pod_name
return True
def delete_pod_and_service(kube_host, kube_port, namespace, pod_name):
""" A helper function that calls delete_pod and delete_service """
is_success = delete_pod(kube_host, kube_port, namespace, pod_name)
if not is_success:
print 'Error in deleting pod %s' % pod_name
return False
# Note: the service name is assumed to be the same as the pod name
is_success = delete_service(kube_host, kube_port, namespace, pod_name)
if not is_success:
print 'Error in deleting service %s' % pod_name
return False
print 'Successfully deleted the Pod/Service: %s' % pod_name
return True
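A hedged usage sketch of the helpers above; it assumes a local `kubectl proxy` is already running (the framework's configs use ports like 8003), and the pod name, image name and env values are hypothetical:

```python
import kubernetes_api

# Assumes a local proxy: kubectl proxy --port=8003
kube_host, kube_port, namespace = 'localhost', 8003, 'default'
ok = kubernetes_api.create_pod_and_service(
    kube_host, kube_port, namespace,
    pod_name='stress-server-demo',                       # hypothetical
    image_name='gcr.io/my-project/grpc_stress_cxx_opt',  # hypothetical
    container_port_list=[8080],
    cmd_list=['/var/local/git/grpc/tools/gcp/stress_test/run_server.py'],
    arg_list=[],
    env_dict={'RUN_ID': 'run-20160101'},
    is_headless_service=True)
if ok:
    kubernetes_api.delete_pod_and_service(kube_host, kube_port, namespace,
                                          'stress-server-demo')
```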

@ -1,37 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This script is invoked by Jenkins and runs the interop stress test suite.
set -ex
# Enter the gRPC repo root
cd $(dirname $0)/../..
tools/run_tests/run_stress_tests.py -l all -s all -j 12 $@ || true

@ -56,6 +56,10 @@ _INTERESTING = (
'writes_per_iteration',
'atm_cas_per_iteration',
'atm_add_per_iteration',
+ 'cli_transport_stalls_per_iteration',
+ 'cli_stream_stalls_per_iteration',
+ 'svr_transport_stalls_per_iteration',
+ 'svr_stream_stalls_per_iteration',
'nows_per_iteration',
)

@ -41438,7 +41438,7 @@
{
"args": [
"--scenarios_json",
- "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
],
"boringssl": true,
"ci_platforms": [
@ -42217,7 +42217,7 @@
{
"args": [
"--scenarios_json",
- "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
],
"boringssl": true,
"ci_platforms": [
@ -43074,7 +43074,7 @@
{
"args": [
"--scenarios_json",
- "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
],
"boringssl": true,
"ci_platforms": [
@ -44256,7 +44256,7 @@
{
"args": [
"--scenarios_json",
- "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+ "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
],
"boringssl": true,
"ci_platforms": [

@ -53,6 +53,7 @@ HISTOGRAM_PARAMS = {
# actual target will be slightly higher)
OUTSTANDING_REQUESTS={
'async': 6400,
+ 'async-1core': 800,
'sync': 1000
}
@ -265,7 +266,7 @@ class CXXLanguage:
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
+ unconstrained_client='async-1core', use_generic_payload=True,
async_server_threads=1,
secure=secure)
@ -752,7 +753,7 @@ class JavaLanguage:
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
- unconstrained_client='async', use_generic_payload=True,
+ unconstrained_client='async-1core', use_generic_payload=True,
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
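The tests.json change above (outstanding_rpcs_per_channel 100 -> 13 in the one-server-core scenarios) follows from the new 'async-1core' budget, assuming the scenario generator spreads the total outstanding requests across the 64 client channels and rounds up:

```python
import math

outstanding_requests = 800  # OUTSTANDING_REQUESTS['async-1core']
client_channels = 64
print(int(math.ceil(float(outstanding_requests) / client_channels)))  # -> 13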

@ -1,331 +0,0 @@
#!/usr/bin/env python
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run stress test in C++"""
from __future__ import print_function
import argparse
import atexit
import itertools
import json
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import time
import uuid
import six
import python_utils.dockerjob as dockerjob
import python_utils.jobset as jobset
# Docker doesn't clean up terminal state after itself (it can leave echo
# disabled), so we restore it on exit.
atexit.register(lambda: subprocess.call(['stty', 'echo']))
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
_DEFAULT_SERVER_PORT = 8080
_DEFAULT_METRICS_PORT = 8081
_DEFAULT_TEST_CASES = 'empty_unary:20,large_unary:20,client_streaming:20,server_streaming:20,empty_stream:20'
_DEFAULT_NUM_CHANNELS_PER_SERVER = 5
_DEFAULT_NUM_STUBS_PER_CHANNEL = 10
# 15 mins default
_DEFAULT_TEST_DURATION_SECS = 900
class CXXLanguage:
def __init__(self):
self.client_cwd = None
self.server_cwd = None
self.safename = 'cxx'
def client_cmd(self, args):
return ['bins/opt/stress_test'] + args
def server_cmd(self, args):
return ['bins/opt/interop_server'] + args
def global_env(self):
return {}
def __str__(self):
return 'c++'
_LANGUAGES = {'c++': CXXLanguage(),}
# languages supported as cloud_to_cloud servers
_SERVERS = ['c++']
DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
"""Wraps given cmdline array to create 'docker run' cmdline from it."""
docker_cmdline = ['docker', 'run', '-i', '--rm=true']
# turn environ into -e docker args
if environ:
for k, v in environ.items():
docker_cmdline += ['-e', '%s=%s' % (k, v)]
# set working directory
workdir = DOCKER_WORKDIR_ROOT
if cwd:
workdir = os.path.join(workdir, cwd)
docker_cmdline += ['-w', workdir]
docker_cmdline += docker_args + [image] + cmdline
return docker_cmdline
def bash_login_cmdline(cmdline):
"""Creates bash -l -c cmdline from args list."""
# Use login shell:
# * rvm and nvm require it
# * makes error messages clearer if executables are missing
return ['bash', '-l', '-c', ' '.join(cmdline)]
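# Illustration (hypothetical image tag, not from the original script) of how
# the two helpers above compose:
#
#   cmd = bash_login_cmdline(['bins/opt/stress_test', '--metrics_port=8081'])
#   # -> ['bash', '-l', '-c', 'bins/opt/stress_test --metrics_port=8081']
#   docker_run_cmdline(cmd, image='grpc_interop_stress_cxx:1234',
#                      docker_args=['--net=host'])
#   # -> ['docker', 'run', '-i', '--rm=true', '-w', '/var/local/git/grpc',
#   #     '--net=host', 'grpc_interop_stress_cxx:1234',
#   #     'bash', '-l', '-c', 'bins/opt/stress_test --metrics_port=8081']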
def _job_kill_handler(job):
if job._spec.container_name:
dockerjob.docker_kill(job._spec.container_name)
# When the job times out and we decide to kill it,
# we need to wait a bit before restarting the job
# to prevent "container name already in use" error.
# TODO(jtattermusch): figure out a cleaner way to do this.
time.sleep(2)
def cloud_to_cloud_jobspec(language,
test_cases,
server_addresses,
test_duration_secs,
num_channels_per_server,
num_stubs_per_channel,
metrics_port,
docker_image=None):
"""Creates jobspec for cloud-to-cloud interop test"""
cmdline = bash_login_cmdline(language.client_cmd([
'--test_cases=%s' % test_cases, '--server_addresses=%s' %
server_addresses, '--test_duration_secs=%s' % test_duration_secs,
'--num_stubs_per_channel=%s' % num_stubs_per_channel,
'--num_channels_per_server=%s' % num_channels_per_server,
'--metrics_port=%s' % metrics_port
]))
print(cmdline)
cwd = language.client_cwd
environ = language.global_env()
if docker_image:
container_name = dockerjob.random_name('interop_client_%s' %
language.safename)
cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
environ=environ,
cwd=cwd,
docker_args=['--net=host', '--name', container_name])
cwd = None
test_job = jobset.JobSpec(cmdline=cmdline,
cwd=cwd,
environ=environ,
shortname='cloud_to_cloud:%s:%s_server:stress_test' % (
language, server_name),
timeout_seconds=test_duration_secs * 2,
flake_retries=0,
timeout_retries=0,
kill_handler=_job_kill_handler)
test_job.container_name = container_name
return test_job
def server_jobspec(language, docker_image, test_duration_secs):
"""Create jobspec for running a server"""
container_name = dockerjob.random_name('interop_server_%s' %
language.safename)
cmdline = bash_login_cmdline(language.server_cmd(['--port=%s' %
_DEFAULT_SERVER_PORT]))
environ = language.global_env()
docker_cmdline = docker_run_cmdline(
cmdline,
image=docker_image,
cwd=language.server_cwd,
environ=environ,
docker_args=['-p', str(_DEFAULT_SERVER_PORT), '--name', container_name])
server_job = jobset.JobSpec(cmdline=docker_cmdline,
environ=environ,
shortname='interop_server_%s' % language,
timeout_seconds=test_duration_secs * 3)
server_job.container_name = container_name
return server_job
def build_interop_stress_image_jobspec(language, tag=None):
"""Creates jobspec for building stress test docker image for a language"""
if not tag:
tag = 'grpc_interop_stress_%s:%s' % (language.safename, uuid.uuid4())
env = {'INTEROP_IMAGE': tag,
'BASE_NAME': 'grpc_interop_stress_%s' % language.safename}
build_job = jobset.JobSpec(cmdline=['tools/run_tests/dockerize/build_interop_stress_image.sh'],
environ=env,
shortname='build_docker_%s' % (language),
timeout_seconds=30 * 60)
build_job.tag = tag
return build_job
argp = argparse.ArgumentParser(description='Run stress tests.')
argp.add_argument('-l',
'--language',
choices=['all'] + sorted(_LANGUAGES),
nargs='+',
default=['all'],
help='Clients to run.')
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument(
'-s',
'--server',
choices=['all'] + sorted(_SERVERS),
action='append',
help='Run cloud_to_cloud servers in a separate docker ' + 'image.',
default=[])
argp.add_argument(
'--override_server',
action='append',
type=lambda kv: kv.split('='),
help=
'Use servername=HOST:PORT to explicitly specify a server. E.g. '
'csharp=localhost:50000',
default=[])
argp.add_argument('--test_duration_secs',
help='The duration of the test in seconds',
default=_DEFAULT_TEST_DURATION_SECS)
args = argp.parse_args()
servers = set(
s
for s in itertools.chain.from_iterable(_SERVERS if x == 'all' else [x]
for x in args.server))
languages = set(_LANGUAGES[l] for l in itertools.chain.from_iterable(
six.iterkeys(_LANGUAGES) if x == 'all' else [x] for x in args.language))
docker_images = {}
# languages for which to build docker images
languages_to_build = set(
_LANGUAGES[k]
for k in set([str(l) for l in languages] + [s for s in servers]))
build_jobs = []
for l in languages_to_build:
job = build_interop_stress_image_jobspec(l)
docker_images[str(l)] = job.tag
build_jobs.append(job)
if build_jobs:
jobset.message('START', 'Building interop docker images.', do_newline=True)
num_failures, _ = jobset.run(build_jobs,
newline_on_success=True,
maxjobs=args.jobs)
if num_failures == 0:
jobset.message('SUCCESS',
'All docker images built successfully.',
do_newline=True)
else:
jobset.message('FAILED',
'Failed to build interop docker images.',
do_newline=True)
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
# Start interop servers.
server_jobs = {}
server_addresses = {}
try:
for s in servers:
lang = str(s)
spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang), args.test_duration_secs)
job = dockerjob.DockerJob(spec)
server_jobs[lang] = job
server_addresses[lang] = ('localhost',
job.mapped_port(_DEFAULT_SERVER_PORT))
jobs = []
for server in args.override_server:
server_name = server[0]
(server_host, server_port) = server[1].split(':')
server_addresses[server_name] = (server_host, server_port)
for server_name, server_address in server_addresses.items():
(server_host, server_port) = server_address
for language in languages:
test_job = cloud_to_cloud_jobspec(
language,
_DEFAULT_TEST_CASES,
('%s:%s' % (server_host, server_port)),
args.test_duration_secs,
_DEFAULT_NUM_CHANNELS_PER_SERVER,
_DEFAULT_NUM_STUBS_PER_CHANNEL,
_DEFAULT_METRICS_PORT,
docker_image=docker_images.get(str(language)))
jobs.append(test_job)
if not jobs:
print('No jobs to run.')
for image in six.itervalues(docker_images):
dockerjob.remove_image(image, skip_nonexistent=True)
sys.exit(1)
num_failures, resultset = jobset.run(jobs,
newline_on_success=True,
maxjobs=args.jobs)
if num_failures:
jobset.message('FAILED', 'Some tests failed', do_newline=True)
else:
jobset.message('SUCCESS', 'All tests passed', do_newline=True)
finally:
# Check if servers are still running.
for server, job in server_jobs.items():
if not job.is_running():
print('Server "%s" has exited prematurely.' % server)
dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
for image in six.itervalues(docker_images):
print('Removing docker image %s' % image)
dockerjob.remove_image(image)

@ -1,76 +0,0 @@
Running Stress tests on Google Container Engine
=======================================
### **Glossary**:
* GCP: Google Cloud Platform
* GCE: Google Compute Engine
* GKE: Google Container Engine
* GCP console: https://console.cloud.google.com
### **Setup Instructions**
#### *On GCP:*
1. Login to GCP with your Google account (for example, your @gmail account) at https://cloud.google.com. If you do not have a Google account, you will have to create one first.
2. Enable billing on Google Cloud Platform. Instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) (see the '*Enable billing*' section).
3. Create a Project from the [GCP console](https://console.cloud.google.com), i.e. click on the project dropdown box on the top right (to the right of the search box) and select the '*Create a project*' option.
4. Enable the Container Engine API. Instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) (see the '*Enable the Container Engine API*' section). Alternatively, you can do the following:
- Click on the '*Products & Services*' icon on the top left (i.e. the icon with three small horizontal bars) and select '*API Manager*'
- Select the '*Container Engine API*' under '*Google Cloud APIs*' on the main page. Note that you might have to click on '*More*' under '*Google Cloud APIs*' to see the '*Container Engine API*' link
- Click on the '*Enable*' button. If the API is already enabled, the button's label will be '*Disable*' instead (do NOT click the button if its label is '*Disable*')
5. Create a Cluster from the GCP console.
- Go to the Container Engine section from the GCP console, i.e. click on the '*Products & Services*' icon on the top left (i.e. the icon with three small horizontal bars) and click on '*Container Engine*'
- Click '*Create Container Cluster*' and follow the instructions.
- The instructions for 'Name/Zone/MachineType' etc. are [here](https://cloud.google.com/container-engine/docs/clusters/operations) (**NOTE**: The page also has instructions for setting up default clusters and configuring `kubectl`. We will be doing that later)
- For the cluster size, a smaller size of < 10 GCE instances is good enough for our use cases - assuming that we are planning to run a reasonably small number of stress client instances. For the machine type, something like '2 vCPUs 7.5 GB' (available in the drop down box) should be good enough.
- **IMPORTANT**: Before hitting the '*Create*' button, click on the '*More*' link just above the '*Create*' button and select '*Enabled*' for BigQuery, '*Enabled*' for Cloud Platform and '*Read/Write*' for Cloud User Accounts.
- Create the cluster by clicking the '*Create*' button.
#### *On your machine* (or the machine from which stress tests on GKE are launched):
1. You need a working gRPC repository on your machine. If you do not have it, clone the grpc repository from github (https://github.com/grpc/grpc) and follow the instructions [here](https://github.com/grpc/grpc/blob/master/INSTALL.md)
2. Install Docker. Instructions [here](https://docs.docker.com/engine/installation/)
3. Install Google Cloud SDK. Instructions [here](https://cloud.google.com/sdk/). This installs the `gcloud` tool
4. Install `kubectl`, the Kubernetes command line tool, using `gcloud`, i.e.
- `$ gcloud components update kubectl`
- NOTE: If you are running this from a GCE instance, the command may fail with the following error:
```
You cannot perform this action because this Cloud SDK installation is
managed by an external package manager. If you would like to get the
latest version, please see our main download page at:
https://developers.google.com/cloud/sdk/
ERROR: (gcloud.components.update) The component manager is disabled for this installation
```
-- If so, you will have to manually install the Cloud SDK by doing the following:
```shell
$ # The following installs latest Cloud SDK and updates the PATH
$ # (Accept the default values when prompted)
$ curl https://sdk.cloud.google.com | bash
$ exec -l $SHELL
$ # Set the defaults. Pick the default GCE credentials when prompted (The service account
$ # name will have a name similar to: "xxx-compute@developer.gserviceaccount.com")
$ gcloud init
```
5. Install the Google Python client APIs:
- `$ sudo pip install --upgrade google-api-python-client`
- **Note**: Do `$ sudo apt-get install python-pip` (or `$ easy_install -U pip`) if you do not have pip
6. Install the `requests` Python package if you don’t have it already by doing `sudo pip install requests`. More details regarding the `requests` package are [here](http://docs.python-requests.org/en/master/user/install/)
7. Set the `gcloud` defaults: See the instructions [here](https://cloud.google.com/container-engine/docs/before-you-begin) under the "*Set gcloud defaults*" section.
- Make sure you also fetch the cluster credentials for the `kubectl` command to use, i.e. `$ gcloud container clusters get-credentials CLUSTER_NAME`
### **Launching Stress tests**
The stress tests are launched by the following script (path relative to the gRPC root directory):
`tools/run_tests/stress_test/run_on_gke.py`
You can find out more details by using the `--help` flag.
- `<grpc_root_dir>$ tools/run_tests/stress_test/run_on_gke.py --help`
> **Example**
> ```bash
> $ # Change to the grpc root directory
> $ cd $GRPC_ROOT
> $ tools/run_tests/stress_test/run_on_gke.py --project_id=sree-gce --config_file=tools/run_tests/stress_test/configs/opt.json
> ```
> The above runs the stress test on GKE under the project `sree-gce` in the default cluster (that you set by the `gcloud` command earlier). The test settings (like the number of client instances, servers, the parameters to pass, test cases etc.) are all loaded from the config file `$GRPC_ROOT/tools/run_tests/stress_test/configs/opt.json`

@ -1,25 +0,0 @@
Stress Test client Specification
=========================
This document specifies the features a stress test client should implement in order to work with the stress testing framework. The stress test clients are executed against the existing interop test servers.
**Requirements**
--------------
**1.** A stress test client should be able to repeatedly execute one or more of the existing 'interop test cases'. It may just be a wrapper around the existing interop test client. The exact command line arguments the client should support are listed in _Table 1_ below.
**2.** The stress test client must implement a metrics server defined by _[metrics.proto](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/metrics.proto)_ and must expose _qps_ as a `Long`-valued Gauge. The client can track the overall _qps_ in one Gauge or in multiple Gauges (for example: one per Channel or Stub).
The framework periodically queries the _qps_ by calling the `GetAllGauges()` method (the framework assumes that all the returned Gauges are _qps_ Gauges and adds them up to determine the final qps) and uses this to determine whether the stress test client is running, has crashed, or has stalled.
> *Note:* In this context, the term _**qps**_ means _interop test cases per second_ (not _messages per second_ or _rpc calls per second_)
**Table 1:** Command line arguments that should be supported by the stress test client.
>_**Note** The current C++ [stress client](https://github.com/grpc/grpc/blob/master/test/cpp/interop/stress_test.cc) supports more flags than those listed here but those flags will soon be deprecated._
Parameter | Description
----------------------|---------------------------------
`--server_addresses` | The stress client should accept a list of server addresses in the following format:<br> ```<name_1>:<port_1>,<name_2>:<port_2>..<name_N>:<port_N>``` <br> _Note:_ `<name>` can be either server name or IP address.<br><br>_Type:_ string <br>_default:_ ```localhost:8080``` <br>_Example:_ ``foo.foobar.com:8080,bar.foobar.com:8080`` <br><br> Currently, the stress test framework only passes one server address to the client.
`--test_cases` | List of test cases along with the relative weights in the following format:<br> `<testcase_1:w_1>,<testcase_2:w_2>...<testcase_n:w_n>`. <br> The test cases names are the same as those currently used by the interop clients<br><br>_Type:_ string <br>_Example:_ `empty_unary:20,large_unary:10,empty_stream:70` <br>(The stress client would then make `empty_unary` calls 20% of the time, `large_unary` calls 10% of the time and `empty_stream` calls 70% of the time.) <br>_Note:_ The weights need not add up to 100.
`--test_duration_secs` | The test duration in seconds. A value of -1 means that the test should run forever until forcefully terminated. <br>_Type:_ int <br>_default:_ -1
`--num_channels_per_server` | Number of channels (i.e. connections) to each server. <br> _Type:_ int <br> _default:_ 1 <br><br> _Note:_ Unfortunately, the term `channel` is used differently in `grpc-java` and `C based grpc`. In this context, it really means "the number of connections to the server"
`--num_stubs_per_channel ` | Number of client stubs per each connection to server.<br>_Type:_ int <br>_default:_ 1
`--metrics_port` | The port at which the stress client exposes [QPS metrics](https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/metrics.proto). <br>_Type:_ int <br>_default:_ 8081
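As a concrete example, a C++ stress client honoring Table 1 could be launched like this (the flag values are illustrative; the binary path matches the C++ stress client referenced above):

```python
import subprocess

cmd = ['bins/opt/stress_test',
       '--server_addresses=localhost:8080',
       '--test_cases=empty_unary:20,large_unary:10,empty_stream:70',
       '--test_duration_secs=-1',        # run until forcefully terminated
       '--num_channels_per_server=5',
       '--num_stubs_per_channel=10',
       '--metrics_port=8081']
subprocess.check_call(cmd)
```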

@ -1,85 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_asan" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "asan"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 120,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"cxx_client_asan": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/asan/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/asan/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_asan": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/asan/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-asan": {
"serverTemplate": "cxx_server_asan",
"dockerImage": "grpc_stress_cxx_asan",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-asan": {
"clientTemplate": "cxx_client_asan",
"dockerImage": "grpc_stress_cxx_asan",
"numInstances": 5,
"serverPodSpec": "stress-server-asan"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8003,
"datasetIdNamePrefix": "stress_test_asan",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}
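The configs above pair each concrete template with a base template. A plausible way to read them (an assumption about the framework, not code taken from it) is to overlay the template's keys on its base; the config path below is hypothetical:

```python
import json

with open('tools/run_tests/stress_test/configs/asan.json') as f:  # hypothetical path
    cfg = json.load(f)

client_templates = cfg['clientTemplates']
base = client_templates['baseTemplates']['default']
tmpl = client_templates['templates']['cxx_client_asan']
resolved = dict(base)
resolved.update((k, v) for k, v in tmpl.items() if k != 'baseTemplate')
print(resolved['stressClientCmd'])
# ['/var/local/git/grpc/bins/asan/stress_test']
```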

@ -1,91 +0,0 @@
{
"dockerImages": {
"grpc_stress_csharp" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_csharp"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 100,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true",
"deadline_secs": 60
}
}
},
"templates": {
"csharp_client": {
"baseTemplate": "default",
"stressClientCmd": [
"mono",
"/var/local/git/grpc/src/csharp/Grpc.IntegrationTesting.StressClient/bin/Debug/Grpc.IntegrationTesting.StressClient.exe"
],
"metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"csharp_server": {
"baseTemplate": "default",
"stressServerCmd": [
"mono",
"/var/local/git/grpc/src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/Grpc.IntegrationTesting.Server.exe"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-csharp": {
"serverTemplate": "csharp_server",
"dockerImage": "grpc_stress_csharp",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-csharp": {
"clientTemplate": "csharp_client",
"dockerImage": "grpc_stress_csharp",
"numInstances": 10,
"serverPodSpec": "stress-server-csharp"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 100,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8009,
"datasetIdNamePrefix": "stress_test_csharp",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@ -1,96 +0,0 @@
{
"dockerImages": {
"grpc_stress_go" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_go"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"go_client": {
"baseTemplate": "default",
"stressClientCmd": [
"go",
"run",
"/go/src/google.golang.org/grpc/stress/client/main.go"
],
"metricsClientCmd": [
"go",
"run",
"/go/src/google.golang.org/grpc/stress/metrics_client/main.go"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"go_server": {
"baseTemplate": "default",
"stressServerCmd": [
"go",
"run",
"/go/src/google.golang.org/grpc/interop/server/server.go"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"go-stress-server": {
"serverTemplate": "go_server",
"dockerImage": "grpc_stress_go",
"numInstances": 1
}
},
"clientPodSpecs": {
"go-stress-client": {
"clientTemplate": "go_client",
"dockerImage": "grpc_stress_go",
"numInstances": 15,
"serverPodSpec": "go-stress-server"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8007,
"datasetIdNamePrefix": "stress_test_go",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@ -1,98 +0,0 @@
{
"dockerImages": {
"grpc_stress_java" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_java"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 100,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true",
"deadline_secs": 60
},
"env": {
"STRESSTEST_CLIENT_OPTS":"-Xmx3g -Xms3g -XX:NewSize=1500m -XX:MaxNewSize=1500m -XX:+UseConcMarkSweepGC"
}
}
},
"templates": {
"java_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/stresstest-client"
],
"metricsClientCmd": [
"/var/local/git/grpc/bins/opt/metrics_client"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080,
"use_tls": "false"
},
"env": {
"TEST_SERVER_OPTS":"-Xmx3g -Xms3g -XX:NewSize=1500m -XX:MaxNewSize=1500m -XX:+UseConcMarkSweepGC"
}
}
},
"templates": {
"java_server": {
"baseTemplate": "default",
"stressServerCmd": [
"/var/local/git/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/test-server"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"java-stress-server": {
"serverTemplate": "java_server",
"dockerImage": "grpc_stress_java",
"numInstances": 1
}
},
"clientPodSpecs": {
"java-stress-client": {
"clientTemplate": "java_client",
"dockerImage": "grpc_stress_java",
"numInstances": 10,
"serverPodSpec": "java-stress-server"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 100,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8008,
"datasetIdNamePrefix": "stress_test_java",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@ -1,97 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_opt" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "opt"
},
"grpc_stress_node": {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_node"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"node_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
"node",
"/var/local/git/grpc/src/node/stress/stress_client.js"
],
"metricsClientCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
"node",
"/var/local/git/grpc/src/node/stress/metrics_client.js"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_opt": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-cxx-opt": {
"serverTemplate": "cxx_server_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-node": {
"clientTemplate": "node_client",
"dockerImage": "grpc_stress_node",
"numInstances": 20,
"serverPodSpec": "stress-server-cxx-opt"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8006,
"datasetIdNamePrefix": "stress_test_node_cxx_opt",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,96 +0,0 @@
{
"dockerImages": {
"grpc_stress_node" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_node"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"node_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
"node",
"/var/local/git/grpc/src/node/stress/stress_client.js"
],
"metricsClientCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
"node",
"/var/local/git/grpc/src/node/stress/metrics_client.js"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"node_server": {
"baseTemplate": "default",
"stressServerCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_node.sh",
"node",
"/var/local/git/grpc/src/node/interop/interop_server.js"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"node-stress-server": {
"serverTemplate": "node_server",
"dockerImage": "grpc_stress_node",
"numInstances": 1
}
},
"clientPodSpecs": {
"node-stress-client": {
"clientTemplate": "node_client",
"dockerImage": "grpc_stress_node",
"numInstances": 15,
"serverPodSpec": "node-stress-server"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8005,
"datasetIdNamePrefix": "stress_test_node",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,134 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_opt" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "opt"
},
"grpc_stress_cxx_tsan": {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "tsan"
},
"grpc_stress_cxx_asan": {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "asan"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"cxx_client_opt": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/opt/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
},
"cxx_client_tsan": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/tsan/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/tsan/metrics_client"]
},
"cxx_client_asan": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/asan/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/asan/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_opt": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
},
"cxx_server_tsan": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/tsan/interop_server"]
},
"cxx_server_asan": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/asan/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-opt": {
"serverTemplate": "cxx_server_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 1
},
"stress-server-tsan": {
"serverTemplate": "cxx_server_tsan",
"dockerImage": "grpc_stress_cxx_tsan",
"numInstances": 1
},
"stress-server-asan": {
"serverTemplate": "cxx_server_asan",
"dockerImage": "grpc_stress_cxx_asan",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-opt": {
"clientTemplate": "cxx_client_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 5,
"serverPodSpec": "stress-server-opt"
},
"stress-client-tsan": {
"clientTemplate": "cxx_client_tsan",
"dockerImage": "grpc_stress_cxx_tsan",
"numInstances": 10,
"serverPodSpec": "stress-server-tsan"
},
"stress-client-asan": {
"clientTemplate": "cxx_client_asan",
"dockerImage": "grpc_stress_cxx_asan",
"numInstances": 10,
"serverPodSpec": "stress-server-asan"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8004,
"datasetIdNamePrefix": "stress_test_opt_tsan",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,85 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_opt" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "opt"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"cxx_client_opt": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/opt/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_opt": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-opt": {
"serverTemplate": "cxx_server_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-opt": {
"clientTemplate": "cxx_client_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 15,
"serverPodSpec": "stress-server-opt"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8001,
"datasetIdNamePrefix": "stress_test_opt",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,93 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_opt" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "opt"
},
"grpc_stress_php": {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_php"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081"
}
}
},
"templates": {
"php_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc/src/php/bin/stress_client.sh"
],
"metricsClientCmd": [
"php",
"/var/local/git/grpc/src/php/tests/interop/metrics_client.php"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_opt": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-cxx-php": {
"serverTemplate": "cxx_server_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-php": {
"clientTemplate": "php_client",
"dockerImage": "grpc_stress_php",
"numInstances": 20,
"serverPodSpec": "stress-server-cxx-php"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8010,
"datasetIdNamePrefix": "stress_test_php_cxx_opt",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,98 +0,0 @@
{
"dockerImages": {
"grpc_stress_python" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_python"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
},
"env": {
"PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
"LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
}
}
},
"templates": {
"python_client": {
"baseTemplate": "default",
"stressClientCmd": [
"python",
"/var/local/git/grpc/src/python/grpcio/tests/stress/client.py"
],
"metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
},
"env": {
"PYTHONPATH": "/var/local/git/grpc/src/python/gens:/var/local/git/grpc/src/python/grpcio",
"LD_LIBRARY_PATH":"/var/local/git/grpc/libs/opt"
}
}
},
"templates": {
"python_server": {
"baseTemplate": "default",
"stressServerCmd": [
"python",
"/var/local/git/grpc/src/python/grpcio/tests/interop/server.py"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"python-stress-server": {
"serverTemplate": "python_server",
"dockerImage": "grpc_stress_python",
"numInstances": 1
}
},
"clientPodSpecs": {
"python-stress-client": {
"clientTemplate": "python_client",
"dockerImage": "grpc_stress_python",
"numInstances": 5,
"serverPodSpec": "python-stress-server"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8011,
"datasetIdNamePrefix": "stress_test_python",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,92 +0,0 @@
{
"dockerImages": {
"grpc_stress_ruby" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_ruby"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"ruby_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
"ruby",
"/var/local/git/grpc/src/ruby/stress/stress_client.rb"
],
"metricsClientCmd": ["/var/local/git/grpc/bins/opt/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"ruby_server": {
"baseTemplate": "default",
"stressServerCmd": [
"/var/local/git/grpc/tools/gcp/stress_test/run_ruby.sh",
"ruby",
"/var/local/git/grpc/src/ruby/pb/test/server.rb"
]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-ruby": {
"serverTemplate": "ruby_server",
"dockerImage": "grpc_stress_ruby",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-ruby": {
"clientTemplate": "ruby_client",
"dockerImage": "grpc_stress_ruby",
"numInstances": 10,
"serverPodSpec": "stress-server-ruby"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8001,
"datasetIdNamePrefix": "stress_test_ruby",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,85 +0,0 @@
{
"dockerImages": {
"grpc_stress_cxx_tsan" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "tsan"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 120,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081",
"total_only": "true"
}
}
},
"templates": {
"cxx_client_tsan": {
"baseTemplate": "default",
"stressClientCmd": ["/var/local/git/grpc/bins/tsan/stress_test"],
"metricsClientCmd": ["/var/local/git/grpc/bins/tsan/metrics_client"]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_tsan": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/tsan/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-tsan": {
"serverTemplate": "cxx_server_tsan",
"dockerImage": "grpc_stress_cxx_tsan",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-tsan": {
"clientTemplate": "cxx_client_tsan",
"dockerImage": "grpc_stress_cxx_tsan",
"numInstances": 5,
"serverPodSpec": "stress-server-tsan"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8002,
"datasetIdNamePrefix": "stress_test_tsan",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}

@@ -1,59 +0,0 @@
#!/usr/bin/env python
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import sys
stress_test_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/stress_test'))
sys.path.append(stress_test_utils_dir)
from stress_test_utils import BigQueryHelper
argp = argparse.ArgumentParser(
description='Print summary tables',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argp.add_argument('--gcp_project_id',
required=True,
help='The Google Cloud Platform Project Id')
argp.add_argument('--dataset_id', type=str, required=True)
argp.add_argument('--run_id', type=str, required=True)
argp.add_argument('--summary_table_id', type=str, default='summary')
argp.add_argument('--qps_table_id', type=str, default='qps')
argp.add_argument('--summary_only', action='store_true', default=False)
if __name__ == '__main__':
args = argp.parse_args()
bq_helper = BigQueryHelper(args.run_id, '', '', args.gcp_project_id,
args.dataset_id, args.summary_table_id,
args.qps_table_id)
bq_helper.initialize()
if not args.summary_only:
bq_helper.print_qps_records()
bq_helper.print_summary_records()
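# Example invocation (script name, project id and run/dataset ids below are
# hypothetical, shown only to illustrate the flags this script accepts):
#   python print_summary.py --gcp_project_id=my-gcp-project \
#       --dataset_id=stress_test_go_2016_05_09_13_45_30 \
#       --run_id=2016_05_09_13_45_30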

@@ -1,674 +0,0 @@
#!/usr/bin/env python
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import datetime
import json
import os
import subprocess
import sys
import time
stress_test_utils_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/stress_test'))
sys.path.append(stress_test_utils_dir)
from stress_test_utils import BigQueryHelper
kubernetes_api_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../gcp/utils'))
sys.path.append(kubernetes_api_dir)
import kubernetes_api
class GlobalSettings:
def __init__(self, gcp_project_id, build_docker_images,
test_poll_interval_secs, test_duration_secs,
kubernetes_proxy_port, dataset_id_prefix, summary_table_id,
qps_table_id, pod_warmup_secs):
self.gcp_project_id = gcp_project_id
self.build_docker_images = build_docker_images
self.test_poll_interval_secs = test_poll_interval_secs
self.test_duration_secs = test_duration_secs
self.kubernetes_proxy_port = kubernetes_proxy_port
self.dataset_id_prefix = dataset_id_prefix
self.summary_table_id = summary_table_id
self.qps_table_id = qps_table_id
self.pod_warmup_secs = pod_warmup_secs
class ClientTemplate:
""" Contains all the common settings that are used by a stress client """
def __init__(self, name, stress_client_cmd, metrics_client_cmd, metrics_port,
wrapper_script_path, poll_interval_secs, client_args_dict,
metrics_args_dict, will_run_forever, env_dict):
self.name = name
self.stress_client_cmd = stress_client_cmd
self.metrics_client_cmd = metrics_client_cmd
self.metrics_port = metrics_port
self.wrapper_script_path = wrapper_script_path
self.poll_interval_secs = poll_interval_secs
self.client_args_dict = client_args_dict
self.metrics_args_dict = metrics_args_dict
self.will_run_forever = will_run_forever
self.env_dict = env_dict
class ServerTemplate:
""" Contains all the common settings used by a stress server """
def __init__(self, name, server_cmd, wrapper_script_path, server_port,
server_args_dict, will_run_forever, env_dict):
self.name = name
self.server_cmd = server_cmd
self.wrapper_script_path = wrapper_script_path
self.server_port = server_port
self.server_args_dict = server_args_dict
self.will_run_forever = will_run_forever
self.env_dict = env_dict
class DockerImage:
""" Represents properties of a Docker image. Provides methods to build the
image and push it to the GKE registry
"""
def __init__(self, gcp_project_id, image_name, build_script_path,
dockerfile_dir, build_type):
"""Args:
image_name: The docker image name
tag_name: The additional tag name. This is the name used when pushing the
docker image to GKE registry
build_script_path: The path to the build script that builds this docker
image
dockerfile_dir: The name of the directory under
'<grpc_root>/tools/dockerfile' that contains the dockerfile
"""
self.image_name = image_name
self.gcp_project_id = gcp_project_id
self.build_script_path = build_script_path
self.dockerfile_dir = dockerfile_dir
self.build_type = build_type
self.tag_name = self._make_tag_name(gcp_project_id, image_name)
def _make_tag_name(self, project_id, image_name):
return 'gcr.io/%s/%s' % (project_id, image_name)
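# Illustration (hypothetical project id): _make_tag_name('my-gcp-project',
# 'grpc_stress_go') returns 'gcr.io/my-gcp-project/grpc_stress_go'.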
def build_image(self):
print('Building docker image: %s (tag: %s)' % (self.image_name,
self.tag_name))
os.environ['INTEROP_IMAGE'] = self.image_name
os.environ['INTEROP_IMAGE_REPOSITORY_TAG'] = self.tag_name
os.environ['BASE_NAME'] = self.dockerfile_dir
os.environ['BUILD_TYPE'] = self.build_type
print('DEBUG: path: ', self.build_script_path)
if subprocess.call(args=[self.build_script_path]) != 0:
print('Error in building the Docker image')
return False
return True
def push_to_gke_registry(self):
cmd = ['gcloud', 'docker', 'push', self.tag_name]
print('Pushing %s to the GKE registry..' % self.tag_name)
if subprocess.call(args=cmd) != 0:
print('Error in pushing the image %s to the GKE registry' %
self.tag_name)
return False
return True
class ServerPodSpec:
""" Contains the information required to launch server pods. """
def __init__(self, name, server_template, docker_image, num_instances):
self.name = name
self.template = server_template
self.docker_image = docker_image
self.num_instances = num_instances
def pod_names(self):
""" Return a list of names of server pods to create. """
return ['%s-%d' % (self.name, i) for i in range(1, self.num_instances + 1)]
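# Illustration: a server pod spec named 'go-stress-server' with num_instances=3
# yields ['go-stress-server-1', 'go-stress-server-2', 'go-stress-server-3'].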
def server_addresses(self):
""" Return string of server addresses in the following format:
'<server_pod_name_1>:<server_port>,<server_pod_name_2>:<server_port>...'
"""
return ','.join(['%s:%d' % (pod_name, self.template.server_port)
for pod_name in self.pod_names()])
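# Illustration: for the three pods above with the default server port 8080,
# this returns
# 'go-stress-server-1:8080,go-stress-server-2:8080,go-stress-server-3:8080'.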
class ClientPodSpec:
""" Contains the information required to launch client pods """
def __init__(self, name, client_template, docker_image, num_instances,
server_addresses):
self.name = name
self.template = client_template
self.docker_image = docker_image
self.num_instances = num_instances
self.server_addresses = server_addresses
def pod_names(self):
""" Return a list of names of client pods to create """
return ['%s-%d' % (self.name, i) for i in range(1, self.num_instances + 1)]
# The client args in the template do not have server addresses. This function
# adds the server addresses and returns the updated client args
def get_client_args_dict(self):
args_dict = self.template.client_args_dict.copy()
args_dict['server_addresses'] = self.server_addresses
return args_dict
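# Illustration: if the template's clientArgs are {'num_channels_per_server': 5,
# 'num_stubs_per_channel': 10, ...} and server_addresses is
# 'go-stress-server-1:8080', the returned copy additionally contains
# 'server_addresses': 'go-stress-server-1:8080'.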
class Gke:
""" Class that has helper methods to interact with GKE """
class KubernetesProxy:
"""Class to start a proxy on localhost to talk to the Kubernetes API server"""
def __init__(self, port):
cmd = ['kubectl', 'proxy', '--port=%d' % port]
self.p = subprocess.Popen(args=cmd)
time.sleep(2)
print('\nStarted kubernetes proxy on port: %d' % port)
def __del__(self):
if self.p is not None:
print('Shutting down Kubernetes proxy..')
self.p.kill()
def __init__(self, project_id, run_id, dataset_id, summary_table_id,
qps_table_id, kubernetes_port):
self.project_id = project_id
self.run_id = run_id
self.dataset_id = dataset_id
self.summary_table_id = summary_table_id
self.qps_table_id = qps_table_id
# The environment variables we would like to pass to every pod (both client
# and server) launched in GKE
self.gke_env = {
'RUN_ID': self.run_id,
'GCP_PROJECT_ID': self.project_id,
'DATASET_ID': self.dataset_id,
'SUMMARY_TABLE_ID': self.summary_table_id,
'QPS_TABLE_ID': self.qps_table_id
}
self.kubernetes_port = kubernetes_port
# Start kubernetes proxy
self.kubernetes_proxy = Gke.KubernetesProxy(kubernetes_port)
def _args_dict_to_str(self, args_dict):
return ' '.join('--%s=%s' % (k, args_dict[k]) for k in args_dict.keys())
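# Illustration: {'port': 8080, 'use_tls': 'false'} is rendered as
# '--port=8080 --use_tls=false' (flag order follows dict iteration order).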
def launch_servers(self, server_pod_spec):
is_success = True
# The command to run inside the container is the wrapper script (which then
# launches the actual server)
container_cmd = server_pod_spec.template.wrapper_script_path
# The parameters to the wrapper script (defined in
# server_pod_spec.template.wrapper_script_path) are injected into the
# container via environment variables
server_env = self.gke_env.copy()
server_env.update(server_pod_spec.template.env_dict)
server_env.update({
'STRESS_TEST_IMAGE_TYPE': 'SERVER',
'STRESS_TEST_CMD': server_pod_spec.template.server_cmd,
'STRESS_TEST_ARGS_STR': self._args_dict_to_str(
server_pod_spec.template.server_args_dict),
'WILL_RUN_FOREVER': str(server_pod_spec.template.will_run_forever)
})
for pod_name in server_pod_spec.pod_names():
server_env['POD_NAME'] = pod_name
print('Creating server: %s' % pod_name)
is_success = kubernetes_api.create_pod_and_service(
'localhost',
self.kubernetes_port,
'default', # Use 'default' namespace
pod_name,
server_pod_spec.docker_image.tag_name,
[server_pod_spec.template.server_port], # Ports to expose on the pod
[container_cmd],
[], # Args list is empty since we are passing all args via env variables
server_env,
True # Headless = True for the server so that GKE creates a DNS record for pod_name
)
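# Illustration: for pod 'go-stress-server-1' this exposes port 8080 on the pod
# and, since the service is headless, other pods can reach it by the DNS name
# 'go-stress-server-1'; the server_addresses() strings rely on those DNS names.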
if not is_success:
print('Error in launching server: %s' % pod_name)
break
if is_success:
print('Successfully created server(s)')
return is_success
def launch_clients(self, client_pod_spec):
is_success = True
# The command to run inside the container is the wrapper script (which then
# launches the actual stress client)
container_cmd = client_pod_spec.template.wrapper_script_path
# The parameters to the wrapper script (defined in
# client_pod_spec.template.wrapper_script_path) are injected into the
# container via environment variables
client_env = self.gke_env.copy()
client_env.update(client_pod_spec.template.env_dict)
client_env.update({
'STRESS_TEST_IMAGE_TYPE': 'CLIENT',
'STRESS_TEST_CMD': client_pod_spec.template.stress_client_cmd,
'STRESS_TEST_ARGS_STR': self._args_dict_to_str(
client_pod_spec.get_client_args_dict()),
'METRICS_CLIENT_CMD': client_pod_spec.template.metrics_client_cmd,
'METRICS_CLIENT_ARGS_STR': self._args_dict_to_str(
client_pod_spec.template.metrics_args_dict),
'POLL_INTERVAL_SECS': str(client_pod_spec.template.poll_interval_secs),
'WILL_RUN_FOREVER': str(client_pod_spec.template.will_run_forever)
})
for pod_name in client_pod_spec.pod_names():
client_env['POD_NAME'] = pod_name
print('Creating client: %s' % pod_name)
is_success = kubernetes_api.create_pod_and_service(
'localhost',
self.kubernetes_port,
'default', # default namespace,
pod_name,
client_pod_spec.docker_image.tag_name,
[client_pod_spec.template.metrics_port], # Ports to expose on the pod
[container_cmd],
[], # Empty args list since all args are passed via env variables
client_env,
True # Client is a headless service (no need for an external ip)
)
if not is_success:
print('Error in launching client %s' % pod_name)
break
if is_success:
print('Successfully created all client(s)')
return is_success
def _delete_pods(self, pod_name_list):
is_success = True
for pod_name in pod_name_list:
print('Deleting %s' % pod_name)
is_success = kubernetes_api.delete_pod_and_service(
'localhost',
self.kubernetes_port,
'default', # default namespace
pod_name)
if not is_success:
print('Error in deleting pod %s' % pod_name)
break
if is_success:
print('Successfully deleted all pods')
return is_success
def delete_servers(self, server_pod_spec):
return self._delete_pods(server_pod_spec.pod_names())
def delete_clients(self, client_pod_spec):
return self._delete_pods(client_pod_spec.pod_names())
class Config:
def __init__(self, config_filename, gcp_project_id):
print('Loading configuration...')
config_dict = self._load_config(config_filename)
self.global_settings = self._parse_global_settings(config_dict,
gcp_project_id)
self.docker_images_dict = self._parse_docker_images(
config_dict, self.global_settings.gcp_project_id)
self.client_templates_dict = self._parse_client_templates(config_dict)
self.server_templates_dict = self._parse_server_templates(config_dict)
self.server_pod_specs_dict = self._parse_server_pod_specs(
config_dict, self.docker_images_dict, self.server_templates_dict)
self.client_pod_specs_dict = self._parse_client_pod_specs(
config_dict, self.docker_images_dict, self.client_templates_dict,
self.server_pod_specs_dict)
print('Loaded configuration.')
def _parse_global_settings(self, config_dict, gcp_project_id):
global_settings_dict = config_dict['globalSettings']
return GlobalSettings(gcp_project_id,
global_settings_dict['buildDockerImages'],
global_settings_dict['pollIntervalSecs'],
global_settings_dict['testDurationSecs'],
global_settings_dict['kubernetesProxyPort'],
global_settings_dict['datasetIdNamePrefix'],
global_settings_dict['summaryTableId'],
global_settings_dict['qpsTableId'],
global_settings_dict['podWarmupSecs'])
def _parse_docker_images(self, config_dict, gcp_project_id):
"""Parses the 'dockerImages' section of the config file and returns a
Dictionary of 'DockerImage' objects keyed by docker image names"""
docker_images_dict = {}
docker_config_dict = config_dict['dockerImages']
for image_name in docker_config_dict.keys():
build_script_path = docker_config_dict[image_name]['buildScript']
dockerfile_dir = docker_config_dict[image_name]['dockerFileDir']
build_type = docker_config_dict[image_name].get('buildType', 'opt')
docker_images_dict[image_name] = DockerImage(gcp_project_id, image_name,
build_script_path,
dockerfile_dir, build_type)
return docker_images_dict
def _parse_client_templates(self, config_dict):
"""Parses the 'clientTemplates' section of the config file and returns a
Dictionary of 'ClientTemplate' objects keyed by client template names.
Note: The 'baseTemplates' sub section of the config file contains templates
with default values and the 'templates' sub section contains the actual
client templates (which refer to the base template name to use for default
values).
"""
client_templates_dict = {}
templates_dict = config_dict['clientTemplates']['templates']
base_templates_dict = config_dict['clientTemplates'].get('baseTemplates',
{})
for template_name in templates_dict.keys():
# temp_dict is a temporary dictionary that merges base template dictionary
# and client template dictionary (with client template dictionary values
# overriding base template values)
temp_dict = {}
base_template_name = templates_dict[template_name].get('baseTemplate')
if base_template_name is not None:
temp_dict = base_templates_dict[base_template_name].copy()
temp_dict.update(templates_dict[template_name])
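# Illustration: the 'node_client' template above sets only 'baseTemplate' and
# its two command lists, so after this update() it inherits pollIntervalSecs,
# clientArgs and metricsArgs from the 'default' base template, while the keys
# it does define (stressClientCmd, metricsClientCmd) win over the base values.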
# Create and add ClientTemplate object to the final client_templates_dict
stress_client_cmd = ' '.join(temp_dict['stressClientCmd'])
metrics_client_cmd = ' '.join(temp_dict['metricsClientCmd'])
client_templates_dict[template_name] = ClientTemplate(
template_name, stress_client_cmd, metrics_client_cmd,
temp_dict['metricsPort'], temp_dict['wrapperScriptPath'],
temp_dict['pollIntervalSecs'], temp_dict['clientArgs'].copy(),
temp_dict['metricsArgs'].copy(), temp_dict.get('willRunForever', 1),
temp_dict.get('env', {}).copy())
return client_templates_dict
def _parse_server_templates(self, config_dict):
"""Parses the 'serverTemplates' section of the config file and returns a
Dictionary of 'ServerTemplate' objects keyed by server template names.
Note: The 'baseTemplates' sub section of the config file contains templates
with default values and the 'templates' sub section contains the actual
server templates (which refer to the base template name to use for default
values).
"""
server_templates_dict = {}
templates_dict = config_dict['serverTemplates']['templates']
base_templates_dict = config_dict['serverTemplates'].get('baseTemplates',
{})
for template_name in templates_dict.keys():
# temp_dict is a temporary dictionary that merges base template dictionary
# and server template dictionary (with server template dictionary values
# overriding base template values)
temp_dict = {}
base_template_name = templates_dict[template_name].get('baseTemplate')
if base_template_name is not None:
temp_dict = base_templates_dict[base_template_name].copy()
temp_dict.update(templates_dict[template_name])
# Create and add ServerTemplate object to the final server_templates_dict
stress_server_cmd = ' '.join(temp_dict['stressServerCmd'])
server_templates_dict[template_name] = ServerTemplate(
template_name, stress_server_cmd, temp_dict['wrapperScriptPath'],
temp_dict['serverPort'], temp_dict['serverArgs'].copy(),
temp_dict.get('willRunForever', 1), temp_dict.get('env', {}).copy())
return server_templates_dict
def _parse_server_pod_specs(self, config_dict, docker_images_dict,
server_templates_dict):
"""Parses the 'serverPodSpecs' sub-section (under 'testMatrix' section) of
the config file and returns a Dictionary of 'ServerPodSpec' objects keyed
by server pod spec names"""
server_pod_specs_dict = {}
pod_specs_dict = config_dict['testMatrix'].get('serverPodSpecs', {})
for pod_name in pod_specs_dict.keys():
server_template_name = pod_specs_dict[pod_name]['serverTemplate']
docker_image_name = pod_specs_dict[pod_name]['dockerImage']
num_instances = pod_specs_dict[pod_name].get('numInstances', 1)
# Create and add the ServerPodSpec object to the final
# server_pod_specs_dict
server_pod_specs_dict[pod_name] = ServerPodSpec(
pod_name, server_templates_dict[server_template_name],
docker_images_dict[docker_image_name], num_instances)
return server_pod_specs_dict
def _parse_client_pod_specs(self, config_dict, docker_images_dict,
client_templates_dict, server_pod_specs_dict):
"""Parses the 'clientPodSpecs' sub-section (under 'testMatrix' section) of
the config file and returns a Dictionary of 'ClientPodSpec' objects keyed
by client pod spec names"""
client_pod_specs_dict = {}
pod_specs_dict = config_dict['testMatrix'].get('clientPodSpecs', {})
for pod_name in pod_specs_dict.keys():
client_template_name = pod_specs_dict[pod_name]['clientTemplate']
docker_image_name = pod_specs_dict[pod_name]['dockerImage']
num_instances = pod_specs_dict[pod_name]['numInstances']
# Get the server addresses from the server pod spec object
server_pod_spec_name = pod_specs_dict[pod_name]['serverPodSpec']
server_addresses = server_pod_specs_dict[
server_pod_spec_name].server_addresses()
client_pod_specs_dict[pod_name] = ClientPodSpec(
pod_name, client_templates_dict[client_template_name],
docker_images_dict[docker_image_name], num_instances,
server_addresses)
return client_pod_specs_dict
def _load_config(self, config_filename):
"""Opens the config file and converts the Json text to Dictionary"""
if not os.path.isabs(config_filename):
raise Exception('Config object expects an absolute file path. '
'Config file name passed: %s' % config_filename)
with open(config_filename) as config_file:
return json.load(config_file)
def run_tests(config):
""" The main function that launches the stress tests """
# Build docker images and push to GKE registry
if config.global_settings.build_docker_images:
for name, docker_image in config.docker_images_dict.iteritems():
if not (docker_image.build_image() and
docker_image.push_to_gke_registry()):
return False
# Create a unique id for this run (Note: Using a timestamp instead of a UUID
# makes it easier to deduce the date/time of the run just by looking at the
# run id. This is useful when debugging by looking at records in BigQuery)
run_id = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
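# Illustration (hypothetical start time): a run started at 2016-05-09 13:45:30
# gets run_id '2016_05_09_13_45_30' and, with the Go config above, dataset_id
# 'stress_test_go_2016_05_09_13_45_30'.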
dataset_id = '%s_%s' % (config.global_settings.dataset_id_prefix, run_id)
print('Run id:', run_id)
print('Dataset id:', dataset_id)
bq_helper = BigQueryHelper(run_id, '', '',
config.global_settings.gcp_project_id, dataset_id,
config.global_settings.summary_table_id,
config.global_settings.qps_table_id)
bq_helper.initialize()
gke = Gke(config.global_settings.gcp_project_id, run_id, dataset_id,
config.global_settings.summary_table_id,
config.global_settings.qps_table_id,
config.global_settings.kubernetes_proxy_port)
is_success = True
try:
print('Launching servers..')
for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
if not gke.launch_servers(server_pod_spec):
is_success = False # is_success is checked in the 'finally' block
return False
print('Launched servers. Waiting for %d seconds for the server pods to be '
'fully online' % config.global_settings.pod_warmup_secs)
time.sleep(config.global_settings.pod_warmup_secs)
for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
if not gke.launch_clients(client_pod_spec):
is_success = False # is_success is checked in the 'finally' block
return False
print('Launched all clients. Waiting for %d seconds for the client pods to '
'be fully online' % config.global_settings.pod_warmup_secs)
time.sleep(config.global_settings.pod_warmup_secs)
start_time = datetime.datetime.now()
end_time = start_time + datetime.timedelta(
seconds=config.global_settings.test_duration_secs)
print('Running the test until %s' % end_time.isoformat())
while True:
if datetime.datetime.now() > end_time:
print('Test was run for %d seconds' %
config.global_settings.test_duration_secs)
break
# Check if either the stress servers or clients have failed (the bq_helper
# monitors all the rows in the summary table and checks if any of them
# have a failure status)
if bq_helper.check_if_any_tests_failed():
is_success = False
print('Some tests failed.')
break # Don't 'return' here. We still want to call bq_helper to print qps/summary tables
# Tests running fine. Wait until next poll time to check the status
print('Sleeping for %d seconds..' %
config.global_settings.test_poll_interval_secs)
time.sleep(config.global_settings.test_poll_interval_secs)
# Print BigQuery tables
bq_helper.print_qps_records()
bq_helper.print_summary_records()
finally:
# If there was a test failure, we should not delete the pods since they
# would contain useful debug information (logs, core dumps, etc.)
if is_success:
for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
gke.delete_servers(server_pod_spec)
for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
gke.delete_clients(client_pod_spec)
return is_success
def tear_down(config):
gke = Gke(config.global_settings.gcp_project_id, '', '',
config.global_settings.summary_table_id,
config.global_settings.qps_table_id,
config.global_settings.kubernetes_proxy_port)
for name, server_pod_spec in config.server_pod_specs_dict.iteritems():
gke.delete_servers(server_pod_spec)
for name, client_pod_spec in config.client_pod_specs_dict.iteritems():
gke.delete_clients(client_pod_spec)
argp = argparse.ArgumentParser(
description='Launch stress tests in GKE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
argp.add_argument('--gcp_project_id',
required=True,
help='The Google Cloud Platform Project Id')
argp.add_argument('--config_file',
required=True,
type=str,
help='The test config file')
argp.add_argument('--tear_down', action='store_true', default=False)
if __name__ == '__main__':
args = argp.parse_args()
config_filename = args.config_file
# Since we will be changing the current working directory to grpc root in the
# next step, we should check if the config filename path is a relative path
# (i.e. a path relative to the current working directory) and, if so, convert
# it to an absolute path
if not os.path.isabs(config_filename):
config_filename = os.path.abspath(config_filename)
config = Config(config_filename, args.gcp_project_id)
# Change current working directory to grpc root
# (This is important because all relative file paths in the config file are
# supposed to be interpreted as relative to the grpc root)
grpc_root = os.path.abspath(os.path.join(
os.path.dirname(sys.argv[0]), '../../..'))
os.chdir(grpc_root)
# Note that tear_down is only used in cases where we want to manually tear down
# a test that run_tests() could not clean up for some reason
if args.tear_down:
tear_down(config)
sys.exit(1)
if not run_tests(config):
sys.exit(1)
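# Example invocation (script name and config path hypothetical, matching the
# --gcp_project_id and --config_file flags defined above):
#   python run_stress_tests_on_gke.py --gcp_project_id=my-gcp-project \
#       --config_file=tools/run_tests/stress_test/configs/go.json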