Merge branch 'master' of https://github.com/grpc/grpc into tracing-overhaul

Ref: pull/10833/head
Author: ncteisen
Commit: 1f6e176cc3
  1. examples/cpp/helloworld/Makefile (14 changes)
  2. examples/cpp/route_guide/Makefile (14 changes)
  3. src/core/lib/iomgr/ev_posix.c (2 changes)
  4. src/proto/grpc/testing/control.proto (6 changes)
  5. src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py (6 changes)
  6. src/ruby/lib/grpc/errors.rb (11 changes)
  7. test/core/channel/minimal_stack_is_minimal_test.c (8 changes)
  8. test/core/end2end/invalid_call_argument_test.c (2 changes)
  9. test/cpp/end2end/async_end2end_test.cc (2 changes)
  10. test/cpp/qps/client_async.cc (47 changes)
  11. test/cpp/qps/server_async.cc (18 changes)
  12. tools/gcp/utils/big_query_utils.py (23 changes)
  13. tools/internal_ci/helper_scripts/prepare_build_macos_rc (20 changes)
  14. tools/internal_ci/linux/grpc_master.cfg (7 changes)
  15. tools/internal_ci/linux/grpc_portability.cfg (7 changes)
  16. tools/internal_ci/linux/grpc_portability_build_only.cfg (7 changes)
  17. tools/internal_ci/linux/grpc_pull_request_sanity.cfg (7 changes)
  18. tools/internal_ci/linux/grpc_run_tests_matrix.sh (2 changes)
  19. tools/internal_ci/linux/grpc_sanity.cfg (7 changes)
  20. tools/internal_ci/linux/sanitizer/grpc_c_asan.cfg (7 changes)
  21. tools/internal_ci/linux/sanitizer/grpc_c_msan.cfg (7 changes)
  22. tools/internal_ci/linux/sanitizer/grpc_c_tsan.cfg (7 changes)
  23. tools/internal_ci/linux/sanitizer/grpc_c_ubsan.cfg (7 changes)
  24. tools/internal_ci/linux/sanitizer/grpc_cpp_asan.cfg (7 changes)
  25. tools/internal_ci/linux/sanitizer/grpc_cpp_tsan.cfg (7 changes)
  26. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_asan.cfg (19 changes)
  27. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_asan.sh (4 changes)
  28. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_msan.cfg (19 changes)
  29. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_msan.sh (4 changes)
  30. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_tsan.cfg (19 changes)
  31. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_tsan.sh (4 changes)
  32. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_ubsan.cfg (30 changes)
  33. tools/internal_ci/linux/sanitizer/pull_request/grpc_c_ubsan.sh (4 changes)
  34. tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_asan.cfg (30 changes)
  35. tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_asan.sh (4 changes)
  36. tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_tsan.cfg (30 changes)
  37. tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_tsan.sh (4 changes)
  38. tools/jenkins/run_performance.sh (2 changes)
  39. tools/jenkins/run_trickle_diff.sh (23 changes)
  40. tools/profiling/microbenchmarks/bm_diff/bm_build.py (13 changes)
  41. tools/profiling/microbenchmarks/bm_diff/bm_constants.py (3 changes)
  42. tools/profiling/microbenchmarks/bm_diff/bm_diff.py (35 changes)
  43. tools/profiling/microbenchmarks/bm_diff/bm_main.py (22 changes)
  44. tools/profiling/microbenchmarks/bm_diff/bm_run.py (14 changes)
  45. tools/run_tests/dockerize/build_docker_and_run_tests.sh (3 changes)
  46. tools/run_tests/generated/tests.json (634 changes)
  47. tools/run_tests/performance/scenario_config.py (71 changes)
  48. tools/run_tests/python_utils/upload_test_results.py (17 changes)
  49. tools/run_tests/run_tests.py (5 changes)

examples/cpp/helloworld/Makefile
@ -17,16 +17,16 @@
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
CPPFLAGS += -I/usr/local/include -pthread
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++ grpc` \
-lgrpc++_reflection \
-lprotobuf -lpthread -ldl
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
-lgrpc++_reflection\
-ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++ grpc` \
-Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed \
-lprotobuf -lpthread -ldl
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++ grpc`\
-Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
-ldl
endif
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin

examples/cpp/route_guide/Makefile
@ -17,16 +17,16 @@
HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
SYSTEM ?= $(HOST_SYSTEM)
CXX = g++
CPPFLAGS += -I/usr/local/include -pthread
CPPFLAGS += `pkg-config --cflags protobuf grpc`
CXXFLAGS += -std=c++11
ifeq ($(SYSTEM),Darwin)
LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++` \
-lgrpc++_reflection \
-lprotobuf -lpthread -ldl
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++`\
-lgrpc++_reflection\
-ldl
else
LDFLAGS += -L/usr/local/lib `pkg-config --libs grpc++` \
-Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed \
-lprotobuf -lpthread -ldl
LDFLAGS += -L/usr/local/lib `pkg-config --libs protobuf grpc++`\
-Wl,--no-as-needed -lgrpc++_reflection -Wl,--as-needed\
-ldl
endif
PROTOC = protoc
GRPC_CPP_PLUGIN = grpc_cpp_plugin

src/core/lib/iomgr/ev_posix.c
@ -63,13 +63,13 @@ typedef struct {
} event_engine_factory;
static const event_engine_factory g_factories[] = {
{"epollex", grpc_init_epollex_linux},
{"epollsig", grpc_init_epollsig_linux},
{"epoll1", grpc_init_epoll1_linux},
{"epoll-threadpool", grpc_init_epoll_thread_pool_linux},
{"epoll-limited", grpc_init_epoll_limited_pollers_linux},
{"poll", grpc_init_poll_posix},
{"poll-cv", grpc_init_poll_cv_posix},
{"epollex", grpc_init_epollex_linux},
};
static void add(const char *beg, const char *end, char ***ss, size_t *ns) {
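
The reorder above matters because the engine table is scanned front to back when an engine is chosen, so whichever factory appears first (and succeeds) becomes the default; moving "epollex" to the tail appears to demote it from preferred default to last-resort fallback. A generic sketch of that selection pattern, with hypothetical type and field names rather than the file's real ones:

/* Generic sketch (names hypothetical): the first factory whose init
   succeeds wins, so list order encodes engine preference. */
typedef struct {
  const char *name;
  const void *(*factory)(void);
} engine_factory;

static const void *pick_engine(const engine_factory *factories, size_t n) {
  for (size_t i = 0; i < n; i++) {
    const void *vtable = factories[i].factory();
    if (vtable != 0) return vtable; /* first successful init is the default */
  }
  return 0;
}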

src/proto/grpc/testing/control.proto
@ -102,6 +102,9 @@ message ClientConfig {
repeated ChannelArg channel_args = 16;
// Number of threads that share each completion queue
int32 threads_per_cq = 17;
// Number of messages on a stream before it gets finished/restarted
int32 messages_per_stream = 18;
}
@ -142,6 +145,9 @@ message ServerConfig {
// If we use an OTHER_SERVER client_type, this string gives more detail
string other_server_api = 11;
// Number of threads that share each completion queue
int32 threads_per_cq = 12;
// c++-only options (for now) --------------------------------
// Buffer pool size (no buffer pool specified if unset)
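
For context, a hedged C++ sketch of how a benchmark driver could set the two new knobs via the standard protoc-generated setters (the include path and values are illustrative, not taken from this commit):

#include "src/proto/grpc/testing/control.pb.h"

grpc::testing::ClientConfig MakeClientConfig() {
  grpc::testing::ClientConfig c;
  c.set_threads_per_cq(2);         // two client threads share each completion queue
  c.set_messages_per_stream(100);  // finish/restart a stream after 100 messages
  return c;
}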

src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@ -25,8 +25,6 @@ from google.protobuf import descriptor_pool
from google.protobuf import descriptor_pb2
from src.proto.grpc.testing import empty_pb2
#empty2_pb2 is imported for import-consequent side-effects.
from src.proto.grpc.testing.proto2 import empty2_pb2 # pylint: disable=unused-import
from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
from tests.unit.framework.common import test_constants
@ -48,12 +46,10 @@ def _file_descriptor_to_proto(descriptor):
class ReflectionServicerTest(unittest.TestCase):
def setUp(self):
servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES)
server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
self._server = grpc.server(server_pool)
reflection.enable_server_reflection(_SERVICE_NAMES, self._server)
port = self._server.add_insecure_port('[::]:0')
reflection_pb2_grpc.add_ServerReflectionServicer_to_server(servicer,
self._server)
self._server.start()
channel = grpc.insecure_channel('localhost:%d' % port)

src/ruby/lib/grpc/errors.rb
@ -50,7 +50,8 @@ module GRPC
Struct::Status.new(code, details, @metadata)
end
def self.new_status_exception(code, details = 'unkown cause', metadata = {})
def self.new_status_exception(code, details = 'unknown cause',
metadata = {})
codes = {}
codes[OK] = Ok
codes[CANCELLED] = Cancelled
@ -59,16 +60,16 @@ module GRPC
codes[DEADLINE_EXCEEDED] = DeadlineExceeded
codes[NOT_FOUND] = NotFound
codes[ALREADY_EXISTS] = AlreadyExists
codes[PERMISSION_DENIED] = PermissionDenied
codes[PERMISSION_DENIED] = PermissionDenied
codes[UNAUTHENTICATED] = Unauthenticated
codes[RESOURCE_EXHAUSTED] = ResourceExhausted
codes[FAILED_PRECONDITION] = FailedPrecondition
codes[ABORTED] = Aborted
codes[OUT_OF_RANGE] = OutOfRange
codes[UNIMPLEMENTED] = Unimplemented
codes[UNIMPLEMENTED] = Unimplemented
codes[INTERNAL] = Internal
codes[UNIMPLEMENTED] = Unimplemented
codes[UNAVAILABLE] = Unavailable
codes[UNIMPLEMENTED] = Unimplemented
codes[UNAVAILABLE] = Unavailable
codes[DATA_LOSS] = DataLoss
if codes[code].nil?

test/core/channel/minimal_stack_is_minimal_test.c
@ -44,7 +44,7 @@
// use CHECK_STACK instead
static int check_stack(const char *file, int line, const char *transport_name,
grpc_channel_args *init_args,
grpc_channel_stack_type channel_stack_type, ...);
unsigned channel_stack_type, ...);
// arguments: const char *transport_name - the name of the transport type to
// simulate
@ -111,7 +111,7 @@ int main(int argc, char **argv) {
static int check_stack(const char *file, int line, const char *transport_name,
grpc_channel_args *init_args,
grpc_channel_stack_type channel_stack_type, ...) {
unsigned channel_stack_type, ...) {
// create dummy channel stack
grpc_channel_stack_builder *builder = grpc_channel_stack_builder_create();
grpc_transport_vtable fake_transport_vtable = {.name = transport_name};
@ -125,8 +125,8 @@ static int check_stack(const char *file, int line, const char *transport_name,
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_channel_stack_builder_set_channel_arguments(&exec_ctx, builder,
channel_args);
GPR_ASSERT(
grpc_channel_init_create_stack(&exec_ctx, builder, channel_stack_type));
GPR_ASSERT(grpc_channel_init_create_stack(
&exec_ctx, builder, (grpc_channel_stack_type)channel_stack_type));
grpc_exec_ctx_finish(&exec_ctx);
}
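
The enum-to-unsigned switch is subtle: channel_stack_type is the last named parameter before the "...", and va_start has undefined behavior when that parameter's type can be changed by the default argument promotions, which an enum's implementation-chosen underlying type can be. A small self-contained illustration of the safe shape (hypothetical function and enum, not the test's code):

#include <stdarg.h>

enum tiny_enum { TINY_A, TINY_B }; /* stand-in for grpc_channel_stack_type */

static void consume(unsigned channel_stack_type, ...) {
  va_list args;
  va_start(args, channel_stack_type); /* OK: unsigned is promotion-stable */
  const char *filter = va_arg(args, const char *);
  (void)filter;
  va_end(args);
  enum tiny_enum t = (enum tiny_enum)channel_stack_type; /* cast back at use */
  (void)t;
}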

test/core/end2end/invalid_call_argument_test.c
@ -59,7 +59,7 @@ static void prepare_test(int is_client) {
g_state.is_client = is_client;
grpc_metadata_array_init(&g_state.initial_metadata_recv);
grpc_metadata_array_init(&g_state.trailing_metadata_recv);
g_state.deadline = grpc_timeout_seconds_to_deadline(2);
g_state.deadline = grpc_timeout_seconds_to_deadline(5);
g_state.cq = grpc_completion_queue_create_for_next(NULL);
g_state.cqv = cq_verifier_create(g_state.cq);
g_state.details = grpc_empty_slice();

test/cpp/end2end/async_end2end_test.cc
@ -196,7 +196,7 @@ bool plugin_has_sync_methods(std::unique_ptr<ServerBuilderPlugin>& plugin) {
// This class disables the server builder plugins that may add sync services to
// the server. If there are sync services, UnimplementedRpc test will triger
// the sync unkown rpc routine on the server side, rather than the async one
// the sync unknown rpc routine on the server side, rather than the async one
// that needs to be tested here.
class ServerBuilderSyncPluginDisabler : public ::grpc::ServerBuilderOption {
public:

test/cpp/qps/client_async.cc
@ -55,6 +55,11 @@ class ClientRpcContext {
}
virtual void Start(CompletionQueue* cq, const ClientConfig& config) = 0;
void lock() { mu_.lock(); }
void unlock() { mu_.unlock(); }
private:
std::mutex mu_;
};
template <class RequestType, class ResponseType>
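
The lock()/unlock() pair added above is what makes std::lock_guard<ClientRpcContext> legal later in this file: any type exposing those two public members satisfies the BasicLockable requirements, so the guard can hold the context itself instead of a separately tracked mutex. A self-contained sketch of the idiom with generic names:

#include <mutex>

class Guarded {
 public:
  void lock() { mu_.lock(); }
  void unlock() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

void Touch(Guarded& ctx) {
  std::lock_guard<Guarded> hold(ctx);  // locks ctx here, unlocks at scope exit
  // ... safely mutate ctx ...
}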
@ -106,6 +111,7 @@ class ClientRpcContextUnaryImpl : public ClientRpcContext {
void StartNewClone(CompletionQueue* cq) override {
auto* clone = new ClientRpcContextUnaryImpl(stub_, req_, next_issue_,
start_req_, callback_);
std::lock_guard<ClientRpcContext> lclone(*clone);
clone->StartInternal(cq);
}
@ -163,8 +169,14 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
num_async_threads_(NumThreads(config)) {
SetupLoadTest(config, num_async_threads_);
for (int i = 0; i < num_async_threads_; i++) {
int tpc = std::max(1, config.threads_per_cq()); // 1 if unspecified
int num_cqs = (num_async_threads_ + tpc - 1) / tpc; // ceiling operator
for (int i = 0; i < num_cqs; i++) {
cli_cqs_.emplace_back(new CompletionQueue);
}
for (int i = 0; i < num_async_threads_; i++) {
cq_.emplace_back(i % cli_cqs_.size());
next_issuers_.emplace_back(NextIssuer(i));
shutdown_state_.emplace_back(new PerThreadShutdownState());
}
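
A quick worked example of the thread-to-queue mapping set up above, with illustrative numbers:

#include <algorithm>

void MappingExample() {
  int num_threads = 8, threads_per_cq = 3;      // illustrative values
  int tpc = std::max(1, threads_per_cq);        // 3 threads per completion queue
  int num_cqs = (num_threads + tpc - 1) / tpc;  // ceil(8 / 3) = 3 queues
  // cq_[i] = i % num_cqs: threads {0,3,6} -> CQ 0, {1,4,7} -> CQ 1, {2,5} -> CQ 2.
  // With threads_per_cq unset (tpc == 1) this degenerates to the old
  // one-queue-per-thread behavior.
}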
@ -231,20 +243,36 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
void* got_tag;
bool ok;
if (cli_cqs_[thread_idx]->Next(&got_tag, &ok)) {
if (cli_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
// Got a regular event, so process it
ClientRpcContext* ctx = ClientRpcContext::detag(got_tag);
// Proceed while holding a lock to make sure that
// this thread isn't supposed to shut down
std::lock_guard<std::mutex> l(shutdown_state_[thread_idx]->mutex);
if (shutdown_state_[thread_idx]->shutdown) {
// We want to delete the context. However, it is possible that
// another thread that just initiated an action on this
// context still has its lock even though the action on the
// context has completed. To delay for that, just grab the
// lock for serialization. Take a new scope.
{ std::lock_guard<ClientRpcContext> lctx(*ctx); }
delete ctx;
return true;
} else if (!ctx->RunNextState(ok, entry)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
ctx->StartNewClone(cli_cqs_[thread_idx].get());
// delete the old version
}
bool del = false;
// Create a new scope for a lock_guard'ed region
{
std::lock_guard<ClientRpcContext> lctx(*ctx);
if (!ctx->RunNextState(ok, entry)) {
// The RPC and callback are done, so clone the ctx
// and kickstart the new one
ctx->StartNewClone(cli_cqs_[cq_[thread_idx]].get());
// set the old version to delete
del = true;
}
}
if (del) {
delete ctx;
}
return true;
@ -255,6 +283,7 @@ class AsyncClient : public ClientImpl<StubType, RequestType> {
}
std::vector<std::unique_ptr<CompletionQueue>> cli_cqs_;
std::vector<int> cq_;
std::vector<std::function<gpr_timespec()>> next_issuers_;
std::vector<std::unique_ptr<PerThreadShutdownState>> shutdown_state_;
};
@ -377,6 +406,7 @@ class ClientRpcContextStreamingPingPongImpl : public ClientRpcContext {
void StartNewClone(CompletionQueue* cq) override {
auto* clone = new ClientRpcContextStreamingPingPongImpl(
stub_, req_, next_issue_, start_req_, callback_);
std::lock_guard<ClientRpcContext> lclone(*clone);
clone->StartInternal(cq, messages_per_stream_);
}
@ -515,6 +545,7 @@ class ClientRpcContextStreamingFromClientImpl : public ClientRpcContext {
void StartNewClone(CompletionQueue* cq) override {
auto* clone = new ClientRpcContextStreamingFromClientImpl(
stub_, req_, next_issue_, start_req_, callback_);
std::lock_guard<ClientRpcContext> lclone(*clone);
clone->StartInternal(cq);
}
@ -632,6 +663,7 @@ class ClientRpcContextStreamingFromServerImpl : public ClientRpcContext {
void StartNewClone(CompletionQueue* cq) override {
auto* clone = new ClientRpcContextStreamingFromServerImpl(
stub_, req_, next_issue_, start_req_, callback_);
std::lock_guard<ClientRpcContext> lclone(*clone);
clone->StartInternal(cq);
}
@ -774,6 +806,7 @@ class ClientRpcContextGenericStreamingImpl : public ClientRpcContext {
void StartNewClone(CompletionQueue* cq) override {
auto* clone = new ClientRpcContextGenericStreamingImpl(
stub_, req_, next_issue_, start_req_, callback_);
std::lock_guard<ClientRpcContext> lclone(*clone);
clone->StartInternal(cq, messages_per_stream_);
}

test/cpp/qps/server_async.cc
@ -16,6 +16,7 @@
*
*/
#include <algorithm>
#include <forward_list>
#include <functional>
#include <memory>
@ -89,9 +90,14 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
gpr_log(GPR_INFO, "Sizing async server to %d threads", num_threads);
}
for (int i = 0; i < num_threads; i++) {
int tpc = std::max(1, config.threads_per_cq()); // 1 if unspecified
int num_cqs = (num_threads + tpc - 1) / tpc; // ceiling operator
for (int i = 0; i < num_cqs; i++) {
srv_cqs_.emplace_back(builder.AddCompletionQueue());
}
for (int i = 0; i < num_threads; i++) {
cq_.emplace_back(i % srv_cqs_.size());
}
if (config.resource_quota_size() > 0) {
builder.SetResourceQuota(ResourceQuota("AsyncQpsServerTest")
@ -105,7 +111,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::placeholders::_2);
for (int i = 0; i < 5000; i++) {
for (int j = 0; j < num_threads; j++) {
for (int j = 0; j < num_cqs; j++) {
if (request_unary_function) {
auto request_unary = std::bind(
request_unary_function, &async_service_, std::placeholders::_1,
@ -190,7 +196,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
// Wait until work is available or we are shutting down
bool ok;
void *got_tag;
while (srv_cqs_[thread_idx]->Next(&got_tag, &ok)) {
while (srv_cqs_[cq_[thread_idx]]->Next(&got_tag, &ok)) {
ServerRpcContext *ctx = detag(got_tag);
// The tag is a pointer to an RPC context to invoke
// Proceed while holding a lock to make sure that
@ -199,6 +205,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
if (shutdown_state_[thread_idx]->shutdown) {
return;
}
std::lock_guard<ServerRpcContext> l2(*ctx);
const bool still_going = ctx->RunNextState(ok);
// if this RPC context is done, refresh it
if (!still_going) {
@ -211,9 +218,13 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
class ServerRpcContext {
public:
ServerRpcContext() {}
void lock() { mu_.lock(); }
void unlock() { mu_.unlock(); }
virtual ~ServerRpcContext(){};
virtual bool RunNextState(bool) = 0; // next state, return false if done
virtual void Reset() = 0; // start this back at a clean state
private:
std::mutex mu_;
};
static void *tag(ServerRpcContext *func) {
return reinterpret_cast<void *>(func);
@ -503,6 +514,7 @@ class AsyncQpsServerTest final : public grpc::testing::Server {
std::vector<std::thread> threads_;
std::unique_ptr<grpc::Server> server_;
std::vector<std::unique_ptr<grpc::ServerCompletionQueue>> srv_cqs_;
std::vector<int> cq_;
ServiceType async_service_;
std::vector<std::unique_ptr<ServerRpcContext>> contexts_;

tools/gcp/utils/big_query_utils.py
@ -22,6 +22,8 @@ from apiclient import discovery
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
# 30 days in milliseconds
_EXPIRATION_MS = 30 * 24 * 60 * 60 * 1000
NUM_RETRIES = 3
@ -64,8 +66,21 @@ def create_table(big_query, project_id, dataset_id, table_id, table_schema,
fields, description)
def create_partitioned_table(big_query, project_id, dataset_id, table_id, table_schema,
description, partition_type='DAY', expiration_ms=_EXPIRATION_MS):
"""Creates a partitioned table. By default, a date-paritioned table is created with
each partition lasting 30 days after it was last modified.
"""
fields = [{'name': field_name,
'type': field_type,
'description': field_description
} for (field_name, field_type, field_description) in table_schema]
return create_table2(big_query, project_id, dataset_id, table_id,
fields, description, partition_type, expiration_ms)
def create_table2(big_query, project_id, dataset_id, table_id, fields_schema,
description):
description, partition_type=None, expiration_ms=None):
is_success = True
body = {
@ -80,6 +95,12 @@ def create_table2(big_query, project_id, dataset_id, table_id, fields_schema,
}
}
if partition_type and expiration_ms:
body["timePartitioning"] = {
"type": partition_type,
"expirationMs": expiration_ms
}
try:
table_req = big_query.tables().insert(projectId=project_id,
datasetId=dataset_id,

tools/internal_ci/helper_scripts/prepare_build_macos_rc
@ -21,8 +21,24 @@
yes | ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install autoconf automake libtool ccache cmake gflags gpg wget
# TODO(jtattermusch): install rvm & ruby
# TODO(jtattermusch): install cocoapods
# TODO(jtattermusch): hkp://keys.gnupg.net fails with "No route to host"
gpg --keyserver hkp://193.164.133.100 --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
curl -sSL https://get.rvm.io | sudo bash -s stable
# add ourselves to rvm group to prevent later "access denied" errors.
sudo dseditgroup -o edit -a `whoami` -t user rvm
set +ex
source /etc/profile.d/rvm.sh
rvm install ruby-2.3
gem install bundler
rvm osx-ssl-certs status all
rvm osx-ssl-certs update all
set -ex
# cocoapods
gem install cocoapods --version 1.0.0
# python
wget -q https://bootstrap.pypa.io/get-pip.py

tools/internal_ci/linux/grpc_master.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_master.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 240
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f basictests linux --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/grpc_portability.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_portability.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f portability linux --inner_jobs 16 -j 1 --internal_ci"
}

tools/internal_ci/linux/grpc_portability_build_only.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_portability_build_only.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 180
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f portability linux --internal_ci --build_only"
}

tools/internal_ci/linux/grpc_pull_request_sanity.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_sanity.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 30
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f basictests linux sanity opt --inner_jobs 16 -j 1 --internal_ci"
}

tools/internal_ci/linux/grpc_run_tests_matrix.sh
@ -20,4 +20,4 @@ cd $(dirname $0)/../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f portability linux --internal_ci --build_only
tools/run_tests/run_tests_matrix.py $RUN_TESTS_FLAGS

tools/internal_ci/linux/grpc_sanity.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_sanity.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 20
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f basictests linux sanity opt --inner_jobs 16 -j 1 --internal_ci"
}

tools/internal_ci/linux/sanitizer/grpc_c_asan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_c_asan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c asan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/grpc_c_msan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_c_msan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c msan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/grpc_c_tsan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_c_tsan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c tsan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/grpc_c_ubsan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_c_ubsan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c ubsan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/grpc_cpp_asan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_cpp_asan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c++ asan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/grpc_cpp_tsan.cfg
@ -15,10 +15,15 @@
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/sanitizer/grpc_cpp_tsan.sh"
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c++ tsan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_asan.cfg
@ -13,11 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Config file for the internal CI (in protobuf text format)
# change to grpc repo root
cd $(dirname $0)/../../..
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f basictests linux --inner_jobs 16 -j 1 --internal_ci
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c asan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_asan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c asan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c asan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_msan.cfg
@ -13,11 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Config file for the internal CI (in protobuf text format)
# change to grpc repo root
cd $(dirname $0)/../../..
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f portability linux --inner_jobs 16 -j 1 --internal_ci
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c msan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_msan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c msan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c msan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_tsan.cfg
@ -13,11 +13,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# Config file for the internal CI (in protobuf text format)
# change to grpc repo root
cd $(dirname $0)/../../..
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f basictests linux sanity opt --inner_jobs 16 -j 1 --internal_ci
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c tsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_tsan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c tsan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c tsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_ubsan.cfg
@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c ubsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_c_ubsan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c ubsan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c ubsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_asan.cfg
@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c++ asan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_asan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c++ asan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c++ asan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_tsan.cfg
@ -0,0 +1,30 @@
#!/bin/bash
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Config file for the internal CI (in protobuf text format)
# Location of the continuous shell script in repository.
build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
timeout_mins: 1440
action {
define_artifacts {
regex: "**/*sponge_log.xml"
}
}
env_vars {
key: "RUN_TESTS_FLAGS"
value: "-f c++ tsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600"
}

tools/internal_ci/linux/sanitizer/pull_request/grpc_cpp_tsan.sh
@ -16,8 +16,8 @@
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
cd $(dirname $0)/../../../../..
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
tools/run_tests/run_tests_matrix.py -f c++ tsan --inner_jobs 16 -j 1 --internal_ci
tools/run_tests/run_tests_matrix.py -f c++ tsan --inner_jobs 16 -j 1 --internal_ci --filter_pr_tests --base_branch origin/master --max_time=3600

tools/jenkins/run_performance.sh
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is invoked by Jenkins and runs performance smoke test.
# This script is invoked by Jenkins and runs a diff on the microbenchmarks
set -ex
# List of benchmarks that provide good signal for analyzing performance changes in pull requests

tools/jenkins/run_trickle_diff.sh
@ -0,0 +1,23 @@
#!/usr/bin/env bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is invoked by Jenkins and runs a diff on bm_fullstack_trickle
set -ex
# Enter the gRPC repo root
cd $(dirname $0)/../..
tools/run_tests/start_port_server.py
tools/profiling/microbenchmarks/bm_diff/bm_main.py -d origin/$ghprbTargetBranch -b bm_fullstack_trickle -l 4 -t cli_transport_stalls cli_stream_stalls svr_transport_stalls svr_stream_stalls --no-counters --pr_comment_name trickle

tools/profiling/microbenchmarks/bm_diff/bm_build.py
@ -46,6 +46,9 @@ def _args():
type=str,
help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
return args
@ -55,16 +58,18 @@ def _make_cmd(cfg, benchmarks, jobs):
return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
def build(name, benchmarks, jobs):
def build(name, benchmarks, jobs, counters):
shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
subprocess.check_call(['git', 'submodule', 'update'])
try:
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
except subprocess.CalledProcessError, e:
subprocess.check_call(['make', 'clean'])
subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
if counters:
subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
os.rename(
'bins',
'bm_diff_%s' % name,)
@ -72,4 +77,4 @@ def build(name, benchmarks, jobs):
if __name__ == '__main__':
args = _args()
build(args.name, args.benchmarks, args.jobs)
build(args.name, args.benchmarks, args.jobs, args.counters)

tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@ -26,4 +26,5 @@ _AVAILABLE_BENCHMARK_TESTS = [
_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
'allocs_per_iteration', 'writes_per_iteration',
'atm_cas_per_iteration', 'atm_add_per_iteration',
'nows_per_iteration',)
'nows_per_iteration', 'cli_transport_stalls', 'cli_stream_stalls',
'svr_transport_stalls', 'svr_stream_stalls',)

tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@ -67,6 +67,9 @@ def _args():
default=20,
help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
argp.add_argument('-n', '--new', type=str, help='New benchmark name')
argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
argp.add_argument(
@ -121,7 +124,8 @@ def _read_json(filename, badjson_files, nonexistant_files):
stripped = ".".join(filename.split(".")[:-2])
try:
with open(filename) as f:
return json.loads(f.read())
r = f.read();
return json.loads(r)
except IOError, e:
if stripped in nonexistant_files:
nonexistant_files[stripped] += 1
@ -129,14 +133,17 @@ def _read_json(filename, badjson_files, nonexistant_files):
nonexistant_files[stripped] = 1
return None
except ValueError, e:
print r
if stripped in badjson_files:
badjson_files[stripped] += 1
else:
badjson_files[stripped] = 1
return None
def fmt_dict(d):
return ''.join([" " + k + ": " + str(d[k]) + "\n" for k in d])
def diff(bms, loops, track, old, new):
def diff(bms, loops, track, old, new, counters):
benchmarks = collections.defaultdict(Benchmark)
badjson_files = {}
@ -148,18 +155,22 @@ def diff(bms, loops, track, old, new):
'--benchmark_list_tests']).splitlines():
stripped_line = line.strip().replace("/", "_").replace(
"<", "_").replace(">", "_").replace(", ", "_")
js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
(bm, stripped_line, new, loop),
badjson_files, nonexistant_files)
js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
(bm, stripped_line, new, loop),
badjson_files, nonexistant_files)
js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
(bm, stripped_line, old, loop),
badjson_files, nonexistant_files)
js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
(bm, stripped_line, old, loop),
badjson_files, nonexistant_files)
if counters:
js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
(bm, stripped_line, new, loop),
badjson_files, nonexistant_files)
js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
(bm, stripped_line, old, loop),
badjson_files, nonexistant_files)
else:
js_new_ctr = None
js_old_ctr = None
if js_new_ctr:
for row in bm_json.expand_json(js_new_ctr, js_new_opt):
@ -187,12 +198,12 @@ def diff(bms, loops, track, old, new):
rows.append([name] + benchmarks[name].row(fields))
note = None
if len(badjson_files):
note = 'Corrupt JSON data (indicates timeout or crash) = %s' % str(badjson_files)
note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(badjson_files)
if len(nonexistant_files):
if note:
note += '\n\nMissing files (indicates new benchmark) = %s' % str(nonexistant_files)
note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
else:
note = '\n\nMissing files (indicates new benchmark) = %s' % str(nonexistant_files)
note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
if rows:
return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
else:
@ -202,5 +213,5 @@ def diff(bms, loops, track, old, new):
if __name__ == '__main__':
args = _args()
diff, note = diff(args.benchmarks, args.loops, args.track, args.old,
args.new)
args.new, args.counters)
print('%s\n%s' % (note, diff if diff else "No performance differences"))

tools/profiling/microbenchmarks/bm_diff/bm_main.py
@ -80,6 +80,14 @@ def _args():
type=int,
default=multiprocessing.cpu_count(),
help='Number of CPUs to use')
argp.add_argument(
'--pr_comment_name',
type=str,
default="microbenchmarks",
help='Name that Jenkins will use to commen on the PR')
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.diff_base or args.old, "One of diff_base or old must be set!"
if args.loops < 3:
@ -103,7 +111,7 @@ def eintr_be_gone(fn):
def main(args):
bm_build.build('new', args.benchmarks, args.jobs)
bm_build.build('new', args.benchmarks, args.jobs, args.counters)
old = args.old
if args.diff_base:
@ -112,20 +120,20 @@ def main(args):
['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_base])
try:
bm_build.build('old', args.benchmarks, args.jobs)
bm_build.build(old, args.benchmarks, args.jobs, args.counters)
finally:
subprocess.check_call(['git', 'checkout', where_am_i])
subprocess.check_call(['git', 'submodule', 'update'])
bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions)
bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions)
bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)
diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
'new')
'new', args.counters)
if diff:
text = 'Performance differences noted:\n' + diff
text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
else:
text = 'No significant performance differences'
text = '[%s] No significant performance differences' % args.pr_comment_name
if note:
text = note + '\n\n' + text
print('%s' % text)

tools/profiling/microbenchmarks/bm_diff/bm_run.py
@ -67,6 +67,9 @@ def _args():
default=20,
help='Number of times to loops the benchmarks. More loops cuts down on noise'
)
argp.add_argument('--counters', dest='counters', action='store_true')
argp.add_argument('--no-counters', dest='counters', action='store_false')
argp.set_defaults(counters=True)
args = argp.parse_args()
assert args.name
if args.loops < 3:
@ -93,21 +96,22 @@ def _collect_bm_data(bm, cfg, name, reps, idx, loops):
shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
loops),
verbose_success=True,
timeout_seconds=60 * 2))
timeout_seconds=60 * 60)) # one hour
return jobs_list
def run(name, benchmarks, jobs, loops, reps):
def run(name, benchmarks, jobs, loops, reps, counters):
jobs_list = []
for loop in range(0, loops):
for bm in benchmarks:
jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
loops)
if counters:
jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
loops)
random.shuffle(jobs_list, random.SystemRandom().random)
jobset.run(jobs_list, maxjobs=jobs)
if __name__ == '__main__':
args = _args()
run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions)
run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions, args.counters)

tools/run_tests/dockerize/build_docker_and_run_tests.sh
@ -66,6 +66,9 @@ docker run \
-e "BUILD_ID=$BUILD_ID" \
-e "BUILD_URL=$BUILD_URL" \
-e "JOB_BASE_NAME=$JOB_BASE_NAME" \
-e "KOKORO_BUILD_NUMBER=$KOKORO_BUILD_NUMBER" \
-e "KOKORO_BUILD_URL=$KOKORO_BUILD_URL" \
-e "KOKORO_JOB_NAME=$KOKORO_JOB_NAME" \
-i $TTY_FLAG \
--sysctl net.ipv6.conf.all.disable_ipv6=0 \
-v ~/.config/gcloud:/root/.config/gcloud \

tools/run_tests/generated/tests.json
(File diff suppressed because it is too large.)

tools/run_tests/performance/scenario_config.py
@ -38,7 +38,7 @@ HISTOGRAM_PARAMS = {
# actual target will be slightly higher)
OUTSTANDING_REQUESTS={
'async': 6400,
'async-1core': 800,
'async-limited': 800,
'sync': 1000
}
@ -93,6 +93,8 @@ def _ping_pong_scenario(name, rpc_type,
client_language=None,
server_language=None,
async_server_threads=0,
server_threads_per_cq=0,
client_threads_per_cq=0,
warmup_seconds=WARMUP_SECONDS,
categories=DEFAULT_CATEGORIES,
channels=None,
@ -112,6 +114,7 @@ def _ping_pong_scenario(name, rpc_type,
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'threads_per_cq': client_threads_per_cq,
'rpc_type': rpc_type,
'load_params': {
'closed_loop': {}
@ -122,6 +125,7 @@ def _ping_pong_scenario(name, rpc_type,
'server_type': server_type,
'security_params': _get_secargs(secure),
'async_server_threads': async_server_threads,
'threads_per_cq': server_threads_per_cq,
},
'warmup_seconds': warmup_seconds,
'benchmark_seconds': BENCHMARK_SECONDS
@ -265,12 +269,73 @@ class CXXLanguage:
secure=secure,
categories=smoketest_categories+[SCALABLE])
# TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
#yield _ping_pong_scenario(
# 'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
# rpc_type='STREAMING',
# client_type='ASYNC_CLIENT',
# server_type='ASYNC_GENERIC_SERVER',
# unconstrained_client='async-limited', use_generic_payload=True,
# secure=secure,
# client_threads_per_cq=1000000, server_threads_per_cq=1000000,
# categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async', use_generic_payload=True,
secure=secure,
client_threads_per_cq=2, server_threads_per_cq=2,
categories=smoketest_categories+[SCALABLE])
#yield _ping_pong_scenario(
# 'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
# rpc_type='STREAMING',
# client_type='ASYNC_CLIENT',
# server_type='ASYNC_SERVER',
# unconstrained_client='async-limited',
# secure=secure,
# client_threads_per_cq=1000000, server_threads_per_cq=1000000,
# categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
client_threads_per_cq=2, server_threads_per_cq=2,
categories=smoketest_categories+[SCALABLE])
#yield _ping_pong_scenario(
# 'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
# rpc_type='UNARY',
# client_type='ASYNC_CLIENT',
# server_type='ASYNC_SERVER',
# unconstrained_client='async-limited',
# secure=secure,
# client_threads_per_cq=1000000, server_threads_per_cq=1000000,
# categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' % secstr,
rpc_type='UNARY',
client_type='ASYNC_CLIENT',
server_type='ASYNC_SERVER',
unconstrained_client='async',
secure=secure,
client_threads_per_cq=2, server_threads_per_cq=2,
categories=smoketest_categories+[SCALABLE])
yield _ping_pong_scenario(
'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
rpc_type='STREAMING',
client_type='ASYNC_CLIENT',
server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async-1core', use_generic_payload=True,
unconstrained_client='async-limited', use_generic_payload=True,
async_server_threads=1,
secure=secure)
@ -753,7 +818,7 @@ class JavaLanguage:
yield _ping_pong_scenario(
'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
unconstrained_client='async-1core', use_generic_payload=True,
unconstrained_client='async-limited', use_generic_payload=True,
async_server_threads=1,
secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)

tools/run_tests/python_utils/upload_test_results.py
@ -30,6 +30,9 @@ import big_query_utils
_DATASET_ID = 'jenkins_test_results'
_DESCRIPTION = 'Test results from master job run on Jenkins'
# 90 days in milliseconds
_EXPIRATION_MS = 90 * 24 * 60 * 60 * 1000
_PARTITION_TYPE = 'DAY'
_PROJECT_ID = 'grpc-testing'
_RESULTS_SCHEMA = [
('job_name', 'STRING', 'Name of Jenkins job'),
@ -50,10 +53,12 @@ _RESULTS_SCHEMA = [
def _get_build_metadata(test_results):
"""Add Jenkins build metadata to test_results based on environment variables set by Jenkins."""
build_id = os.getenv('BUILD_ID')
build_url = os.getenv('BUILD_URL')
job_name = os.getenv('JOB_BASE_NAME')
"""Add Jenkins/Kokoro build metadata to test_results based on environment
variables set by Jenkins/Kokoro.
"""
build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
if build_id:
test_results['build_id'] = build_id
@ -62,6 +67,7 @@ def _get_build_metadata(test_results):
if job_name:
test_results['job_name'] = job_name
def upload_results_to_bq(resultset, bq_table, args, platform):
"""Upload test results to a BQ table.
@ -72,7 +78,8 @@ def upload_results_to_bq(resultset, bq_table, args, platform):
platform: string name of platform tests were run on
"""
bq = big_query_utils.create_big_query()
big_query_utils.create_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION)
big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
for shortname, results in six.iteritems(resultset):
for result in results:

tools/run_tests/run_tests.py
@ -63,8 +63,8 @@ _FORCE_ENVIRON_FOR_WRAPPERS = {
}
_POLLING_STRATEGIES = {
'linux': ['epollex', 'epollsig', 'poll', 'poll-cv'],
# TODO(ctiller, sreecha): enable epoll1, epoll-thread-pool
'linux': ['epollsig', 'poll', 'poll-cv'],
# TODO(ctiller, sreecha): enable epoll1, epollex, epoll-thread-pool
'mac': ['poll'],
}
@ -83,7 +83,6 @@ def get_flaky_tests(limit=None):
WHERE
timestamp >= DATE_ADD(DATE(CURRENT_TIMESTAMP()), -1, "WEEK")
AND NOT REGEXP_MATCH(job_name, '.*portability.*')
AND REGEXP_MATCH(job_name, '.*master.*')
GROUP BY
test_name
HAVING
