Merge branch 'error' of github.com:ctiller/grpc into error

pull/6352/head
Craig Tiller
commit adf9e68292
27 changed files (changed-line counts in parentheses):

   1. examples/python/route_guide/route_guide_server.py (2)
   2. src/node/performance/benchmark_client.js (50)
   3. src/objective-c/GRPCClient/GRPCCall.m (1)
   4. src/php/bin/stress_client.sh (35)
   5. src/php/tests/interop/stress_client.php (2)
   6. src/python/grpcio/grpc/__init__.py (4)
   7. src/python/grpcio/grpc/_adapter/_types.py (2)
   8. src/python/grpcio/grpc/_common.py (4)
   9. src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi (2)
  10. src/python/grpcio/grpc/beta/interfaces.py (3)
  11. src/python/grpcio/tests/qps/benchmark_client.py (9)
  12. src/python/grpcio/tests/tests.json (2)
  13. src/python/grpcio/tests/unit/_api_test.py (14)
  14. src/python/grpcio/tests/unit/beta/_connectivity_channel_test.py (10)
  15. templates/tools/dockerfile/run_tests_addons.include (7)
  16. templates/tools/dockerfile/run_tests_addons_nocache.include (6)
  17. templates/tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile.template (65)
  18. templates/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile.template (3)
  19. test/cpp/interop/server_main.cc (16)
  20. test/cpp/qps/client.h (35)
  21. tools/buildgen/plugins/make_fuzzer_tests.py (2)
  22. tools/dockerfile/stress_test/grpc_interop_stress_php/Dockerfile (140)
  23. tools/dockerfile/stress_test/grpc_interop_stress_php/build_interop_stress.sh (54)
  24. tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile (8)
  25. tools/run_tests/run_tests.py (20)
  26. tools/run_tests/stress_test/configs/php-cxx.json (93)
  27. tools/run_tests/tests.json (13432)

@@ -51,7 +51,7 @@ def get_distance(start, end):
   coord_factor = 10000000.0
   lat_1 = start.latitude / coord_factor
   lat_2 = end.latitude / coord_factor
-  lon_1 = start.latitude / coord_factor
+  lon_1 = start.longitude / coord_factor
   lon_2 = end.longitude / coord_factor
   lat_rad_1 = math.radians(lat_1)
   lat_rad_2 = math.radians(lat_2)
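The fix above corrects a copy-paste bug: lon_1 was computed from start.latitude. For reference, a runnable sketch of the corrected function, assuming the haversine formula the example server uses; the delta/R lines below are reconstructed, not shown in this hunk:

```python
import math

def get_distance(start, end):
    """Distance in meters between two route_guide Points, whose
    latitude/longitude fields are stored as int32 degrees * 1e7."""
    coord_factor = 10000000.0
    lat_1 = start.latitude / coord_factor
    lat_2 = end.latitude / coord_factor
    lon_1 = start.longitude / coord_factor  # the fixed line
    lon_2 = end.longitude / coord_factor
    lat_rad_1 = math.radians(lat_1)
    lat_rad_2 = math.radians(lat_2)
    delta_lat_rad = math.radians(lat_2 - lat_1)
    delta_lon_rad = math.radians(lon_2 - lon_1)
    # Haversine formula (assumed to match the rest of the example server)
    a = (math.sin(delta_lat_rad / 2) ** 2 +
         math.cos(lat_rad_1) * math.cos(lat_rad_2) *
         math.sin(delta_lon_rad / 2) ** 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    R = 6371000  # Earth radius in meters
    return R * c
```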

@@ -42,6 +42,8 @@ var fs = require('fs');
 var path = require('path');
 var util = require('util');
 var EventEmitter = require('events');
+var async = require('async');
 var _ = require('lodash');
 var PoissonProcess = require('poisson-process');
 var Histogram = require('./histogram');
@@ -127,6 +129,36 @@ function BenchmarkClient(server_targets, channels, histogram_params,
 util.inherits(BenchmarkClient, EventEmitter);

+/**
+ * Start every client in the list of clients by waiting for each to be ready,
+ * then starting outstanding_rpcs_per_channel calls on each of them
+ * @param {Array<grpc.Client>} client_list The list of clients
+ * @param {Number} outstanding_rpcs_per_channel The number of calls to start
+ *     on each client
+ * @param {function(grpc.Client)} makeCall Function to make a single call on
+ *     a single client
+ * @param {EventEmitter} emitter The event emitter to send errors on, if
+ *     necessary
+ */
+function startAllClients(client_list, outstanding_rpcs_per_channel, makeCall,
+                         emitter) {
+  var ready_wait_funcs = _.map(client_list, function(client) {
+    return _.partial(grpc.waitForClientReady, client, Infinity);
+  });
+  async.parallel(ready_wait_funcs, function(err) {
+    if (err) {
+      emitter.emit('error', err);
+      return;
+    }
+    _.each(client_list, function(client) {
+      _.times(outstanding_rpcs_per_channel, function() {
+        makeCall(client);
+      });
+    });
+  });
+}
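startAllClients separates "wait until every channel is ready" from "fire the first N RPCs per channel", so connection-establishment time is never charged to the first benchmark requests. A minimal Python sketch of the same pattern using the public grpcio API; start_all_clients, make_call, and on_error are hypothetical names, and it waits sequentially rather than in parallel:

```python
import grpc

def start_all_clients(channels, outstanding_rpcs_per_channel, make_call,
                      on_error, ready_timeout=None):
    # Block until every channel is ready before firing the first RPCs,
    # mirroring startAllClients above (timeout=None waits indefinitely,
    # like the Infinity deadline in the Node code).
    try:
        for channel in channels:
            grpc.channel_ready_future(channel).result(timeout=ready_timeout)
    except grpc.FutureTimeoutError as err:
        on_error(err)
        return
    for channel in channels:
        for _ in range(outstanding_rpcs_per_channel):
            make_call(channel)
```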
/**
* Start a closed-loop test. For each channel, start
* outstanding_rpcs_per_channel RPCs. Then, whenever an RPC finishes, start
@@ -212,11 +244,7 @@ BenchmarkClient.prototype.startClosedLoop = function(
     };
   }

-  _.each(client_list, function(client) {
-    _.times(outstanding_rpcs_per_channel, function() {
-      makeCall(client);
-    });
-  });
+  startAllClients(client_list, outstanding_rpcs_per_channel, makeCall, self);
 };
/**
@@ -310,14 +338,12 @@ BenchmarkClient.prototype.startPoisson = function(
   var averageIntervalMs = (1 / offered_load) * 1000;
-  _.each(client_list, function(client) {
-    _.times(outstanding_rpcs_per_channel, function() {
-      var p = PoissonProcess.create(averageIntervalMs, function() {
-        makeCall(client, p);
-      });
-      p.start();
+  startAllClients(client_list, outstanding_rpcs_per_channel, function(client){
+    var p = PoissonProcess.create(averageIntervalMs, function() {
+      makeCall(client, p);
     });
-  });
+    p.start();
+  }, self);
 };
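startPoisson is open-loop: each of the outstanding_rpcs_per_channel slots gets its own poisson-process timer with mean interval (1 / offered_load) * 1000 ms, so new calls are issued at the offered rate regardless of how long earlier calls take. A sketch of the same idea in Python; the helper name is hypothetical, while the Node code relies on the poisson-process package:

```python
import random
import threading
import time

def start_poisson_caller(make_call, offered_load_rps, stop_event):
    """Open-loop load generator: issue make_call() at Poisson arrival
    times averaging offered_load_rps calls per second, independent of
    how long each call takes."""
    def run():
        while not stop_event.is_set():
            # Exponential inter-arrival times yield a Poisson process.
            time.sleep(random.expovariate(offered_load_rps))
            make_call()
    threading.Thread(target=run, daemon=True).start()
```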
/**

@@ -76,7 +76,6 @@ NSString * const kGRPCTrailersKey = @"io.grpc.TrailersKey";
   NSString *_host;
   NSString *_path;
   GRPCWrappedCall *_wrappedCall;
-  dispatch_once_t _callAlreadyInvoked;
   GRPCConnectivityMonitor *_connectivityMonitor;

   // The C gRPC library has fewer guarantees on the ordering of events than we

@@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -e
cd $(dirname $0)
source ./determine_extension_dir.sh
php $extension_dir -d max_execution_time=300 \
../tests/interop/stress_client.php $@ 1>&2

@@ -102,7 +102,7 @@ if (empty($raw_args['server_addresses'])) {
 }

 $args['metrics_port'] = empty($raw_args['metrics_port']) ?
-    '8081' : $args['metrics_port'];
+    '8081' : $raw_args['metrics_port'];

 $args['test_duration_secs'] = empty($raw_args['test_duration_secs']) ||
     $raw_args['test_duration_secs'] == -1 ?

@@ -212,14 +212,14 @@ class ChannelConnectivity(enum.Enum):
     READY: The channel is ready to conduct RPCs.
     TRANSIENT_FAILURE: The channel has seen a failure from which it expects to
       recover.
-    FATAL_FAILURE: The channel has seen a failure from which it cannot recover.
+    SHUTDOWN: The channel has seen a failure from which it cannot recover.
   """
   IDLE = (_cygrpc.ConnectivityState.idle, 'idle')
   CONNECTING = (_cygrpc.ConnectivityState.connecting, 'connecting')
   READY = (_cygrpc.ConnectivityState.ready, 'ready')
   TRANSIENT_FAILURE = (
       _cygrpc.ConnectivityState.transient_failure, 'transient failure')
-  FATAL_FAILURE = (_cygrpc.ConnectivityState.fatal_failure, 'fatal failure')
+  SHUTDOWN = (_cygrpc.ConnectivityState.shutdown, 'shutdown')

 @enum.unique
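This rename is the user-visible part of the commit: the terminal channel state is now grpc.ChannelConnectivity.SHUTDOWN. A small sketch of watching a channel until it reaches that state; channel.subscribe/unsubscribe are the public grpcio API, the callback name is ours:

```python
import grpc

def watch_until_shutdown(channel):
    def on_change(connectivity):
        print('channel state:', connectivity)
        if connectivity == grpc.ChannelConnectivity.SHUTDOWN:
            # Terminal state (previously named FATAL_FAILURE); stop watching.
            channel.unsubscribe(on_change)
    channel.subscribe(on_change, try_to_connect=True)
```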

@@ -114,7 +114,7 @@ class ConnectivityState(enum.IntEnum):
   CONNECTING = cygrpc.ConnectivityState.connecting
   READY = cygrpc.ConnectivityState.ready
   TRANSIENT_FAILURE = cygrpc.ConnectivityState.transient_failure
-  FATAL_FAILURE = cygrpc.ConnectivityState.fatal_failure
+  FATAL_FAILURE = cygrpc.ConnectivityState.shutdown

 class Status(collections.namedtuple(

@@ -46,8 +46,8 @@ CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
     cygrpc.ConnectivityState.ready: grpc.ChannelConnectivity.READY,
     cygrpc.ConnectivityState.transient_failure:
         grpc.ChannelConnectivity.TRANSIENT_FAILURE,
-    cygrpc.ConnectivityState.fatal_failure:
-        grpc.ChannelConnectivity.FATAL_FAILURE,
+    cygrpc.ConnectivityState.shutdown:
+        grpc.ChannelConnectivity.SHUTDOWN,
 }

 CYGRPC_STATUS_CODE_TO_STATUS_CODE = {

@@ -33,7 +33,7 @@ class ConnectivityState:
   connecting = GRPC_CHANNEL_CONNECTING
   ready = GRPC_CHANNEL_READY
   transient_failure = GRPC_CHANNEL_TRANSIENT_FAILURE
-  fatal_failure = GRPC_CHANNEL_SHUTDOWN
+  shutdown = GRPC_CHANNEL_SHUTDOWN

 class ChannelArgKey:

@@ -36,6 +36,9 @@ import six
 import grpc

 ChannelConnectivity = grpc.ChannelConnectivity
+# FATAL_FAILURE was a Beta-API name for SHUTDOWN
+ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN

 StatusCode = grpc.StatusCode
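The alias keeps Beta-API callers working across the rename; both names refer to the same enum member:

```python
from grpc.beta import interfaces

# Old Beta-API code that referenced FATAL_FAILURE keeps working:
assert (interfaces.ChannelConnectivity.FATAL_FAILURE is
        interfaces.ChannelConnectivity.SHUTDOWN)
```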

@@ -30,11 +30,13 @@
 """Defines test client behaviors (UNARY/STREAMING) (SYNC/ASYNC)."""

 import abc
+import threading
 import time

 from concurrent import futures
 from six.moves import queue

+import grpc
 from grpc.beta import implementations
 from grpc.framework.interfaces.face import face
 from src.proto.grpc.testing import messages_pb2
@@ -62,6 +64,13 @@ class BenchmarkClient:
     else:
       channel = implementations.insecure_channel(host, port)

+    connected_event = threading.Event()
+    def wait_for_ready(connectivity):
+      if connectivity == grpc.ChannelConnectivity.READY:
+        connected_event.set()
+    channel.subscribe(wait_for_ready, try_to_connect=True)
+    connected_event.wait()

     if config.payload_config.WhichOneof('payload') == 'simple_params':
       self._generic = False
       self._stub = services_pb2.beta_create_BenchmarkService_stub(channel)

@@ -1,5 +1,6 @@
 [
   "_api_test.AllTest",
+  "_api_test.ChannelConnectivityTest",
   "_auth_test.AccessTokenCallCredentialsTest",
   "_auth_test.GoogleCallCredentialsTest",
   "_base_interface_test.AsyncEasyTest",
@@ -13,6 +14,7 @@
   "_channel_ready_future_test.ChannelReadyFutureTest",
   "_channel_test.ChannelTest",
   "_connectivity_channel_test.ChannelConnectivityTest",
+  "_connectivity_channel_test.ConnectivityStatesTest",
   "_core_over_links_base_interface_test.AsyncEasyTest",
   "_core_over_links_base_interface_test.AsyncPeasyTest",
   "_core_over_links_base_interface_test.SyncEasyTest",

@@ -33,6 +33,8 @@ import unittest

 import six

+import grpc
+
 from tests.unit import _from_grpc_import_star

@@ -86,5 +88,17 @@ class AllTest(unittest.TestCase):
         _from_grpc_import_star.GRPC_ELEMENTS)

+class ChannelConnectivityTest(unittest.TestCase):
+
+  def testChannelConnectivity(self):
+    self.assertSequenceEqual(
+        (grpc.ChannelConnectivity.IDLE,
+         grpc.ChannelConnectivity.CONNECTING,
+         grpc.ChannelConnectivity.READY,
+         grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+         grpc.ChannelConnectivity.SHUTDOWN,),
+        tuple(grpc.ChannelConnectivity))

 if __name__ == '__main__':
   unittest.main(verbosity=2)

@@ -187,5 +187,15 @@ class ChannelConnectivityTest(unittest.TestCase):
     server_completion_queue_thread.join()

+class ConnectivityStatesTest(unittest.TestCase):
+
+  def testBetaConnectivityStates(self):
+    self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
+    self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)

 if __name__ == '__main__':
   unittest.main(verbosity=2)

@@ -1,7 +1,2 @@
 <%include file="ccache_setup.include"/>
-#======================
-# Zookeeper dependencies
-# TODO(jtattermusch): is zookeeper still needed?
-RUN apt-get install -y libzookeeper-mt-dev
-RUN mkdir /var/local/jenkins
+<%include file="run_tests_addons_nocache.include"/>

@@ -0,0 +1,6 @@
#======================
# Zookeeper dependencies
# TODO(jtattermusch): is zookeeper still needed?
RUN apt-get install -y libzookeeper-mt-dev
RUN mkdir /var/local/jenkins

@@ -0,0 +1,65 @@
%YAML 1.2
--- |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
<%include file="../../apt_get_basic.include"/>
<%include file="../../ruby_deps.include"/>
<%include file="../../gcp_api_libraries.include"/>
<%include file="../../php_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# ronn: a ruby tool used to convert markdown to man pages, used during the
# install of Protobuf extensions
#
# rake: a ruby version of make used to build the PHP Protobuf extension
RUN /bin/bash -l -c "rvm all do gem install ronn rake"
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# In an attempt to work around #4212, prefetch the Protobuf-PHP dependency
# into the composer cache to prevent "composer install" from cloning on each build.
RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git ${'\\'}
/root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/
# Download the patched PHP protobuf so that PHP gRPC clients can be generated
# from proto3 schemas.
RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php
RUN /bin/bash -l -c "rvm use ruby-2.1 ${'\\'}
&& cd /var/local/git/protobuf-php ${'\\'}
&& rvm all do rake pear:package version=1.0 ${'\\'}
&& pear install Protobuf-1.0.tgz"
# Define the default command.
CMD ["bash"]

@@ -33,7 +33,6 @@
 <%include file="../../apt_get_basic.include"/>
 <%include file="../../cxx_deps.include"/>
-<%include file="../../run_tests_addons.include"/>
+<%include file="../../run_tests_addons_nocache.include"/>

 # Define the default command.
 CMD ["bash"]

@@ -181,6 +181,14 @@ class TestServiceImpl : public TestService::Service {
               response.mutable_payload())) {
         return Status(grpc::StatusCode::INTERNAL, "Error creating payload.");
       }
+      int time_us;
+      if ((time_us = request->response_parameters(i).interval_us()) > 0) {
+        // Sleep before response if needed
+        gpr_timespec sleep_time =
+            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                         gpr_time_from_micros(time_us, GPR_TIMESPAN));
+        gpr_sleep_until(sleep_time);
+      }
       write_success = writer->Write(response);
     }
     if (write_success) {
@@ -218,6 +226,14 @@ class TestServiceImpl : public TestService::Service {
       response.mutable_payload()->set_type(request.payload().type());
       response.mutable_payload()->set_body(
           grpc::string(request.response_parameters(0).size(), '\0'));
+      int time_us;
+      if ((time_us = request.response_parameters(0).interval_us()) > 0) {
+        // Sleep before response if needed
+        gpr_timespec sleep_time =
+            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                         gpr_time_from_micros(time_us, GPR_TIMESPAN));
+        gpr_sleep_until(sleep_time);
+      }
       write_success = stream->Write(response);
     }
   }
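Both streaming handlers now honor the client-supplied response_parameters.interval_us, sleeping before each write so interop tests can request paced responses. Roughly the same behavior in a Python response generator; make_response and the handler shape are assumptions, not part of this commit:

```python
import time

def paced_responses(request, make_response):
    """Yield one response per response_parameters entry, pausing
    interval_us microseconds before each write when requested."""
    for params in request.response_parameters:
        if params.interval_us > 0:
            # Sleep before response if needed (microseconds -> seconds).
            time.sleep(params.interval_us / 1e6)
        yield make_response(params.size)
```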

@@ -125,13 +125,15 @@ class Client {
     if (reset) {
       Histogram* to_merge = new Histogram[threads_.size()];
       for (size_t i = 0; i < threads_.size(); i++) {
-        threads_[i]->Swap(&to_merge[i]);
-        latencies.Merge(to_merge[i]);
+        threads_[i]->BeginSwap(&to_merge[i]);
       }
-      delete[] to_merge;
       std::unique_ptr<UsageTimer> timer(new UsageTimer);
       timer_.swap(timer);
+      for (size_t i = 0; i < threads_.size(); i++) {
+        threads_[i]->EndSwap();
+        latencies.Merge(to_merge[i]);
+      }
+      delete[] to_merge;
       timer_result = timer->Mark();
     } else {
       // merge snapshots of each thread histogram
@@ -213,6 +215,7 @@ class Client {
    public:
     Thread(Client* client, size_t idx)
         : done_(false),
+          new_stats_(nullptr),
           client_(client),
           idx_(idx),
           impl_(&Thread::ThreadFunc, this) {}
@@ -225,9 +228,16 @@ class Client {
       impl_.join();
     }

-    void Swap(Histogram* n) {
+    void BeginSwap(Histogram* n) {
       std::lock_guard<std::mutex> g(mu_);
-      n->Swap(&histogram_);
+      new_stats_ = n;
+    }
+
+    void EndSwap() {
+      std::unique_lock<std::mutex> g(mu_);
+      while (new_stats_ != nullptr) {
+        cv_.wait(g);
+      };
     }

     void MergeStatsInto(Histogram* hist) {
@@ -241,11 +251,10 @@ class Client {
     void ThreadFunc() {
       for (;;) {
-        // lock since the thread should only be doing one thing at a time
-        std::lock_guard<std::mutex> g(mu_);
         // run the loop body
         const bool thread_still_ok = client_->ThreadFunc(&histogram_, idx_);
-        // see if we're done
+        // lock, see if we're done
+        std::lock_guard<std::mutex> g(mu_);
         if (!thread_still_ok) {
           gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
           done_ = true;
@@ -253,11 +262,19 @@ class Client {
         if (done_) {
           return;
         }
+        // check if we're resetting stats, swap out the histogram if so
+        if (new_stats_) {
+          new_stats_->Swap(&histogram_);
+          new_stats_ = nullptr;
+          cv_.notify_one();
+        }
       }
     }

     std::mutex mu_;
+    std::condition_variable cv_;
     bool done_;
+    Histogram* new_stats_;
     Histogram histogram_;
     Client* client_;
     const size_t idx_;
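The old reset path called Swap on each thread in turn, contending for each worker's mutex while it might be mid-RPC. The new two-phase scheme posts a fresh histogram to every thread (BeginSwap), lets each worker perform the swap at the safe point in its own loop and signal cv_, and only then waits (EndSwap) and merges, so swap requests propagate to all threads in parallel. A Python sketch of that handshake using threading.Condition; the class and method names are ours, not from client.h, and as in the C++ the swap only happens when the worker next passes its safe point:

```python
import threading

class WorkerStats:
    """One worker's stats slot with the BeginSwap/EndSwap handshake."""

    def __init__(self):
        self._cv = threading.Condition()
        self._samples = []      # stand-in for the latency histogram
        self._new_stats = None  # posted by the controller, taken by the worker

    def begin_swap(self, fresh):
        # Controller: hand the worker an empty container; do not wait.
        with self._cv:
            self._new_stats = fresh

    def end_swap(self):
        # Controller: wait until the worker has performed the swap.
        with self._cv:
            while self._new_stats is not None:
                self._cv.wait()

    def record(self, latency):
        # Worker loop: record a sample; this is also the safe point
        # where a pending swap request is honored.
        with self._cv:
            self._samples.append(latency)
            if self._new_stats is not None:
                self._new_stats.extend(self._samples)  # old data moves out
                del self._samples[:]
                self._new_stats = None
                self._cv.notify()
```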

@@ -49,7 +49,7 @@ def mako_plugin(dictionary):
       tests.append({
           'name': new_target['name'],
           'args': [fn],
-          'exclude_configs': [],
+          'exclude_configs': ['tsan'],
           'uses_polling': False,
           'platforms': ['linux'],
           'ci_platforms': ['linux'],

@@ -0,0 +1,140 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FROM debian:jessie
# Install Git and basic packages.
RUN apt-get update && apt-get install -y \
autoconf \
autotools-dev \
build-essential \
bzip2 \
ccache \
curl \
gcc \
gcc-multilib \
git \
golang \
gyp \
lcov \
libc6 \
libc6-dbg \
libc6-dev \
libgtest-dev \
libtool \
make \
perl \
strace \
python-dev \
python-setuptools \
python-yaml \
telnet \
unzip \
wget \
zip && apt-get clean
#================
# Build profiling
RUN apt-get update && apt-get install -y time && apt-get clean
#==================
# Ruby dependencies
# Install rvm
RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
RUN \curl -sSL https://get.rvm.io | bash -s stable
# Install Ruby 2.1
RUN /bin/bash -l -c "rvm install ruby-2.1"
RUN /bin/bash -l -c "rvm use --default ruby-2.1"
RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
# Google Cloud platform API libraries
RUN apt-get update && apt-get install -y python-pip && apt-get clean
RUN pip install --upgrade google-api-python-client
#=================
# PHP dependencies
# Install dependencies
RUN /bin/bash -l -c "echo 'deb http://packages.dotdeb.org wheezy-php55 all' \
>> /etc/apt/sources.list.d/dotdeb.list"
RUN /bin/bash -l -c "echo 'deb-src http://packages.dotdeb.org wheezy-php55 all' \
>> /etc/apt/sources.list.d/dotdeb.list"
RUN wget http://www.dotdeb.org/dotdeb.gpg -O- | apt-key add -
RUN apt-get update && apt-get install -y \
git php5 php5-dev phpunit unzip
# Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
RUN ln -s /usr/bin/ccache /usr/local/bin/g++
RUN ln -s /usr/bin/ccache /usr/local/bin/cc
RUN ln -s /usr/bin/ccache /usr/local/bin/c++
RUN ln -s /usr/bin/ccache /usr/local/bin/clang
RUN ln -s /usr/bin/ccache /usr/local/bin/clang++
#======================
# Zookeeper dependencies
# TODO(jtattermusch): is zookeeper still needed?
RUN apt-get install -y libzookeeper-mt-dev
RUN mkdir /var/local/jenkins
# ronn: a ruby tool used to convert markdown to man pages, used during the
# install of Protobuf extensions
#
# rake: a ruby version of make used to build the PHP Protobuf extension
RUN /bin/bash -l -c "rvm all do gem install ronn rake"
# Install composer
RUN curl -sS https://getcomposer.org/installer | php
RUN mv composer.phar /usr/local/bin/composer
# In an attempt to work around #4212, prefetch the Protobuf-PHP dependency
# into the composer cache to prevent "composer install" from cloning on each build.
RUN git clone --mirror https://github.com/stanley-cheung/Protobuf-PHP.git \
/root/.composer/cache/vcs/git-github.com-stanley-cheung-Protobuf-PHP.git/
# Download the patched PHP protobuf so that PHP gRPC clients can be generated
# from proto3 schemas.
RUN git clone https://github.com/stanley-cheung/Protobuf-PHP.git /var/local/git/protobuf-php
RUN /bin/bash -l -c "rvm use ruby-2.1 \
&& cd /var/local/git/protobuf-php \
&& rvm all do rake pear:package version=1.0 \
&& pear install Protobuf-1.0.tgz"
# Define the default command.
CMD ["bash"]

@@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Builds PHP interop server and client in a base image.
set -ex
mkdir -p /var/local/git
git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
# copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true
cd /var/local/git/grpc
rvm --default use ruby-2.1
make install-certs
# gRPC core and protobuf need to be installed
make install
(cd src/php/ext/grpc && phpize && ./configure && make)
(cd third_party/protobuf && make install)
(cd src/php && composer install)
(cd src/php && protoc-gen-php -i tests/interop/ -o tests/interop/ tests/interop/test.proto)

@@ -67,14 +67,6 @@ RUN apt-get update && apt-get install -y time && apt-get clean
 # C++ dependencies
 RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean

-# Prepare ccache
-RUN ln -s /usr/bin/ccache /usr/local/bin/gcc
-RUN ln -s /usr/bin/ccache /usr/local/bin/g++
-RUN ln -s /usr/bin/ccache /usr/local/bin/cc
-RUN ln -s /usr/bin/ccache /usr/local/bin/c++
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang
-RUN ln -s /usr/bin/ccache /usr/local/bin/clang++

 #======================
 # Zookeeper dependencies
 # TODO(jtattermusch): is zookeeper still needed?

@@ -62,6 +62,11 @@ os.chdir(_ROOT)

 _FORCE_ENVIRON_FOR_WRAPPERS = {}

+_POLLING_STRATEGIES = {
+  'linux': ['poll', 'legacy']
+}

 def platform_string():
   return jobset.platform_string()
@@ -153,14 +158,8 @@ class CLanguage(object):
   def test_specs(self):
     out = []
     binaries = get_c_tests(self.args.travis, self.test_lang)
-    POLLING_STRATEGIES = {
-      'windows': ['all'],
-      'mac': ['all'],
-      'posix': ['all'],
-      'linux': ['poll', 'legacy']
-    }
     for target in binaries:
-      polling_strategies = (POLLING_STRATEGIES[self.platform]
+      polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
                             if target.get('uses_polling', True)
                             else ['all'])
       for polling_strategy in polling_strategies:
@@ -395,7 +394,7 @@ class PythonLanguage(object):
       tests_json = json.load(tests_json_file)
     environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
     environment['PYTHONPATH'] = '{}:{}'.format(
-        os.path.abspath('src/python/gens'),
+        os.path.abspath('src/python/gens'),
         os.path.abspath('src/python/grpcio_health_checking'))
     if self.config.build_config != 'gcov':
       return [self.config.job_spec(
@@ -854,8 +853,13 @@ argp.add_argument('--update_submodules', default=[], nargs='*',
 argp.add_argument('-a', '--antagonists', default=0, type=int)
 argp.add_argument('-x', '--xml_report', default=None, type=str,
                   help='Generates a JUnit-compatible XML report')
+argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
+                  help='Do not try to iterate over many polling strategies when they exist')
 args = argp.parse_args()

+if args.force_default_poller:
+  _POLLING_STRATEGIES = {}
+
 jobset.measure_cpu_costs = args.measure_cpu_costs

 # update submodules if necessary

@@ -0,0 +1,93 @@
{
"dockerImages": {
"grpc_stress_cxx_opt" : {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_cxx",
"buildType": "opt"
},
"grpc_stress_php": {
"buildScript": "tools/run_tests/dockerize/build_interop_stress_image.sh",
"dockerFileDir": "grpc_interop_stress_php"
}
},
"clientTemplates": {
"baseTemplates": {
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_client.py",
"pollIntervalSecs": 60,
"clientArgs": {
"num_channels_per_server":5,
"num_stubs_per_channel":10,
"test_cases": "empty_unary:1,large_unary:1,client_streaming:1,server_streaming:1,empty_stream:1",
"metrics_port": 8081
},
"metricsPort": 8081,
"metricsArgs": {
"metrics_server_address": "localhost:8081"
}
}
},
"templates": {
"php_client": {
"baseTemplate": "default",
"stressClientCmd": [
"/var/local/git/grpc/src/php/bin/stress_client.sh"
],
"metricsClientCmd": [
"php",
"/var/local/git/grpc/src/php/tests/interop/metrics_client.php"
]
}
}
},
"serverTemplates": {
"baseTemplates":{
"default": {
"wrapperScriptPath": "/var/local/git/grpc/tools/gcp/stress_test/run_server.py",
"serverPort": 8080,
"serverArgs": {
"port": 8080
}
}
},
"templates": {
"cxx_server_opt": {
"baseTemplate": "default",
"stressServerCmd": ["/var/local/git/grpc/bins/opt/interop_server"]
}
}
},
"testMatrix": {
"serverPodSpecs": {
"stress-server-cxx-php": {
"serverTemplate": "cxx_server_opt",
"dockerImage": "grpc_stress_cxx_opt",
"numInstances": 1
}
},
"clientPodSpecs": {
"stress-client-php": {
"clientTemplate": "php_client",
"dockerImage": "grpc_stress_php",
"numInstances": 20,
"serverPodSpec": "stress-server-cxx-php"
}
}
},
"globalSettings": {
"buildDockerImages": true,
"pollIntervalSecs": 60,
"testDurationSecs": 7200,
"kubernetesProxyPort": 8010,
"datasetIdNamePrefix": "stress_test_php_cxx_opt",
"summaryTableId": "summary",
"qpsTableId": "qps",
"podWarmupSecs": 60
}
}
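This config wires one C++ opt server pod against 20 PHP client pods, with client and server templates inheriting defaults from their baseTemplates. A sketch of how a runner might resolve a template against its base; the merge rule here is our assumption about the stress-test tooling, not verified against tools/gcp/stress_test:

```python
import json

def resolve_template(section, name):
    """Merge a named template over its baseTemplates entry (assumed
    semantics: inherit every base field, override on conflict)."""
    template = dict(section['templates'][name])
    base = section['baseTemplates'][template.pop('baseTemplate', 'default')]
    merged = dict(base)
    merged.update(template)
    return merged

with open('tools/run_tests/stress_test/configs/php-cxx.json') as f:
    config = json.load(f)
client = resolve_template(config['clientTemplates'], 'php_client')
print(client['stressClientCmd'], client['clientArgs']['test_cases'])
```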

File diff suppressed because it is too large (tools/run_tests/tests.json, 13432 changed lines).