Merge remote-tracking branch 'upstream/v1.4.x' into master_1.4.1_upmerge

pull/11639/head
murgatroid99 8 years ago
commit 882ba60fcb
  1. 13
      src/core/lib/iomgr/tcp_uv.c
  2. 5
      src/node/ext/call.cc
  3. 2
      src/node/src/client.js
  4. 2
      src/node/src/protobuf_js_6_common.js
  5. 24
      src/node/test/surface_test.js
  6. 55
      test/cpp/end2end/round_robin_end2end_test.cc
  7. 2
      test/cpp/microbenchmarks/bm_fullstack_trickle.cc
  8. 38
      third_party/cares/config_linux/ares_config.h
  9. 8
      tools/dockerfile/grpc_artifact_linux_x64/Dockerfile
  10. 7
      tools/dockerfile/grpc_artifact_protoc/Dockerfile
  11. 4
      tools/jenkins/run_qps_diff.sh
  12. 2
      tools/jenkins/run_trickle_diff.sh
  13. 7
      tools/run_tests/artifacts/artifact_targets.py
  14. 7
      tools/run_tests/artifacts/build_artifact_node.sh

@ -65,6 +65,7 @@ typedef struct {
} grpc_tcp;
/* Tears down a grpc_tcp endpoint: releases the cached read slice and the
 * resource user, then frees the struct itself.  Must only be called when
 * this is the last reference to |tcp| (NOTE(review): presumably invoked
 * from the endpoint's ref-count drop path — confirm against the full
 * tcp_uv.c, which is not visible in this diff). */
static void tcp_free(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
/* Unref the pre-allocated read slice kept between reads (see
 * alloc_read_slice) before freeing the struct that owns it. */
grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
grpc_resource_user_unref(exec_ctx, tcp->resource_user);
gpr_free(tcp);
}
@ -115,13 +116,17 @@ static void uv_close_callback(uv_handle_t *handle) {
grpc_exec_ctx_finish(&exec_ctx);
}
/* Allocates a read slice of GRPC_TCP_DEFAULT_READ_SLICE_SIZE bytes via
 * |resource_user| (NOTE(review): grpc_resource_user_slice_malloc appears
 * to account the allocation against the resource user's quota — confirm
 * in resource_quota.c, not visible here).  The caller stores the result
 * in tcp->read_slice; tcp_free later unrefs it. */
static grpc_slice alloc_read_slice(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user) {
return grpc_resource_user_slice_malloc(exec_ctx, resource_user,
GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
}
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = handle->data;
(void)suggested_size;
tcp->read_slice = grpc_resource_user_slice_malloc(
&exec_ctx, tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
grpc_exec_ctx_finish(&exec_ctx);
@ -148,6 +153,7 @@ static void read_callback(uv_stream_t *stream, ssize_t nread,
// Successful read
sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
grpc_slice_buffer_add(tcp->read_slices, sub);
tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user);
error = GRPC_ERROR_NONE;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
size_t i;
@ -334,6 +340,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (GRPC_TRACER_ON(grpc_tcp_trace)) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
@ -350,6 +357,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
tcp->read_slice = alloc_read_slice(&exec_ctx, tcp->resource_user);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);
@ -357,6 +365,7 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
uv_unref((uv_handle_t *)handle);
#endif
grpc_exec_ctx_finish(&exec_ctx);
return &tcp->base;
}

@ -383,7 +383,10 @@ class ClientStatusOp : public Op {
public:
ClientStatusOp() { grpc_metadata_array_init(&metadata_array); }
~ClientStatusOp() { grpc_metadata_array_destroy(&metadata_array); }
~ClientStatusOp() {
grpc_metadata_array_destroy(&metadata_array);
grpc_slice_unref(status_details);
}
bool ParseOp(Local<Value> value, grpc_op *out) {
out->data.recv_status_on_client.trailing_metadata = &metadata_array;

@ -137,6 +137,7 @@ function _write(chunk, encoding, callback) {
/* Once a write fails, just call the callback immediately to let the caller
flush any pending writes. */
setImmediate(callback);
return;
}
try {
message = this.serialize(chunk);
@ -149,6 +150,7 @@ function _write(chunk, encoding, callback) {
this.call.cancelWithStatus(constants.status.INTERNAL,
'Serialization failure');
callback(e);
return;
}
if (_.isFinite(encoding)) {
/* Attach the encoding if it is a finite number. This is the closest we

@ -49,7 +49,7 @@ exports.deserializeCls = function deserializeCls(cls, options) {
* @return {cls} The resulting object
*/
return function deserialize(arg_buf) {
return cls.decode(arg_buf).toObject(conversion_options);
return cls.toObject(cls.decode(arg_buf), conversion_options);
};
};

@ -1398,13 +1398,25 @@ describe('Client reconnect', function() {
});
server.bind('localhost:' + port, server_insecure_creds);
server.start();
client.echo(undefined, function(error, response) {
if (error) {
console.log(error);
}
/* We create a new client, that will not throw an error if the server
* is not immediately available. Instead, it will wait for the server
* to be available, then the call will complete. Once this happens, the
* original client should be able to make a new call and connect to the
* restarted server without having the call fail due to connection
* errors. */
var client2 = new Client('localhost:' + port,
grpc.credentials.createInsecure());
client2.echo({value: 'test', value2: 3}, function(error, response) {
assert.ifError(error);
assert.deepEqual(response, {value: '', value2: 0});
done();
client.echo(undefined, function(error, response) {
if (error) {
console.log(error);
}
assert.ifError(error);
assert.deepEqual(response, {value: '', value2: 0});
done();
});
});
});
});

@ -73,9 +73,12 @@ class RoundRobinEnd2endTest : public ::testing::Test {
protected:
RoundRobinEnd2endTest() : server_host_("localhost") {}
void StartServers(int num_servers) {
for (int i = 0; i < num_servers; ++i) {
servers_.emplace_back(new ServerData(server_host_));
void StartServers(size_t num_servers,
std::vector<int> ports = std::vector<int>()) {
for (size_t i = 0; i < num_servers; ++i) {
int port = 0;
if (ports.size() == num_servers) port = ports[i];
servers_.emplace_back(new ServerData(server_host_, port));
}
}
@ -99,15 +102,19 @@ class RoundRobinEnd2endTest : public ::testing::Test {
stub_ = grpc::testing::EchoTestService::NewStub(channel_);
}
void SendRpc(int num_rpcs) {
void SendRpc(int num_rpcs, bool expect_ok = true) {
EchoRequest request;
EchoResponse response;
request.set_message("Live long and prosper.");
for (int i = 0; i < num_rpcs; i++) {
ClientContext context;
Status status = stub_->Echo(&context, request, &response);
EXPECT_TRUE(status.ok());
EXPECT_EQ(response.message(), request.message());
if (expect_ok) {
EXPECT_TRUE(status.ok());
EXPECT_EQ(response.message(), request.message());
} else {
EXPECT_FALSE(status.ok());
}
}
}
@ -116,8 +123,8 @@ class RoundRobinEnd2endTest : public ::testing::Test {
std::unique_ptr<Server> server_;
MyTestServiceImpl service_;
explicit ServerData(const grpc::string& server_host) {
port_ = grpc_pick_unused_port_or_die();
explicit ServerData(const grpc::string& server_host, int port = 0) {
port_ = port > 0 ? port : grpc_pick_unused_port_or_die();
gpr_log(GPR_INFO, "starting server on port %d", port_);
std::ostringstream server_address;
server_address << server_host << ":" << port_;
@ -176,6 +183,38 @@ TEST_F(RoundRobinEnd2endTest, RoundRobin) {
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
}
TEST_F(RoundRobinEnd2endTest, RoundRobinReconnect) {
// Start servers and send one RPC per server.
const int kNumServers = 1;
std::vector<int> ports;
ports.push_back(grpc_pick_unused_port_or_die());
StartServers(kNumServers, ports);
ResetStub(true /* round_robin */);
// Send one RPC per backend and make sure they are used in order.
// Note: This relies on the fact that the subchannels are reported in
// state READY in the order in which the addresses are specified,
// which is only true because the backends are all local.
for (size_t i = 0; i < servers_.size(); ++i) {
SendRpc(1);
EXPECT_EQ(1, servers_[i]->service_.request_count()) << "for backend #" << i;
}
// Check LB policy name for the channel.
EXPECT_EQ("round_robin", channel_->GetLoadBalancingPolicyName());
// Kill all servers
for (size_t i = 0; i < servers_.size(); ++i) {
servers_[i]->Shutdown();
}
// Client request should fail.
SendRpc(1, false);
// Bring servers back up on the same port (we aren't recreating the channel).
StartServers(kNumServers, ports);
// Client request should succeed.
SendRpc(1);
}
} // namespace
} // namespace testing
} // namespace grpc

@ -315,7 +315,7 @@ BENCHMARK(BM_PumpStreamServerToClient_Trickle)->Apply(StreamingTrickleArgs);
static void BM_PumpUnbalancedUnary_Trickle(benchmark::State& state) {
EchoTestService::AsyncService service;
std::unique_ptr<TrickledCHTTP2> fixture(new TrickledCHTTP2(
&service, true, state.range(0) /* req_size */,
&service, false, state.range(0) /* req_size */,
state.range(1) /* resp_size */, state.range(2) /* bw in kbit/s */));
EchoRequest send_request;
EchoResponse send_response;

@ -70,8 +70,14 @@
/* Define to 1 if bool is an available type. */
#define HAVE_BOOL_T 1
/* Define to 1 if you have the clock_gettime function and monotonic timer. */
#define HAVE_CLOCK_GETTIME_MONOTONIC 1
/* Define HAVE_CLOCK_GETTIME_MONOTONIC to 1 if you have the clock_gettime
* function and monotonic timer.
*
* Note: setting HAVE_CLOCK_GETTIME_MONOTONIC causes use of the clock_gettime
* function from glibc, don't set it to support glibc < 2.17 */
#ifndef GPR_BACKWARDS_COMPATIBILITY_MODE
#define HAVE_CLOCK_GETTIME_MONOTONIC 1
#endif
/* Define to 1 if you have the closesocket function. */
/* #undef HAVE_CLOSESOCKET */
@ -505,6 +511,34 @@
# define _DARWIN_USE_64_BIT_INODE 1
#endif
#ifdef GPR_BACKWARDS_COMPATIBILITY_MODE
/* Redefine the fd_set macros for GLIBC < 2.15 support.
* This is a backwards compatibility hack. At version 2.15, GLIBC introduces
* the __fdelt_chk function, and starts using it within its fd_set macros
* (which c-ares uses). For compatibility with GLIBC < 2.15, we need to redefine
* the fd_set macros to not use __fdelt_chk. */
#include <sys/select.h>
#undef FD_SET
#undef FD_CLR
#undef FD_ISSET
/* 'FD_ZERO' doesn't use __fdelt_chk, no need to redefine. */
#ifdef __FDS_BITS
#define GRPC_CARES_FDS_BITS(set) __FDS_BITS(set)
#else
#define GRPC_CARES_FDS_BITS(set) ((set)->fds_bits)
#endif
#define GRPC_CARES_FD_MASK(d) ((long int)(1UL << (d) % NFDBITS))
#define FD_SET(d, set) \
((void) (GRPC_CARES_FDS_BITS (set)[ (d) / NFDBITS ] |= GRPC_CARES_FD_MASK(d)))
#define FD_CLR(d, set) \
((void) (GRPC_CARES_FDS_BITS (set)[ (d) / NFDBITS ] &= ~GRPC_CARES_FD_MASK(d)))
#define FD_ISSET(d, set) \
((GRPC_CARES_FDS_BITS (set)[ (d) / NFDBITS ] & GRPC_CARES_FD_MASK(d)) != 0)
#endif /* GPR_BACKWARDS_COMPATIBILITY_MODE */
/* Number of bits in a file offset, on hosts where this is settable. */
/* #undef _FILE_OFFSET_BITS */

@ -86,6 +86,14 @@ RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
RUN apt-get update && apt-get install -y \
php5 php5-dev php-pear phpunit
##################
# Install cross compiler for ARM
RUN echo 'deb http://emdebian.org/tools/debian/ jessie main' | tee -a /etc/apt/sources.list.d/crosstools.list && \
curl http://emdebian.org/tools/debian/emdebian-toolchain-archive.key | apt-key add -
RUN dpkg --add-architecture armhf && apt-get update && apt-get install -y crossbuild-essential-armhf
RUN mkdir /var/local/jenkins
# Define the default command.

@ -45,10 +45,9 @@ RUN yum install -y devtoolset-1.1 \
devtoolset-1.1-libstdc++-devel.i686 || true
# Update Git to version >1.7 to allow cloning submodules with --reference arg.
RUN yum remove -y git
RUN yum install -y epel-release
RUN yum install -y https://centos6.iuscommunity.org/ius-release.rpm
RUN yum install -y git2u
RUN yum remove -y git && yum clean all
RUN yum install -y https://centos6.iuscommunity.org/ius-release.rpm && yum clean all
RUN yum install -y git2u && yum clean all
# Start in devtoolset environment that uses GCC 4.7
CMD ["scl", "enable", "devtoolset-1.1", "bash"]

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Copyright 2015 gRPC authors.
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -20,4 +20,4 @@ set -ex
cd $(dirname $0)/../..
tools/run_tests/start_port_server.py
tools/profiling/qps/qps_diff.py -d origin/$ghprbTargetBranch
tools/profiling/qps/qps_diff.py -d origin/$ghprbTargetBranch

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Copyright 2015 gRPC authors.
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@ -264,11 +264,12 @@ class NodeExtArtifact:
return create_docker_jobspec(
self.name,
'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
'tools/run_tests/artifacts/build_artifact_node.sh {}'.format(self.gyp_arch))
'tools/run_tests/artifacts/build_artifact_node.sh {} {}'.format(
self.gyp_arch, self.platform))
else:
return create_jobspec(self.name,
['tools/run_tests/artifacts/build_artifact_node.sh',
self.gyp_arch],
self.gyp_arch, self.platform],
use_workspace=True)
class PHPArtifact:
@ -328,7 +329,7 @@ class ProtocArtifact:
environ=environ,
use_workspace=True)
else:
generator = 'Visual Studio 12 Win64' if self.arch == 'x64' else 'Visual Studio 12'
generator = 'Visual Studio 12 Win64' if self.arch == 'x64' else 'Visual Studio 12'
vcplatform = 'x64' if self.arch == 'x64' else 'Win32'
return create_jobspec(self.name,
['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],

@ -14,6 +14,7 @@
# limitations under the License.
NODE_TARGET_ARCH=$1
NODE_TARGET_OS=$2
source ~/.nvm/nvm.sh
nvm use 8
@ -35,6 +36,12 @@ for version in ${node_versions[@]}
do
./node_modules/.bin/node-pre-gyp configure rebuild package --target=$version --target_arch=$NODE_TARGET_ARCH --grpc_alpine=true
cp -r build/stage/* "${ARTIFACTS_OUT}"/
if [ "$NODE_TARGET_ARCH" == 'x64' ] && [ "$NODE_TARGET_OS" == 'linux' ]
then
# Cross compile for ARM on x64
CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ LD=arm-linux-gnueabihf-g++ ./node_modules/.bin/node-pre-gyp configure rebuild package testpackage --target=$version --target_arch=arm
cp -r build/stage/* "${ARTIFACTS_OUT}"/
fi
done
for version in ${electron_versions[@]}

Loading…
Cancel
Save