Merge branch 'master' of github.com:ctiller/grpc

changes/47/217547/1
Craig Tiller 10 years ago
commit 55e8c6e427
  1. .gitignore (3)
  2. INSTALL (2)
  3. Makefile (312)
  4. build.json (54)
  5. examples/tips/client.cc (60)
  6. examples/tips/client.h (36)
  7. examples/tips/client_main.cc (73)
  8. examples/tips/client_test.cc (106)
  9. examples/tips/empty.proto (13)
  10. examples/tips/label.proto (48)
  11. examples/tips/pubsub.proto (702)
  12. include/grpc/grpc.h (3)
  13. include/grpc/support/port_platform.h (8)
  14. src/core/iomgr/endpoint_pair_posix.c (6)
  15. src/core/iomgr/fd_posix.c (6)
  16. src/core/iomgr/pollset.h (4)
  17. src/core/iomgr/pollset_kick.h (6)
  18. src/core/iomgr/pollset_kick_posix.c (6)
  19. src/core/iomgr/pollset_kick_windows.h (45)
  20. src/core/iomgr/pollset_posix.c (6)
  21. src/core/iomgr/pollset_windows.c (38)
  22. src/core/iomgr/pollset_windows.h (54)
  23. src/core/iomgr/resolve_address.h (6)
  24. src/core/iomgr/socket_utils_common_posix.c (10)
  25. src/core/iomgr/socket_utils_posix.c (18)
  26. src/core/iomgr/tcp_client_posix.c (6)
  27. src/core/iomgr/tcp_posix.c (6)
  28. src/core/iomgr/tcp_server.h (5)
  29. src/core/iomgr/tcp_server_posix.c (10)
  30. src/core/security/server_secure_chttp2.c (16)
  31. src/core/support/time_posix.c (2)
  32. src/core/transport/chttp2/frame_data.c (2)
  33. src/core/transport/chttp2/hpack_parser.c (2)
  34. src/core/transport/chttp2_transport.c (2)
  35. src/core/transport/stream_op.c (2)
  36. src/node/client.js (53)
  37. src/node/interop/interop_server.js (11)
  38. src/node/server.cc (4)
  39. src/node/server.js (69)
  40. src/node/surface_server.js (3)
  41. src/node/test/client_server_test.js (159)
  42. src/node/test/end_to_end_test.js (239)
  43. src/node/test/interop_sanity_test.js (15)
  44. src/node/test/math_client_test.js (10)
  45. src/node/test/server_test.js (87)
  46. src/php/ext/grpc/credentials.c (2)
  47. src/python/__init__.py (0)
  48. src/python/_framework/__init__.py (0)
  49. src/python/_framework/foundation/__init__.py (0)
  50. src/python/_framework/foundation/_logging_pool_test.py (65)
  51. src/python/_framework/foundation/logging_pool.py (83)
  52. src/ruby/README.md (83)
  53. src/ruby/bin/interop/README.md (11)
  54. src/ruby/bin/interop/interop_server.rb (4)
  55. src/ruby/ext/grpc/extconf.rb (8)
  56. src/ruby/ext/grpc/rb_completion_queue.c (2)
  57. src/ruby/grpc.gemspec (8)
  58. src/ruby/lib/grpc/generic/bidi_call.rb (2)
  59. src/ruby/lib/grpc/generic/rpc_server.rb (6)
  60. src/ruby/lib/grpc/logconfig.rb (2)
  61. src/ruby/spec/client_server_spec.rb (2)
  62. src/ruby/spec/testdata/README (3)
  63. templates/Makefile.template (30)
  64. test/core/end2end/cq_verifier.c (12)
  65. test/core/iomgr/fd_posix_test.c (25)
  66. test/core/iomgr/resolve_address_test.c (5)
  67. test/core/iomgr/sockaddr_utils_test.c (6)
  68. test/core/iomgr/tcp_client_posix_test.c (4)
  69. test/core/transport/transport_end2end_tests.c (5)
  70. test/core/util/test_config.c (2)
  71. test/cpp/interop/client.cc (4)
  72. tools/dockerfile/grpc_go/Dockerfile (27)
  73. tools/dockerfile/grpc_go/README.md (4)
  74. tools/gce_setup/grpc_docker.sh (81)
  75. tools/gce_setup/shared_startup_funcs.sh (21)
  76. tools/run_tests/build_python.sh (10)
  77. tools/run_tests/jobset.py (4)
  78. tools/run_tests/run_python.sh (10)
  79. tools/run_tests/run_tests.py (18)
  80. tools/run_tests/tests.json (4)
  81. vsprojects/vs2013/grpc.vcxproj (4)
  82. vsprojects/vs2013/grpc_unsecure.vcxproj (4)

.gitignore

@@ -4,6 +4,9 @@ gens
 libs
 objs
+# Python virtual environment (pre-3.4 only)
+python2.7_virtual_environment
+
 # gcov coverage data
 coverage
 *.gcno

INSTALL
@@ -17,7 +17,7 @@ A typical unix installation won't require any more steps than running:
 You don't need anything else than GNU Make and gcc. Under a Debian or
 Ubuntu system, this should boil down to the following package:
-# apt-get install build-essential
+# apt-get install build-essential python-all-dev python-virtualenv
 *******************************

Makefile: file diff suppressed because one or more lines are too long

build.json
@@ -48,7 +48,9 @@
 "src/core/iomgr/pollset.h",
 "src/core/iomgr/pollset_kick.h",
 "src/core/iomgr/pollset_kick_posix.h",
+"src/core/iomgr/pollset_kick_windows.h",
 "src/core/iomgr/pollset_posix.h",
+"src/core/iomgr/pollset_windows.h",
 "src/core/iomgr/resolve_address.h",
 "src/core/iomgr/sockaddr.h",
 "src/core/iomgr/sockaddr_posix.h",
@@ -126,6 +128,7 @@
 "src/core/iomgr/pollset_kick_posix.c",
 "src/core/iomgr/pollset_multipoller_with_poll_posix.c",
 "src/core/iomgr/pollset_posix.c",
+"src/core/iomgr/pollset_windows.c",
 "src/core/iomgr/resolve_address_posix.c",
 "src/core/iomgr/sockaddr_utils.c",
 "src/core/iomgr/socket_utils_common_posix.c",
@@ -408,6 +411,22 @@
 "test/cpp/end2end/async_test_server.cc",
 "test/cpp/util/create_test_channel.cc"
 ]
+},
+{
+"name": "tips_client_lib",
+"build": "private",
+"language": "c++",
+"src": [
+"examples/tips/label.proto",
+"examples/tips/empty.proto",
+"examples/tips/pubsub.proto",
+"examples/tips/client.cc"
+],
+"deps": [
+"grpc++",
+"grpc",
+"gpr"
+]
 }
 ],
 "targets": [
@@ -1493,6 +1512,41 @@
 ],
 "run": false
 },
+{
+"name": "tips_client",
+"build": "test",
+"run": false,
+"language": "c++",
+"src": [
+"examples/tips/client_main.cc"
+],
+"deps": [
+"tips_client_lib",
+"grpc++_test_util",
+"grpc_test_util",
+"grpc++",
+"grpc",
+"gpr_test_util",
+"gpr"
+]
+},
+{
+"name": "tips_client_test",
+"build": "test",
+"language": "c++",
+"src": [
+"examples/tips/client_test.cc"
+],
+"deps": [
+"tips_client_lib",
+"grpc++_test_util",
+"grpc_test_util",
+"grpc++",
+"grpc",
+"gpr_test_util",
+"gpr"
+]
+},
 {
 "name": "qps_client",
 "build": "test",

examples/tips/client.cc
@@ -0,0 +1,60 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/client_context.h>
#include "examples/tips/client.h"
using tech::pubsub::Topic;
using tech::pubsub::PublisherService;
namespace grpc {
namespace examples {
namespace tips {
Client::Client(std::shared_ptr<ChannelInterface> channel)
: stub_(PublisherService::NewStub(channel)) {
}
Status Client::CreateTopic(grpc::string topic) {
Topic request;
Topic response;
request.set_name(topic);
ClientContext context;
return stub_->CreateTopic(&context, request, &response);
}
} // namespace tips
} // namespace examples
} // namespace grpc

examples/tips/client.h
@@ -31,22 +31,24 @@
  *
  */
-var net = require('net');
-/**
- * Finds a free port that a server can bind to, in the format
- * "address:port"
- * @param {function(string)} cb The callback that should execute when the port
- * is available
- */
-function nextAvailablePort(cb) {
-  var server = net.createServer();
-  server.listen(function() {
-    var address = server.address();
-    server.close(function() {
-      cb(address.address + ':' + address.port.toString());
-    });
-  });
-}
-exports.nextAvailablePort = nextAvailablePort;
+#include <grpc++/channel_interface.h>
+#include <grpc++/status.h>
+#include "examples/tips/pubsub.pb.h"
+namespace grpc {
+namespace examples {
+namespace tips {
+class Client {
+ public:
+  Client(std::shared_ptr<grpc::ChannelInterface> channel);
+  Status CreateTopic(grpc::string topic);
+ private:
+  std::unique_ptr<tech::pubsub::PublisherService::Stub> stub_;
+};
+}  // namespace tips
+}  // namespace examples
+}  // namespace grpc

examples/tips/client_main.cc
@@ -0,0 +1,73 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <google/gflags.h>
#include <grpc++/channel_interface.h>
#include <grpc++/create_channel.h>
#include <grpc++/status.h>
#include "examples/tips/client.h"
#include "test/cpp/util/create_test_channel.h"
DEFINE_bool(enable_ssl, true, "Whether to use ssl/tls.");
DEFINE_bool(use_prod_roots, true, "True to use SSL roots for production GFE");
DEFINE_int32(server_port, 0, "Server port.");
DEFINE_string(server_host, "127.0.0.1", "Server host to connect to");
DEFINE_string(server_host_override, "foo.test.google.com",
"Override the server host which is sent in HTTP header");
int main(int argc, char** argv) {
grpc_init();
google::ParseCommandLineFlags(&argc, &argv, true);
gpr_log(GPR_INFO, "Start TIPS client");
GPR_ASSERT(FLAGS_server_port);
const int host_port_buf_size = 1024;
char host_port[host_port_buf_size];
snprintf(host_port, host_port_buf_size, "%s:%d", FLAGS_server_host.c_str(),
FLAGS_server_port);
std::shared_ptr<grpc::ChannelInterface> channel(
grpc::CreateTestChannel(host_port, FLAGS_server_host_override,
FLAGS_enable_ssl, FLAGS_use_prod_roots));
grpc::examples::tips::Client client(channel);
grpc::Status s = client.CreateTopic("test");
GPR_ASSERT(s.IsOk());
channel.reset();
grpc_shutdown();
return 0;
}

examples/tips/client_test.cc
@@ -0,0 +1,106 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc++/channel_arguments.h>
#include <grpc++/channel_interface.h>
#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc++/status.h>
#include <gtest/gtest.h>
#include "examples/tips/client.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
using grpc::ChannelInterface;
namespace grpc {
namespace testing {
namespace {
const char kTopic[] = "test topic";
class PublishServiceImpl : public tech::pubsub::PublisherService::Service {
public:
Status CreateTopic(::grpc::ServerContext* context,
const ::tech::pubsub::Topic* request,
::tech::pubsub::Topic* response) override {
EXPECT_EQ(request->name(), kTopic);
return Status::OK;
}
};
class End2endTest : public ::testing::Test {
protected:
void SetUp() override {
int port = grpc_pick_unused_port_or_die();
server_address_ << "localhost:" << port;
// Setup server
ServerBuilder builder;
builder.AddPort(server_address_.str());
builder.RegisterService(service_.service());
server_ = builder.BuildAndStart();
channel_ = CreateChannel(server_address_.str(), ChannelArguments());
}
void TearDown() override { server_->Shutdown(); }
std::unique_ptr<Server> server_;
std::ostringstream server_address_;
PublishServiceImpl service_;
std::shared_ptr<ChannelInterface> channel_;
};
TEST_F(End2endTest, CreateTopic) {
grpc::examples::tips::Client client(channel_);
client.CreateTopic(kTopic);
}
} // namespace
} // namespace testing
} // namespace grpc
int main(int argc, char** argv) {
grpc_test_init(argc, argv);
grpc_init();
::testing::InitGoogleTest(&argc, argv);
gpr_log(GPR_INFO, "Start test ...");
int result = RUN_ALL_TESTS();
grpc_shutdown();
return result;
}

examples/tips/empty.proto
@@ -0,0 +1,13 @@
syntax = "proto2";
package proto2;
// An empty message that you can re-use to avoid defining duplicated empty
// messages in your project. A typical example is to use it as argument or the
// return value of a service API. For instance:
//
// service Foo {
// rpc Bar (proto2.Empty) returns (proto2.Empty) { };
// };
//
message Empty {}

examples/tips/label.proto
@@ -0,0 +1,48 @@
// Labels provide a way to associate user-defined metadata with various
// objects. Labels may be used to organize objects into non-hierarchical
// groups; think metadata tags attached to mp3s.
syntax = "proto2";
package tech.label;
// A key-value pair applied to a given object.
message Label {
// The key of a label is a syntactically valid URL (as per RFC 1738) with
// the "scheme" and initial slashes omitted and with the additional
// restrictions noted below. Each key should be globally unique. The
// "host" portion is called the "namespace" and is not necessarily
// resolvable to a network endpoint. Instead, the namespace indicates what
// system or entity defines the semantics of the label. Namespaces do not
// restrict the set of objects to which a label may be associated.
//
// Keys are defined by the following grammar:
//
// key = hostname "/" kpath
// kpath = ksegment *[ "/" ksegment ]
// ksegment = alphadigit | *[ alphadigit | "-" | "_" | "." ]
//
// where "hostname" and "alphadigit" are defined as in RFC 1738.
//
// Example key:
// spanner.google.com/universe
required string key = 1;
// The value of the label.
oneof value {
// A string value.
string str_value = 2;
// An integer value.
int64 num_value = 3;
}
}
// A collection of labels, such as the set of all labels attached to an
// object. Each label in the set must have a different key.
//
// Users should prefer to embed "repeated Label" directly when possible.
// This message should only be used in cases where that isn't possible (e.g.
// with oneof).
message Labels {
repeated Label label = 1;
}

examples/tips/pubsub.proto
@@ -0,0 +1,702 @@
// Specification of the Pubsub API.
syntax = "proto2";
import "examples/tips/empty.proto";
import "examples/tips/label.proto";
package tech.pubsub;
// -----------------------------------------------------------------------------
// Overview of the Pubsub API
// -----------------------------------------------------------------------------
// This file describes an API for a Pubsub system. This system provides a
// reliable many-to-many communication mechanism between independently written
// publishers and subscribers where the publisher publishes messages to "topics"
// and each subscriber creates a "subscription" and consumes messages from it.
//
// (a) The pubsub system maintains bindings between topics and subscriptions.
// (b) A publisher publishes messages into a topic.
// (c) The pubsub system delivers messages from topics into relevant
// subscriptions.
// (d) A subscriber receives pending messages from its subscription and
// acknowledges or nacks each one to the pubsub system.
// (e) The pubsub system removes acknowledged messages from that subscription.
// -----------------------------------------------------------------------------
// Data Model
// -----------------------------------------------------------------------------
// The data model consists of the following:
//
// * Topic: A topic is a resource to which messages are published by publishers.
// Topics are named, and the name of the topic is unique within the pubsub
// system.
//
// * Subscription: A subscription records the subscriber's interest in a topic.
// It can optionally include a query to select a subset of interesting
// messages. The pubsub system maintains a logical cursor tracking the
// matching messages which still need to be delivered and acked so that
// they can be retried as needed. The set of messages that have not been
// acknowledged is called the subscription backlog.
//
// * Message: A message is a unit of data that flows in the system. It contains
// opaque data from the publisher along with its labels.
//
// * Message Labels (optional): A set of opaque key, value pairs assigned
// by the publisher which the subscriber can use for filtering out messages
// in the topic. For example, a label with key "foo.com/device_type" and
// value "mobile" may be added for messages that are only relevant for a
// mobile subscriber; a subscriber on a phone may decide to create a
// subscription only for messages that have this label.
// -----------------------------------------------------------------------------
// Publisher Flow
// -----------------------------------------------------------------------------
// A publisher publishes messages to the topic using the Publish request:
//
// PubsubMessage message;
// message.set_data("....");
// Label label;
// label.set_key("foo.com/key1");
// label.set_str_value("value1");
// message.add_label(label);
// PublishRequest request;
// request.set_topic("topicName");
// request.set_message(message);
// PublisherService.Publish(request);
// -----------------------------------------------------------------------------
// Subscriber Flow
// -----------------------------------------------------------------------------
// The subscriber part of the API is richer than the publisher part and has a
// number of concepts w.r.t. subscription creation and monitoring:
//
// (1) A subscriber creates a subscription using the CreateSubscription call.
// It may specify an optional "query" to indicate that it wants to receive
// only messages with a certain set of labels using the label query syntax.
// It may also specify an optional truncation policy to indicate when old
// messages from the subscription can be removed.
//
// (2) A subscriber receives messages in one of two ways: via push or pull.
//
// (a) To receive messages via push, the PushConfig field must be specified in
// the Subscription parameter when creating a subscription. The PushConfig
// specifies an endpoint at which the subscriber must expose the
// PushEndpointService. Messages are received via the HandlePubsubEvent
// method. The push subscriber responds to the HandlePubsubEvent method
// with a result code that indicates one of three things: Ack (the message
// has been successfully processed and the Pubsub system may delete it),
// Nack (the message has been rejected, the Pubsub system should resend it
// at a later time), or Push-Back (this is a Nack with the additional
// semantics that the subscriber is overloaded and the pubsub system should
// back off on the rate at which it is invoking HandlePubsubEvent). The
// endpoint may be a load balancer for better scalability.
//
// (b) To receive messages via pull a subscriber calls the Pull method on the
// SubscriberService to get messages from the subscription. For each
// individual message, the subscriber may use the ack_id received in the
// PullResponse to Ack the message, Nack the message, or modify the ack
// deadline with ModifyAckDeadline. See the
// Subscription.ack_deadline_seconds field documentation for details on the
// ack deadline behavior.
//
// Note: Messages may be consumed in parallel by multiple subscribers making
// Pull calls to the same subscription; this will result in the set of
// messages from the subscription being shared and each subscriber
// receiving a subset of the messages.
//
// (4) The subscriber can explicitly truncate the current subscription.
//
// (5) "Truncated" events are delivered when a subscription is
// truncated, whether due to the subscription's truncation policy
// or an explicit request from the subscriber.
//
// Subscription creation:
//
// Subscription subscription;
// subscription.set_topic("topicName");
// subscription.set_name("subscriptionName");
// subscription.push_config().set_push_endpoint("machinename:8888");
// SubscriberService.CreateSubscription(subscription);
//
// Consuming messages via push:
//
// TODO(eschapira): Add HTTP push example.
//
// The port 'machinename:8888' must be bound to a stubby server that implements
// the PushEndpointService with the following method:
//
// int HandlePubsubEvent(PubsubEvent event) {
// if (event.subscription().equals("subscriptionName")) {
// if (event.has_message()) {
// Process(event.message().data());
// } else if (event.truncated()) {
// ProcessTruncatedEvent();
// }
// }
// return OK; // This return code implies an acknowledgment
// }
//
// Consuming messages via pull:
//
// The subscription must be created without setting the push_config field.
//
// PullRequest pull_request;
// pull_request.set_subscription("subscriptionName");
// pull_request.set_return_immediately(false);
// while (true) {
// PullResponse pull_response;
// if (SubscriberService.Pull(pull_request, pull_response) == OK) {
// PubsubEvent event = pull_response.pubsub_event();
// if (event.has_message()) {
// Process(event.message().data());
// } else if (event.truncated()) {
// ProcessTruncatedEvent();
// }
// AcknowledgeRequest ack_request;
// ackRequest.set_subscription("subscriptionName");
// ackRequest.set_ack_id(pull_response.ack_id());
// SubscriberService.Acknowledge(ack_request);
// }
// }
// -----------------------------------------------------------------------------
// Reliability Semantics
// -----------------------------------------------------------------------------
// When a subscriber successfully creates a subscription using
// Subscriber.CreateSubscription, it establishes a "subscription point" with
// respect to that subscription - the subscriber is guaranteed to receive any
// message published after this subscription point that matches the
// subscription's query. Note that messages published before the Subscription
// point may or may not be delivered.
//
// If the system truncates the subscription according to the specified
// truncation policy, the system delivers a subscription status event with the
// "truncated" field set to true. We refer to such events as "truncation
// events". A truncation event:
//
// * Informs the subscriber that part of the subscription messages have been
// discarded. The subscriber may want to recover from the message loss, e.g.,
// by resyncing its state with its backend.
// * Establishes a new subscription point, i.e., the subscriber is guaranteed to
// receive all changes published after the truncation event is received (or
// until another truncation event is received).
//
// Note that messages are not delivered in any particular order by the pubsub
// system. Furthermore, the system guarantees at-least-once delivery
// of each message or truncation events until acked.
// -----------------------------------------------------------------------------
// Deletion
// -----------------------------------------------------------------------------
// Both topics and subscriptions may be deleted. Deletion of a topic implies
// deletion of all attached subscriptions.
//
// When a subscription is deleted directly by calling DeleteSubscription, all
// messages are immediately dropped. If it is a pull subscriber, future pull
// requests will return NOT_FOUND.
//
// When a topic is deleted all corresponding subscriptions are immediately
// deleted, and subscribers experience the same behavior as directly deleting
// the subscription.
// -----------------------------------------------------------------------------
// The Publisher service and its protos.
// -----------------------------------------------------------------------------
// The service that an application uses to manipulate topics, and to send
// messages to a topic.
service PublisherService {
// Creates the given topic with the given name.
rpc CreateTopic(Topic) returns (Topic) {
}
// Adds a message to the topic. Returns NOT_FOUND if the topic does not
// exist.
// (-- For different error code values returned via Stubby, see
// util/task/codes.proto. --)
rpc Publish(PublishRequest) returns (proto2.Empty) {
}
// Adds one or more messages to the topic. Returns NOT_FOUND if the topic does
// not exist.
rpc PublishBatch(PublishBatchRequest) returns (PublishBatchResponse) {
}
// Gets the configuration of a topic. Since the topic only has the name
// attribute, this method is only useful to check the existence of a topic.
// If other attributes are added in the future, they will be returned here.
rpc GetTopic(GetTopicRequest) returns (Topic) {
}
// Lists matching topics.
rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse) {
}
// Deletes the topic with the given name. All subscriptions to this topic
// are also deleted. Returns NOT_FOUND if the topic does not exist.
// After a topic is deleted, a new topic may be created with the same name.
rpc DeleteTopic(DeleteTopicRequest) returns (proto2.Empty) {
}
}
// A topic resource.
message Topic {
// Name of the topic.
optional string name = 1;
}
// A message data and its labels.
message PubsubMessage {
// The message payload.
optional bytes data = 1;
// Optional list of labels for this message. Keys in this collection must
// be unique.
//(-- TODO(eschapira): Define how key namespace may be scoped to the topic.--)
repeated tech.label.Label label = 2;
// ID of this message assigned by the server at publication time. Guaranteed
// to be unique within the topic. This value may be read by a subscriber
// that receives a PubsubMessage via a Pull call or a push delivery. It must
// not be populated by a publisher in a Publish call.
optional string message_id = 3;
}
// Request for the GetTopic method.
message GetTopicRequest {
// The name of the topic to get.
optional string topic = 1;
}
// Request for the Publish method.
message PublishRequest {
// The message in the request will be published on this topic.
optional string topic = 1;
// The message to publish.
optional PubsubMessage message = 2;
}
// Request for the PublishBatch method.
message PublishBatchRequest {
// The messages in the request will be published on this topic.
optional string topic = 1;
// The messages to publish.
repeated PubsubMessage messages = 2;
}
// Response for the PublishBatch method.
message PublishBatchResponse {
// The server-assigned ID of each published message, in the same order as
// the messages in the request. IDs are guaranteed to be unique within
// the topic.
repeated string message_ids = 1;
}
// Request for the ListTopics method.
message ListTopicsRequest {
// A valid label query expression.
// (-- Which labels are required or supported is implementation-specific. --)
optional string query = 1;
// Maximum number of topics to return.
// (-- If not specified or <= 0, the implementation will select a reasonable
// value. --)
optional int32 max_results = 2;
// The value obtained in the last <code>ListTopicsResponse</code>
// for continuation.
optional string page_token = 3;
}
// Response for the ListTopics method.
message ListTopicsResponse {
// The resulting topics.
repeated Topic topic = 1;
// If not empty, indicates that there are more topics that match the request,
// and this value should be passed to the next <code>ListTopicsRequest</code>
// to continue.
optional string next_page_token = 2;
}
// Request for the Delete method.
message DeleteTopicRequest {
// Name of the topic to delete.
optional string topic = 1;
}
// -----------------------------------------------------------------------------
// The Subscriber service and its protos.
// -----------------------------------------------------------------------------
// The service that an application uses to manipulate subscriptions and to
// consume messages from a subscription via the pull method.
service SubscriberService {
// Creates a subscription on a given topic for a given subscriber.
// If the subscription already exists, returns ALREADY_EXISTS.
// If the corresponding topic doesn't exist, returns NOT_FOUND.
//
// If the name is not provided in the request, the server will assign a random
// name for this subscription on the same project as the topic.
rpc CreateSubscription(Subscription) returns (Subscription) {
}
// Gets the configuration details of a subscription.
rpc GetSubscription(GetSubscriptionRequest) returns (Subscription) {
}
// Lists matching subscriptions.
rpc ListSubscriptions(ListSubscriptionsRequest)
returns (ListSubscriptionsResponse) {
}
// Deletes an existing subscription. All pending messages in the subscription
// are immediately dropped. Calls to Pull after deletion will return
// NOT_FOUND.
rpc DeleteSubscription(DeleteSubscriptionRequest) returns (proto2.Empty) {
}
// Removes all the pending messages in the subscription and releases the
// storage associated with them. Results in a truncation event to be sent to
// the subscriber. Messages added after this call returns are stored in the
// subscription as before.
rpc TruncateSubscription(TruncateSubscriptionRequest) returns (proto2.Empty) {
}
//
// Push subscriber calls.
//
// Modifies the <code>PushConfig</code> for a specified subscription.
// This method can be used to suspend the flow of messages to an endpoint
// by clearing the <code>PushConfig</code> field in the request. Messages
// will be accumulated for delivery even if no push configuration is
// defined or while the configuration is modified.
rpc ModifyPushConfig(ModifyPushConfigRequest) returns (proto2.Empty) {
}
//
// Pull Subscriber calls
//
// Pulls a single message from the server.
// If return_immediately is true, and no messages are available in the
// subscription, this method returns FAILED_PRECONDITION. The system is free
// to return an UNAVAILABLE error if no messages are available in a
// reasonable amount of time (to reduce system load).
rpc Pull(PullRequest) returns (PullResponse) {
}
// Pulls messages from the server. Returns an empty list if there are no
// messages available in the backlog. The system is free to return UNAVAILABLE
// if there are too many pull requests outstanding for the given subscription.
rpc PullBatch(PullBatchRequest) returns (PullBatchResponse) {
}
// Modifies the Ack deadline for a message received from a pull request.
rpc ModifyAckDeadline(ModifyAckDeadlineRequest) returns (proto2.Empty) {
}
// Acknowledges a particular received message: the Pub/Sub system can remove
// the given message from the subscription. Acknowledging a message whose
// Ack deadline has expired may succeed, but the message could have been
// already redelivered. Acknowledging a message more than once will not
// result in an error. This is only used for messages received via pull.
rpc Acknowledge(AcknowledgeRequest) returns (proto2.Empty) {
}
// Refuses processing a particular received message. The system will
// redeliver this message to some consumer of the subscription at some
// future time. This is only used for messages received via pull.
rpc Nack(NackRequest) returns (proto2.Empty) {
}
}
// A subscription resource.
message Subscription {
// Name of the subscription.
optional string name = 1;
// The name of the topic from which this subscription is receiving messages.
optional string topic = 2;
// If <code>query</code> is non-empty, only messages on the subscriber's
// topic whose labels match the query will be returned. Otherwise all
// messages on the topic will be returned.
// (-- The query syntax is described in tech/label/proto/label_query.proto --)
optional string query = 3;
// The subscriber may specify requirements for truncating unacknowledged
// subscription entries. The system will honor the
// <code>CreateSubscription</code> request only if it can meet these
// requirements. If this field is not specified, messages are never truncated
// by the system.
optional TruncationPolicy truncation_policy = 4;
// Specifies which messages can be truncated by the system.
message TruncationPolicy {
oneof policy {
// If <code>max_bytes</code> is specified, the system is allowed to drop
// old messages to keep the combined size of stored messages under
// <code>max_bytes</code>. This is a hint; the system may keep more than
// this many bytes, but will make a best effort to keep the size from
// growing much beyond this parameter.
int64 max_bytes = 1;
// If <code>max_age_seconds</code> is specified, the system is allowed to
// drop messages that have been stored for at least this many seconds.
// This is a hint; the system may keep these messages, but will make a
// best effort to remove them when their maximum age is reached.
int64 max_age_seconds = 2;
}
}
// If push delivery is used with this subscription, this field is
// used to configure it.
optional PushConfig push_config = 5;
// For either push or pull delivery, the value is the maximum time after a
// subscriber receives a message before the subscriber should acknowledge or
// Nack the message. If the Ack deadline for a message passes without an
// Ack or a Nack, the Pub/Sub system will eventually redeliver the message.
// If a subscriber acknowledges after the deadline, the Pub/Sub system may
// accept the Ack, but it is possible that the message has been already
// delivered again. Multiple Acks to the message are allowed and will
// succeed.
//
// For push delivery, this value is used to set the request timeout for
// the call to the push endpoint.
//
// For pull delivery, this value is used as the initial value for the Ack
// deadline. It may be overridden for a specific pull request (message) with
// <code>ModifyAckDeadline</code>.
// While a message is outstanding (i.e. it has been delivered to a pull
// subscriber and the subscriber has not yet Acked or Nacked), the Pub/Sub
// system will not deliver that message to another pull subscriber
// (on a best-effort basis).
optional int32 ack_deadline_seconds = 6;
// If this parameter is set to n, the system is allowed to (but not required
// to) delete the subscription when at least n seconds have elapsed since the
// client presence was detected. (Presence is detected through any
// interaction using the subscription ID, including Pull(), Get(), or
// acknowledging a message.)
//
// If this parameter is not set, the subscription will stay live until
// explicitly deleted.
//
// Clients can detect such garbage collection when a Get call or a Pull call
// (for pull subscribers only) returns NOT_FOUND.
optional int64 garbage_collect_seconds = 7;
}
// Configuration for a push delivery endpoint.
message PushConfig {
// A URL locating the endpoint to which messages should be pushed.
// For example, a Webhook endpoint might use "https://example.com/push".
// (-- An Android application might use "gcm:<REGID>", where <REGID> is a
// GCM registration id allocated for pushing messages to the application. --)
optional string push_endpoint = 1;
}
// An event indicating a received message or truncation event.
message PubsubEvent {
// The subscription that received the event.
optional string subscription = 1;
oneof type {
// A received message.
PubsubMessage message = 2;
// Indicates that this subscription has been truncated.
bool truncated = 3;
// Indicates that this subscription has been deleted. (Note that pull
// subscribers will always receive NOT_FOUND in response in their pull
// request on the subscription, rather than seeing this boolean.)
bool deleted = 4;
}
}
// Request for the GetSubscription method.
message GetSubscriptionRequest {
// The name of the subscription to get.
optional string subscription = 1;
}
// Request for the ListSubscriptions method.
message ListSubscriptionsRequest {
// A valid label query expression.
// (-- Which labels are required or supported is implementation-specific.
// TODO(eschapira): This method must support to query by topic. We must
// define the key URI for the "topic" label. --)
optional string query = 1;
// Maximum number of subscriptions to return.
// (-- If not specified or <= 0, the implementation will select a reasonable
// value. --)
optional int32 max_results = 3;
// The value obtained in the last <code>ListSubscriptionsResponse</code>
// for continuation.
optional string page_token = 4;
}
// Response for the ListSubscriptions method.
message ListSubscriptionsResponse {
// The subscriptions that match the request.
repeated Subscription subscription = 1;
// If not empty, indicates that there are more subscriptions that match the
// request and this value should be passed to the next
// <code>ListSubscriptionsRequest</code> to continue.
optional string next_page_token = 2;
}
// Request for the TruncateSubscription method.
message TruncateSubscriptionRequest {
// The subscription that is being truncated.
optional string subscription = 1;
}
// Request for the DeleteSubscription method.
message DeleteSubscriptionRequest {
// The subscription to delete.
optional string subscription = 1;
}
// Request for the ModifyPushConfig method.
message ModifyPushConfigRequest {
// The name of the subscription.
optional string subscription = 1;
// An empty <code>push_config</code> indicates that the Pub/Sub system should
// pause pushing messages from the given subscription.
optional PushConfig push_config = 2;
}
// -----------------------------------------------------------------------------
// The protos used by a pull subscriber.
// -----------------------------------------------------------------------------
// Request for the Pull method.
message PullRequest {
// The subscription from which a message should be pulled.
optional string subscription = 1;
// If this is specified as true the system will respond immediately even if
// it is not able to return a message in the Pull response. Otherwise the
// system is allowed to wait until at least one message is available rather
// than returning FAILED_PRECONDITION. The client may cancel the request if
// it does not wish to wait any longer for the response.
optional bool return_immediately = 2;
}
// Either a <code>PubsubMessage</code> or a truncation event. One of these two
// must be populated.
message PullResponse {
// This ID must be used to acknowledge the received event or message.
optional string ack_id = 1;
// A pubsub message or truncation event.
optional PubsubEvent pubsub_event = 2;
}
// Request for the PullBatch method.
message PullBatchRequest {
// The subscription from which messages should be pulled.
optional string subscription = 1;
// If this is specified as true the system will respond immediately even if
// it is not able to return a message in the Pull response. Otherwise the
// system is allowed to wait until at least one message is available rather
// than returning no messages. The client may cancel the request if it does
// not wish to wait any longer for the response.
optional bool return_immediately = 2;
// The maximum number of PubsubEvents returned for this request. The Pub/Sub
// system may return fewer than the number of events specified.
optional int32 max_events = 3;
}
// Response for the PullBatch method.
message PullBatchResponse {
// Received Pub/Sub messages or status events. The Pub/Sub system will return
// zero messages if there are no more messages available in the backlog. The
// Pub/Sub system may return fewer than the max_events requested even if
// there are more messages available in the backlog.
repeated PullResponse pull_responses = 2;
}
// Request for the ModifyAckDeadline method.
message ModifyAckDeadlineRequest {
// The name of the subscription from which messages are being pulled.
optional string subscription = 1;
// The acknowledgment ID.
optional string ack_id = 2;
// The new Ack deadline. Must be >= 0.
optional int32 ack_deadline_seconds = 3;
}
// Request for the Acknowledge method.
message AcknowledgeRequest {
// The subscription whose message is being acknowledged.
optional string subscription = 1;
// The acknowledgment ID for the message being acknowledged. This was
// returned by the Pub/Sub system in the Pull response.
repeated string ack_id = 2;
}
// Request for the Nack method.
message NackRequest {
// The subscription whose message is being Nacked.
optional string subscription = 1;
// The acknowledgment ID for the message being refused. This was returned by
// the Pub/Sub system in the Pull response.
repeated string ack_id = 2;
}
// -----------------------------------------------------------------------------
// The service and protos used by a push subscriber.
// -----------------------------------------------------------------------------
// The service that a subscriber uses to handle messages sent via push
// delivery.
// This service is not currently exported for HTTP clients.
// TODO(eschapira): Explain HTTP subscribers.
service PushEndpointService {
// Sends a <code>PubsubMessage</code> or a subscription status event to a
// push endpoint.
// The push endpoint responds with an empty message and a code from
// util/task/codes.proto. The following codes have a particular meaning to the
// Pub/Sub system:
// OK - This is interpreted by Pub/Sub as Ack.
// ABORTED - This is interpreted by Pub/Sub as a Nack, without implying
// pushback for congestion control. The Pub/Sub system will
// retry this message at a later time.
// UNAVAILABLE - This is interpreted by Pub/Sub as a Nack, with the additional
// semantics of push-back. The Pub/Sub system will use an AIMD
// congestion control algorithm to backoff the rate of sending
// messages from this subscription.
// Any other code, or a failure to respond, will be interpreted in the same
// way as ABORTED; i.e. the system will retry the message at a later time to
// ensure reliable delivery.
rpc HandlePubsubEvent(PubsubEvent) returns (proto2.Empty);
}

include/grpc/grpc.h
@@ -428,7 +428,8 @@ grpc_server *grpc_server_create(grpc_completion_queue *cq,
    REQUIRES: server not started */
 int grpc_server_add_http2_port(grpc_server *server, const char *addr);
 
-/* Add a secure port to server; returns 1 on success, 0 on failure
+/* Add a secure port to server.
+   Returns bound port number on success, 0 on failure.
    REQUIRES: server not started */
 int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr);
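
A minimal usage sketch (not part of this commit) of the new return-value contract above; the "localhost:0" wildcard address and the helper name are assumptions for illustration only:

    #include <grpc/grpc.h>
    #include <grpc/support/log.h>

    /* Sketch only: add a secure port and report the port number the server
       actually bound. 0 means failure, any other value is the bound port. */
    static int bind_secure_port(grpc_server *server) {
      int port = grpc_server_add_secure_http2_port(server, "localhost:0");
      if (port == 0) {
        gpr_log(GPR_ERROR, "failed to add secure port");
      } else {
        gpr_log(GPR_INFO, "secure port bound: %d", port);
      }
      return port;
    }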

include/grpc/support/port_platform.h
@@ -132,6 +132,14 @@
 #error Must define exactly one of GPR_CPU_LINUX, GPR_CPU_POSIX, GPR_WIN32
 #endif
 
+#if defined(GPR_POSIX_MULTIPOLL_WITH_POLL) && !defined(GPR_POSIX_SOCKET)
+#error Must define GPR_POSIX_SOCKET to use GPR_POSIX_MULTIPOLL_WITH_POLL
+#endif
+
+#if defined(GPR_POSIX_SOCKET) + defined(GPR_WIN32) != 1
+#error Must define exactly one of GPR_POSIX_POLLSET, GPR_WIN32
+#endif
+
 typedef int16_t gpr_int16;
 typedef int32_t gpr_int32;
 typedef int64_t gpr_int64;

src/core/iomgr/endpoint_pair_posix.c
@@ -31,6 +31,10 @@
  *
  */
 
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/endpoint_pair.h"
 
 #include <errno.h>
@@ -59,3 +63,5 @@ grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size) {
   p.server = grpc_tcp_create(grpc_fd_create(sv[0]), read_slice_size);
   return p;
 }
+
+#endif

src/core/iomgr/fd_posix.c
@@ -31,6 +31,10 @@
  *
  */
 
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/fd_posix.h"
 
 #include <assert.h>
@@ -272,3 +276,5 @@ void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
 void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
   set_ready(fd, &fd->writest, allow_synchronous_callback);
 }
+
+#endif

src/core/iomgr/pollset.h
@@ -48,6 +48,10 @@
 #include "src/core/iomgr/pollset_posix.h"
 #endif
 
+#ifdef GPR_WIN32
+#include "src/core/iomgr/pollset_windows.h"
+#endif
+
 void grpc_pollset_init(grpc_pollset *pollset);
 void grpc_pollset_destroy(grpc_pollset *pollset);

src/core/iomgr/pollset_kick.h
@@ -41,8 +41,10 @@
 #ifdef GPR_POSIX_SOCKET
 #include "src/core/iomgr/pollset_kick_posix.h"
-#else
-#error "No pollset kick support on platform"
+#endif
+
+#ifdef GPR_WIN32
+#include "src/core/iomgr/pollset_kick_windows.h"
 #endif
 
 void grpc_pollset_kick_global_init(void);

src/core/iomgr/pollset_kick_posix.c
@@ -31,6 +31,10 @@
  *
  */
 
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/pollset_kick_posix.h"
 
 #include <errno.h>
@@ -175,3 +179,5 @@ void grpc_pollset_kick_kick(grpc_pollset_kick_state *kick_state) {
   }
   gpr_mu_unlock(&kick_state->mu);
 }
+
+#endif

src/core/iomgr/pollset_kick_windows.h
@@ -0,0 +1,45 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __GRPC_INTERNAL_IOMGR_POLLSET_KICK_WINDOWS_H_
#define __GRPC_INTERNAL_IOMGR_POLLSET_KICK_WINDOWS_H_
#include <grpc/support/sync.h>
struct grpc_kick_pipe_info;
typedef struct grpc_pollset_kick_state {
int unused;
} grpc_pollset_kick_state;
#endif /* __GRPC_INTERNAL_IOMGR_POLLSET_KICK_WINDOWS_H_ */

src/core/iomgr/pollset_posix.c
@@ -31,6 +31,10 @@
  *
  */
 
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/pollset_posix.h"
 
 #include <errno.h>
@@ -288,3 +292,5 @@ static void become_unary_pollset(grpc_pollset *pollset, grpc_fd *fd) {
   pollset->data.ptr = fd;
   grpc_fd_ref(fd);
 }
+
+#endif /* GPR_POSIX_POLLSET */

src/core/iomgr/pollset_windows.c
@@ -0,0 +1,38 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <grpc/support/port_platform.h>
#ifdef GPR_WIN32
#endif /* GPR_WIN32 */

src/core/iomgr/pollset_windows.h
@@ -0,0 +1,54 @@
/*
*
* Copyright 2014, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __GRPC_INTERNAL_IOMGR_POLLSET_WINDOWS_H_
#define __GRPC_INTERNAL_IOMGR_POLLSET_WINDOWS_H_
#include <grpc/support/sync.h>
#include "src/core/iomgr/pollset_kick.h"
/* forward declare only in this file to avoid leaking impl details via
pollset.h; real users of grpc_fd should always include 'fd_posix.h' and not
use the struct tag */
struct grpc_fd;
typedef struct grpc_pollset {
gpr_mu mu;
gpr_cv cv;
} grpc_pollset;
#define GRPC_POLLSET_MU(pollset) (&(pollset)->mu)
#define GRPC_POLLSET_CV(pollset) (&(pollset)->cv)
#endif /* __GRPC_INTERNAL_IOMGR_POLLSET_WINDOWS_H_ */
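
A small sketch (assumed usage, not code from this commit) of how the GRPC_POLLSET_MU accessor above is meant to be consumed, keeping callers independent of the posix- or windows-specific pollset layout; the helper name is hypothetical:

    #include <grpc/support/sync.h>

    /* Sketch only: take and release the pollset's lock via the accessor
       macro instead of touching the platform-specific struct fields. */
    static void with_pollset_locked(grpc_pollset *pollset) {
      gpr_mu_lock(GRPC_POLLSET_MU(pollset));
      /* ... work that must hold the pollset mutex ... */
      gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
    }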

src/core/iomgr/resolve_address.h
@@ -34,10 +34,12 @@
 #ifndef __GRPC_INTERNAL_IOMGR_RESOLVE_ADDRESS_H__
 #define __GRPC_INTERNAL_IOMGR_RESOLVE_ADDRESS_H__
 
-#include <sys/socket.h>
+#include <stddef.h>
+
+#define GRPC_MAX_SOCKADDR_SIZE 128
 
 typedef struct {
-  struct sockaddr_storage addr;
+  char addr[GRPC_MAX_SOCKADDR_SIZE];
   int len;
 } grpc_resolved_address;
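
A brief sketch (an assumption about intended usage, not code from this commit) of how the opaque buffer is filled: POSIX-specific code copies a native sockaddr into the byte array, while platform-neutral code that only carries the struct around no longer needs <sys/socket.h>; the helper name is hypothetical:

    #include <string.h>
    #include <sys/socket.h>
    #include "src/core/iomgr/resolve_address.h"

    /* Sketch only: store a native sockaddr in the opaque addr buffer. */
    static void fill_resolved_address(grpc_resolved_address *resolved,
                                      const struct sockaddr *from,
                                      socklen_t from_len) {
      memcpy(resolved->addr, from, (size_t)from_len);
      resolved->len = (int)from_len;
    }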

@@ -31,6 +31,10 @@
  *
  */
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/socket_utils_posix.h"
 #include <arpa/inet.h>
@@ -99,7 +103,7 @@ int grpc_set_socket_reuse_addr(int fd, int reuse) {
   socklen_t intlen = sizeof(newval);
   return 0 == setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)) &&
          0 == getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &newval, &intlen) &&
-         newval == val;
+         (newval != 0) == val;
 }
 /* disable nagle */
@@ -109,7 +113,7 @@ int grpc_set_socket_low_latency(int fd, int low_latency) {
   socklen_t intlen = sizeof(newval);
   return 0 == setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) &&
          0 == getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &newval, &intlen) &&
-         newval == val;
+         (newval != 0) == val;
 }
 static gpr_once g_probe_ipv6_once = GPR_ONCE_INIT;
@@ -187,3 +191,5 @@ int grpc_create_dualstack_socket(const struct sockaddr *addr, int type,
   *dsmode = family == AF_INET ? GRPC_DSMODE_IPV4 : GRPC_DSMODE_NONE;
   return socket(family, type, protocol);
 }
+
+#endif
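
The two option checks above were loosened because getsockopt only guarantees a zero/non-zero result for boolean socket options: some stacks read back a value other than exactly 1, so `newval == val` could fail spuriously. Comparing normalized booleans avoids that. A standalone sketch of the same idea (illustrative only, not the gRPC function itself):

```c
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Returns 1 if TCP_NODELAY was set and reads back in the requested state. */
static int set_nodelay(int fd, int enabled) {
  int val = enabled ? 1 : 0;
  int newval = 0;
  socklen_t len = sizeof(newval);
  if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) != 0) return 0;
  if (getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &newval, &len) != 0) return 0;
  return (newval != 0) == val; /* compare as booleans, not exact values */
}
```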

@@ -50,12 +50,22 @@ int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
   fd = accept(sockfd, addr, addrlen);
   if (fd >= 0) {
-    flags = fcntl(fd, F_GETFL, 0);
-    flags |= nonblock ? O_NONBLOCK : 0;
-    flags |= cloexec ? FD_CLOEXEC : 0;
-    GPR_ASSERT(fcntl(fd, F_SETFL, flags) == 0);
+    if (nonblock) {
+      flags = fcntl(fd, F_GETFL, 0);
+      if (flags < 0) goto close_and_error;
+      if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0) goto close_and_error;
+    }
+    if (cloexec) {
+      flags = fcntl(fd, F_GETFD, 0);
+      if (flags < 0) goto close_and_error;
+      if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) != 0) goto close_and_error;
+    }
   }
   return fd;
+
+close_and_error:
+  close(fd);
+  return -1;
 }
 #endif /* GPR_POSIX_SOCKETUTILS */
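
This is the fallback path used where accept4(2) is unavailable: accept first, then apply O_NONBLOCK through F_SETFL and FD_CLOEXEC through F_SETFD. The old code OR-ed FD_CLOEXEC into the file-status flags, which does nothing, since FD_CLOEXEC is a descriptor flag. Judging from the body, the parameters elided from the hunk header are nonblock/cloexec flags. A minimal, self-contained version of the same pattern (a sketch, not the gRPC function):

```c
#include <fcntl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Accept a connection, then opt into non-blocking and close-on-exec modes. */
static int accept_with_flags(int listen_fd, int nonblock, int cloexec) {
  int flags;
  int fd = accept(listen_fd, NULL, NULL);
  if (fd < 0) return -1;
  if (nonblock) {
    flags = fcntl(fd, F_GETFL, 0);
    if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) != 0) goto fail;
  }
  if (cloexec) {
    flags = fcntl(fd, F_GETFD, 0);
    if (flags < 0 || fcntl(fd, F_SETFD, flags | FD_CLOEXEC) != 0) goto fail;
  }
  return fd;
fail:
  close(fd);
  return -1;
}
```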

@@ -31,6 +31,10 @@
  *
  */
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/tcp_client.h"
 #include <errno.h>
@@ -229,3 +233,5 @@ void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
   grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
   grpc_fd_notify_on_write(ac->fd, on_writable, ac);
 }
+
+#endif

@@ -31,6 +31,10 @@
  *
  */
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #include "src/core/iomgr/tcp_posix.h"
 #include <errno.h>
@@ -539,3 +543,5 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size) {
   tcp->em_fd = em_fd;
   return &tcp->base;
 }
+
+#endif

@@ -34,9 +34,6 @@
 #ifndef __GRPC_INTERNAL_IOMGR_TCP_SERVER_H__
 #define __GRPC_INTERNAL_IOMGR_TCP_SERVER_H__
-#include <sys/types.h>
-#include <sys/socket.h>
-
 #include "src/core/iomgr/endpoint.h"
 /* Forward decl of grpc_tcp_server */
@@ -63,7 +60,7 @@ void grpc_tcp_server_start(grpc_tcp_server *server, grpc_pollset *pollset,
    For raw access to the underlying sockets, see grpc_tcp_server_get_fd(). */
 /* TODO(ctiller): deprecate this, and make grpc_tcp_server_add_ports to handle
    all of the multiple socket port matching logic in one place */
-int grpc_tcp_server_add_port(grpc_tcp_server *s, const struct sockaddr *addr,
+int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
                              int addr_len);
 /* Returns the file descriptor of the Nth listening socket on this server,

@@ -31,6 +31,10 @@
  *
  */
+#include <grpc/support/port_platform.h>
+
+#ifdef GPR_POSIX_SOCKET
+
 #define _GNU_SOURCE
 #include "src/core/iomgr/tcp_server.h"
@@ -252,7 +256,7 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
   if (s->nports == s->port_capacity) {
     s->port_capacity *= 2;
     s->ports =
-        gpr_realloc(s->ports, sizeof(server_port *) * s->port_capacity);
+        gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
   }
   sp = &s->ports[s->nports++];
   sp->server = s;
@@ -265,7 +269,7 @@ static int add_socket_to_server(grpc_tcp_server *s, int fd,
   return port;
 }
-int grpc_tcp_server_add_port(grpc_tcp_server *s, const struct sockaddr *addr,
+int grpc_tcp_server_add_port(grpc_tcp_server *s, const void *addr,
                              int addr_len) {
   int allocated_port1 = -1;
   int allocated_port2 = -1;
@@ -364,3 +368,5 @@ void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset *pollset,
   }
   gpr_mu_unlock(&s->mu);
 }
+
+#endif

@@ -93,6 +93,8 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
   grpc_tcp_server *tcp = NULL;
   size_t i;
   int count = 0;
+  int port_num = -1;
+  int port_temp;
   resolved = grpc_blocking_resolve_address(addr, "https");
   if (!resolved) {
@@ -105,9 +107,15 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
   }
   for (i = 0; i < resolved->naddrs; i++) {
-    if (grpc_tcp_server_add_port(tcp,
-                                 (struct sockaddr *)&resolved->addrs[i].addr,
-                                 resolved->addrs[i].len)) {
+    port_temp = grpc_tcp_server_add_port(
+        tcp, (struct sockaddr *)&resolved->addrs[i].addr,
+        resolved->addrs[i].len);
+    if (port_temp >= 0) {
+      if (port_num == -1) {
+        port_num = port_temp;
+      } else {
+        GPR_ASSERT(port_num == port_temp);
+      }
       count++;
     }
   }
@@ -125,7 +133,7 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr) {
   /* Register with the server only upon success */
   grpc_server_add_listener(server, tcp, start, destroy);
-  return 1;
+  return port_num;
   /* Error path: cleanup and return */
 error:
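
The secure-server bind path now records the port actually chosen by grpc_tcp_server_add_port and returns it (asserting that every resolved address lands on the same port) instead of returning a bare 1. That lets a caller bind to port 0 and discover the assigned port, which the Node changes later in this commit rely on. A hedged sketch of how a caller might use the new return value; server creation and credential setup are omitted:

```c
/* Sketch: bind to an ephemeral port and learn which one was picked.
   Treat a non-positive return as failure. */
static int bind_ephemeral(grpc_server *server) {
  int port = grpc_server_add_secure_http2_port(server, "0.0.0.0:0");
  if (port <= 0) {
    return -1;
  }
  return port; /* e.g. advertise "localhost:<port>" to clients */
}
```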

@@ -61,7 +61,7 @@ gpr_timespec gpr_now(void) {
   struct timeval now_tv;
   gettimeofday(&now_tv, NULL);
   now.tv_sec = now_tv.tv_sec;
-  now.tv_nsec = now_tv.tv_usec / 1000;
+  now.tv_nsec = now_tv.tv_usec * 1000;
   return now;
 }
 #endif
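
The gpr_now fix above corrects a unit conversion: gettimeofday reports microseconds, and tv_nsec is in nanoseconds, so the value must be multiplied by 1000 (1 µs = 1000 ns); the old division produced sub-second parts a million times too small. The conversion in isolation, as a small sketch:

```c
#include <sys/time.h>
#include <time.h>

/* Convert a gettimeofday() result into a timespec. */
static struct timespec timeval_to_timespec(struct timeval tv) {
  struct timespec ts;
  ts.tv_sec = tv.tv_sec;
  ts.tv_nsec = tv.tv_usec * 1000; /* microseconds -> nanoseconds */
  return ts;
}
```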

@@ -141,7 +141,7 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
                           gpr_slice_sub(slice, cur - beg, end - beg));
       p->state = GRPC_CHTTP2_DATA_FH_0;
       return GRPC_CHTTP2_PARSE_OK;
-    } else if (end - cur > p->frame_size) {
+    } else if ((gpr_uint32)(end - cur) > p->frame_size) {
       state->need_flush_reads = 1;
       grpc_sopb_add_slice(
           &p->incoming_sopb,

@@ -1212,7 +1212,7 @@ static int huff_nibble(grpc_chttp2_hpack_parser *p, gpr_uint8 nibble) {
   gpr_int16 next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
   if (emit != -1) {
     if (emit >= 0 && emit < 256) {
-      gpr_uint8 c = emit;
+      gpr_uint8 c = (gpr_uint8) emit;
       if (!append_string(p, &c, (&c) + 1)) return 0;
     } else {
       assert(emit == 256);

@@ -1611,7 +1611,7 @@ static int process_read(transport *t, gpr_slice slice) {
       }
       t->deframe_state = DTS_FH_0;
       return 1;
-    } else if (end - cur > t->incoming_frame_size) {
+    } else if ((gpr_uint32)(end - cur) > t->incoming_frame_size) {
       if (!parse_frame_slice(
               t, gpr_slice_sub_no_ref(slice, cur - beg,
                                       cur + t->incoming_frame_size - beg),

@@ -63,7 +63,7 @@ void grpc_sopb_reset(grpc_stream_op_buffer *sopb) {
 }
 void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
-  int i;
+  size_t i;
   for (i = 0; i < nops; i++) {
     switch (ops[i].type) {
       case GRPC_OP_SLICE:

@@ -45,10 +45,22 @@ util.inherits(GrpcClientStream, Duplex);
  * from stream.Duplex.
  * @constructor
  * @param {grpc.Call} call Call object to proxy
- * @param {object} options Stream options
+ * @param {function(*):Buffer=} serialize Serialization function for requests
+ * @param {function(Buffer):*=} deserialize Deserialization function for
+ *     responses
  */
-function GrpcClientStream(call, options) {
-  Duplex.call(this, options);
+function GrpcClientStream(call, serialize, deserialize) {
+  Duplex.call(this, {objectMode: true});
+  if (!serialize) {
+    serialize = function(value) {
+      return value;
+    };
+  }
+  if (!deserialize) {
+    deserialize = function(value) {
+      return value;
+    };
+  }
   var self = this;
   // Indicates that we can start reading and have not received a null read
   var can_read = false;
@@ -59,6 +71,32 @@ function GrpcClientStream(call, options) {
   // Indicates that a write is currently pending
   var writing = false;
   this._call = call;
+  /**
+   * Serialize a request value to a buffer. Always maps null to null. Otherwise
+   * uses the provided serialize function
+   * @param {*} value The value to serialize
+   * @return {Buffer} The serialized value
+   */
+  this.serialize = function(value) {
+    if (value === null || value === undefined) {
+      return null;
+    }
+    return serialize(value);
+  };
+  /**
+   * Deserialize a response buffer to a value. Always maps null to null.
+   * Otherwise uses the provided deserialize function.
+   * @param {Buffer} buffer The buffer to deserialize
+   * @return {*} The deserialized value
+   */
+  this.deserialize = function(buffer) {
+    if (buffer === null) {
+      return null;
+    }
+    return deserialize(buffer);
+  };
   /**
    * Callback to handle receiving a READ event. Pushes the data from that event
    * onto the read queue and starts reading again if applicable.
@@ -66,7 +104,7 @@ function GrpcClientStream(call, options) {
    */
   function readCallback(event) {
     var data = event.data;
-    if (self.push(data)) {
+    if (self.push(self.deserialize(data))) {
       if (data == null) {
         // Disable starting to read after null read was received
         can_read = false;
@@ -102,7 +140,7 @@ function GrpcClientStream(call, options) {
       next.callback();
       writeNext();
     };
-    call.startWrite(next.chunk, writeCallback, 0);
+    call.startWrite(self.serialize(next.chunk), writeCallback, 0);
   } else {
     writing = false;
   }
@@ -171,6 +209,9 @@ GrpcClientStream.prototype._write = function(chunk, encoding, callback) {
 * Make a request on the channel to the given method with the given arguments
 * @param {grpc.Channel} channel The channel on which to make the request
 * @param {string} method The method to request
+ * @param {function(*):Buffer} serialize Serialization function for requests
+ * @param {function(Buffer):*} deserialize Deserialization function for
+ *     responses
 * @param {array=} metadata Array of metadata key/value pairs to add to the call
 * @param {(number|Date)=} deadline The deadline for processing this request.
 *     Defaults to infinite future.
@@ -178,6 +219,8 @@ GrpcClientStream.prototype._write = function(chunk, encoding, callback) {
 */
function makeRequest(channel,
                     method,
+                    serialize,
+                    deserialize,
                     metadata,
                     deadline) {
  if (deadline === undefined) {

@@ -157,7 +157,8 @@ function handleHalfDuplex(call) {
 * Get a server object bound to the given port
 * @param {string} port Port to which to bind
 * @param {boolean} tls Indicates that the bound port should use TLS
- * @return {Server} Server object bound to the support
+ * @return {{server: Server, port: number}} Server object bound to the support,
+ *     and port number that the server is bound to
 */
function getServer(port, tls) {
  // TODO(mlumish): enable TLS functionality
@@ -183,8 +184,8 @@ function getServer(port, tls) {
      halfDuplexCall: handleHalfDuplex
    }
  }, options);
-  server.bind('0.0.0.0:' + port, tls);
-  return server;
+  var port_num = server.bind('0.0.0.0:' + port, tls);
+  return {server: server, port: port_num};
}
if (require.main === module) {
@@ -192,8 +193,8 @@ if (require.main === module) {
  var argv = parseArgs(process.argv, {
    string: ['port', 'use_tls']
  });
-  var server = getServer(argv.port, argv.use_tls === 'true');
-  server.start();
+  var server_obj = getServer(argv.port, argv.use_tls === 'true');
+  server_obj.server.start();
}
/**

@@ -194,7 +194,7 @@ NAN_METHOD(Server::AddHttp2Port) {
     return NanThrowTypeError("addHttp2Port's argument must be a String");
   }
   Server *server = ObjectWrap::Unwrap<Server>(args.This());
-  NanReturnValue(NanNew<Boolean>(grpc_server_add_http2_port(
+  NanReturnValue(NanNew<Number>(grpc_server_add_http2_port(
       server->wrapped_server, *NanUtf8String(args[0]))));
 }
@@ -208,7 +208,7 @@ NAN_METHOD(Server::AddSecureHttp2Port) {
     return NanThrowTypeError("addSecureHttp2Port's argument must be a String");
   }
   Server *server = ObjectWrap::Unwrap<Server>(args.This());
-  NanReturnValue(NanNew<Boolean>(grpc_server_add_secure_http2_port(
+  NanReturnValue(NanNew<Number>(grpc_server_add_secure_http2_port(
       server->wrapped_server, *NanUtf8String(args[0]))));
 }

@ -47,10 +47,22 @@ util.inherits(GrpcServerStream, Duplex);
* from stream.Duplex. * from stream.Duplex.
* @constructor * @constructor
* @param {grpc.Call} call Call object to proxy * @param {grpc.Call} call Call object to proxy
* @param {object} options Stream options * @param {function(*):Buffer=} serialize Serialization function for responses
* @param {function(Buffer):*=} deserialize Deserialization function for
* requests
*/ */
function GrpcServerStream(call, options) { function GrpcServerStream(call, serialize, deserialize) {
Duplex.call(this, options); Duplex.call(this, {objectMode: true});
if (!serialize) {
serialize = function(value) {
return value;
};
}
if (!deserialize) {
deserialize = function(value) {
return value;
};
}
this._call = call; this._call = call;
// Indicate that a status has been sent // Indicate that a status has been sent
var finished = false; var finished = false;
@ -59,6 +71,33 @@ function GrpcServerStream(call, options) {
'code' : grpc.status.OK, 'code' : grpc.status.OK,
'details' : 'OK' 'details' : 'OK'
}; };
/**
* Serialize a response value to a buffer. Always maps null to null. Otherwise
* uses the provided serialize function
* @param {*} value The value to serialize
* @return {Buffer} The serialized value
*/
this.serialize = function(value) {
if (value === null || value === undefined) {
return null;
}
return serialize(value);
};
/**
* Deserialize a request buffer to a value. Always maps null to null.
* Otherwise uses the provided deserialize function.
* @param {Buffer} buffer The buffer to deserialize
* @return {*} The deserialized value
*/
this.deserialize = function(buffer) {
if (buffer === null) {
return null;
}
return deserialize(buffer);
};
/** /**
* Send the pending status * Send the pending status
*/ */
@ -75,7 +114,6 @@ function GrpcServerStream(call, options) {
* @param {Error} err The error object * @param {Error} err The error object
*/ */
function setStatus(err) { function setStatus(err) {
console.log('Server setting status to', err);
var code = grpc.status.INTERNAL; var code = grpc.status.INTERNAL;
var details = 'Unknown Error'; var details = 'Unknown Error';
@ -113,7 +151,7 @@ function GrpcServerStream(call, options) {
return; return;
} }
var data = event.data; var data = event.data;
if (self.push(data) && data != null) { if (self.push(deserialize(data)) && data != null) {
self._call.startRead(readCallback); self._call.startRead(readCallback);
} else { } else {
reading = false; reading = false;
@ -155,7 +193,7 @@ GrpcServerStream.prototype._read = function(size) {
*/ */
GrpcServerStream.prototype._write = function(chunk, encoding, callback) { GrpcServerStream.prototype._write = function(chunk, encoding, callback) {
var self = this; var self = this;
self._call.startWrite(chunk, function(event) { self._call.startWrite(self.serialize(chunk), function(event) {
callback(); callback();
}, 0); }, 0);
}; };
@ -211,12 +249,13 @@ function Server(options) {
} }
}, 0); }, 0);
call.serverEndInitialMetadata(0); call.serverEndInitialMetadata(0);
var stream = new GrpcServerStream(call); var stream = new GrpcServerStream(call, handler.serialize,
handler.deserialize);
Object.defineProperty(stream, 'cancelled', { Object.defineProperty(stream, 'cancelled', {
get: function() { return cancelled;} get: function() { return cancelled;}
}); });
try { try {
handler(stream, data.metadata); handler.func(stream, data.metadata);
} catch (e) { } catch (e) {
stream.emit('error', e); stream.emit('error', e);
} }
@ -237,14 +276,20 @@ function Server(options) {
* handle/respond to. * handle/respond to.
* @param {function} handler Function that takes a stream of request values and * @param {function} handler Function that takes a stream of request values and
* returns a stream of response values * returns a stream of response values
* @param {function(*):Buffer} serialize Serialization function for responses
* @param {function(Buffer):*} deserialize Deserialization function for requests
* @return {boolean} True if the handler was set. False if a handler was already * @return {boolean} True if the handler was set. False if a handler was already
* set for that name. * set for that name.
*/ */
Server.prototype.register = function(name, handler) { Server.prototype.register = function(name, handler, serialize, deserialize) {
if (this.handlers.hasOwnProperty(name)) { if (this.handlers.hasOwnProperty(name)) {
return false; return false;
} }
this.handlers[name] = handler; this.handlers[name] = {
func: handler,
serialize: serialize,
deserialize: deserialize
};
return true; return true;
}; };
@ -256,9 +301,9 @@ Server.prototype.register = function(name, handler) {
*/ */
Server.prototype.bind = function(port, secure) { Server.prototype.bind = function(port, secure) {
if (secure) { if (secure) {
this._server.addSecureHttp2Port(port); return this._server.addSecureHttp2Port(port);
} else { } else {
this._server.addHttp2Port(port); return this._server.addHttp2Port(port);
} }
}; };

@@ -357,8 +357,7 @@ function makeServerConstructor(services) {
   * @return {SurfaceServer} this
   */
  SurfaceServer.prototype.bind = function(port, secure) {
-    this.inner_server.bind(port, secure);
-    return this;
+    return this.inner_server.bind(port, secure);
  };
  /**

@@ -37,7 +37,6 @@ var path = require('path');
 var grpc = require('bindings')('grpc.node');
 var Server = require('../server');
 var client = require('../client');
-var port_picker = require('../port_picker');
 var common = require('../common');
 var _ = require('highland');
@ -80,55 +79,50 @@ function errorHandler(stream) {
describe('echo client', function() { describe('echo client', function() {
it('should receive echo responses', function(done) { it('should receive echo responses', function(done) {
port_picker.nextAvailablePort(function(port) { var server = new Server();
var server = new Server(); var port_num = server.bind('0.0.0.0:0');
server.bind(port); server.register('echo', echoHandler);
server.register('echo', echoHandler); server.start();
server.start();
var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
var messages = ['echo1', 'echo2', 'echo3', 'echo4']; var channel = new grpc.Channel('localhost:' + port_num);
var channel = new grpc.Channel(port); var stream = client.makeRequest(
var stream = client.makeRequest( channel,
channel, 'echo');
'echo'); _(messages).map(function(val) {
_(messages).map(function(val) { return new Buffer(val);
return new Buffer(val); }).pipe(stream);
}).pipe(stream); var index = 0;
var index = 0; stream.on('data', function(chunk) {
stream.on('data', function(chunk) { assert.equal(messages[index], chunk.toString());
assert.equal(messages[index], chunk.toString()); index += 1;
index += 1; });
}); stream.on('end', function() {
stream.on('end', function() { server.shutdown();
server.shutdown(); done();
done();
});
}); });
}); });
it('should get an error status that the server throws', function(done) { it('should get an error status that the server throws', function(done) {
port_picker.nextAvailablePort(function(port) { var server = new Server();
var server = new Server(); var port_num = server.bind('0.0.0.0:0');
server.bind(port); server.register('error', errorHandler);
server.register('error', errorHandler); server.start();
server.start();
var channel = new grpc.Channel('localhost:' + port_num);
var channel = new grpc.Channel(port); var stream = client.makeRequest(
var stream = client.makeRequest( channel,
channel, 'error',
'error', null,
null, getDeadline(1));
getDeadline(1));
stream.on('data', function() {});
stream.on('data', function() {}); stream.write(new Buffer('test'));
stream.write(new Buffer('test')); stream.end();
stream.end(); stream.on('status', function(status) {
stream.on('status', function(status) { assert.equal(status.code, grpc.status.UNIMPLEMENTED);
assert.equal(status.code, grpc.status.UNIMPLEMENTED); assert.equal(status.details, 'error details');
assert.equal(status.details, 'error details'); server.shutdown();
server.shutdown(); done();
done();
});
}); });
}); });
}); });
@ -136,46 +130,43 @@ describe('echo client', function() {
* and the insecure echo client test */ * and the insecure echo client test */
describe('secure echo client', function() { describe('secure echo client', function() {
it('should recieve echo responses', function(done) { it('should recieve echo responses', function(done) {
port_picker.nextAvailablePort(function(port) { fs.readFile(ca_path, function(err, ca_data) {
fs.readFile(ca_path, function(err, ca_data) { assert.ifError(err);
fs.readFile(key_path, function(err, key_data) {
assert.ifError(err); assert.ifError(err);
fs.readFile(key_path, function(err, key_data) { fs.readFile(pem_path, function(err, pem_data) {
assert.ifError(err); assert.ifError(err);
fs.readFile(pem_path, function(err, pem_data) { var creds = grpc.Credentials.createSsl(ca_data);
assert.ifError(err); var server_creds = grpc.ServerCredentials.createSsl(null,
var creds = grpc.Credentials.createSsl(ca_data); key_data,
var server_creds = grpc.ServerCredentials.createSsl(null, pem_data);
key_data,
pem_data); var server = new Server({'credentials' : server_creds});
var port_num = server.bind('0.0.0.0:0', true);
var server = new Server({'credentials' : server_creds}); server.register('echo', echoHandler);
server.bind(port, true); server.start();
server.register('echo', echoHandler);
server.start(); var messages = ['echo1', 'echo2', 'echo3', 'echo4'];
var channel = new grpc.Channel('localhost:' + port_num, {
var messages = ['echo1', 'echo2', 'echo3', 'echo4']; 'grpc.ssl_target_name_override' : 'foo.test.google.com',
var channel = new grpc.Channel(port, { 'credentials' : creds
'grpc.ssl_target_name_override' : 'foo.test.google.com', });
'credentials' : creds var stream = client.makeRequest(
}); channel,
var stream = client.makeRequest( 'echo');
channel,
'echo'); _(messages).map(function(val) {
return new Buffer(val);
_(messages).map(function(val) { }).pipe(stream);
return new Buffer(val); var index = 0;
}).pipe(stream); stream.on('data', function(chunk) {
var index = 0; assert.equal(messages[index], chunk.toString());
stream.on('data', function(chunk) { index += 1;
assert.equal(messages[index], chunk.toString()); });
index += 1; stream.on('end', function() {
}); server.shutdown();
stream.on('end', function() { done();
server.shutdown();
done();
});
}); });
}); });
}); });
}); });

@@ -33,7 +33,6 @@
 var assert = require('assert');
 var grpc = require('bindings')('grpc.node');
-var port_picker = require('../port_picker');
 /**
  * This is used for testing functions with multiple asynchronous calls that
@ -58,143 +57,139 @@ function multiDone(done, count) {
describe('end-to-end', function() { describe('end-to-end', function() {
it('should start and end a request without error', function(complete) { it('should start and end a request without error', function(complete) {
port_picker.nextAvailablePort(function(port) { var server = new grpc.Server();
var server = new grpc.Server(); var done = multiDone(function() {
var done = multiDone(function() { complete();
complete(); server.shutdown();
server.shutdown(); }, 2);
}, 2); var port_num = server.addHttp2Port('0.0.0.0:0');
server.addHttp2Port(port); var channel = new grpc.Channel('localhost:' + port_num);
var channel = new grpc.Channel(port); var deadline = new Date();
var deadline = new Date(); deadline.setSeconds(deadline.getSeconds() + 3);
deadline.setSeconds(deadline.getSeconds() + 3); var status_text = 'xyz';
var status_text = 'xyz'; var call = new grpc.Call(channel,
var call = new grpc.Call(channel, 'dummy_method',
'dummy_method', deadline);
deadline); call.startInvoke(function(event) {
call.startInvoke(function(event) { assert.strictEqual(event.type,
assert.strictEqual(event.type, grpc.completionType.INVOKE_ACCEPTED);
grpc.completionType.INVOKE_ACCEPTED);
call.writesDone(function(event) { call.writesDone(function(event) {
assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
});
},function(event) {
assert.strictEqual(event.type, assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ); grpc.completionType.FINISH_ACCEPTED);
},function(event) { assert.strictEqual(event.data, grpc.opError.OK);
});
},function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
},function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED);
var status = event.data;
assert.strictEqual(status.code, grpc.status.OK);
assert.strictEqual(status.details, status_text);
done();
}, 0);
server.start();
server.requestCall(function(event) {
assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
var server_call = event.call;
assert.notEqual(server_call, null);
server_call.serverAccept(function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED); assert.strictEqual(event.type, grpc.completionType.FINISHED);
var status = event.data;
assert.strictEqual(status.code, grpc.status.OK);
assert.strictEqual(status.details, status_text);
done();
}, 0); }, 0);
server_call.serverEndInitialMetadata(0);
server_call.startWriteStatus(
grpc.status.OK,
status_text,
function(event) {
assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
done();
});
});
});
server.start(); it('should send and receive data without error', function(complete) {
server.requestCall(function(event) { var req_text = 'client_request';
assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW); var reply_text = 'server_response';
var server_call = event.call; var server = new grpc.Server();
assert.notEqual(server_call, null); var done = multiDone(function() {
server_call.serverAccept(function(event) { complete();
assert.strictEqual(event.type, grpc.completionType.FINISHED); server.shutdown();
}, 0); }, 6);
server_call.serverEndInitialMetadata(0); var port_num = server.addHttp2Port('0.0.0.0:0');
server_call.startWriteStatus( var channel = new grpc.Channel('localhost:' + port_num);
grpc.status.OK, var deadline = new Date();
status_text, deadline.setSeconds(deadline.getSeconds() + 3);
function(event) { var status_text = 'success';
var call = new grpc.Call(channel,
'dummy_method',
deadline);
call.startInvoke(function(event) {
assert.strictEqual(event.type,
grpc.completionType.INVOKE_ACCEPTED);
call.startWrite(
new Buffer(req_text),
function(event) {
assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
call.writesDone(function(event) {
assert.strictEqual(event.type, assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED); grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK); assert.strictEqual(event.data, grpc.opError.OK);
done(); done();
}); });
}, 0);
call.startRead(function(event) {
assert.strictEqual(event.type, grpc.completionType.READ);
assert.strictEqual(event.data.toString(), reply_text);
done();
}); });
}); },function(event) {
}); assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
done();
},function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED);
var status = event.data;
assert.strictEqual(status.code, grpc.status.OK);
assert.strictEqual(status.details, status_text);
done();
}, 0);
it('should send and receive data without error', function(complete) { server.start();
port_picker.nextAvailablePort(function(port) { server.requestCall(function(event) {
var req_text = 'client_request'; assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
var reply_text = 'server_response'; var server_call = event.call;
var server = new grpc.Server(); assert.notEqual(server_call, null);
var done = multiDone(function() { server_call.serverAccept(function(event) {
complete(); assert.strictEqual(event.type, grpc.completionType.FINISHED);
server.shutdown(); done();
}, 6); });
server.addHttp2Port(port); server_call.serverEndInitialMetadata(0);
var channel = new grpc.Channel(port); server_call.startRead(function(event) {
var deadline = new Date(); assert.strictEqual(event.type, grpc.completionType.READ);
deadline.setSeconds(deadline.getSeconds() + 3); assert.strictEqual(event.data.toString(), req_text);
var status_text = 'success'; server_call.startWrite(
var call = new grpc.Call(channel, new Buffer(reply_text),
'dummy_method',
deadline);
call.startInvoke(function(event) {
assert.strictEqual(event.type,
grpc.completionType.INVOKE_ACCEPTED);
call.startWrite(
new Buffer(req_text),
function(event) { function(event) {
assert.strictEqual(event.type, assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED); grpc.completionType.WRITE_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK); assert.strictEqual(event.data,
call.writesDone(function(event) { grpc.opError.OK);
assert.strictEqual(event.type, server_call.startWriteStatus(
grpc.completionType.FINISH_ACCEPTED); grpc.status.OK,
assert.strictEqual(event.data, grpc.opError.OK); status_text,
done(); function(event) {
}); assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
done();
});
}, 0); }, 0);
call.startRead(function(event) {
assert.strictEqual(event.type, grpc.completionType.READ);
assert.strictEqual(event.data.toString(), reply_text);
done();
});
},function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
done();
},function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED);
var status = event.data;
assert.strictEqual(status.code, grpc.status.OK);
assert.strictEqual(status.details, status_text);
done();
}, 0);
server.start();
server.requestCall(function(event) {
assert.strictEqual(event.type, grpc.completionType.SERVER_RPC_NEW);
var server_call = event.call;
assert.notEqual(server_call, null);
server_call.serverAccept(function(event) {
assert.strictEqual(event.type, grpc.completionType.FINISHED);
done();
});
server_call.serverEndInitialMetadata(0);
server_call.startRead(function(event) {
assert.strictEqual(event.type, grpc.completionType.READ);
assert.strictEqual(event.data.toString(), req_text);
server_call.startWrite(
new Buffer(reply_text),
function(event) {
assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED);
assert.strictEqual(event.data,
grpc.opError.OK);
server_call.startWriteStatus(
grpc.status.OK,
status_text,
function(event) {
assert.strictEqual(event.type,
grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
done();
});
}, 0);
});
}); });
}); });
}); });

@@ -34,8 +34,6 @@
 var interop_server = require('../interop/interop_server.js');
 var interop_client = require('../interop/interop_client.js');
-var port_picker = require('../port_picker');
-
 var server;
 var port;
@@ -44,15 +42,14 @@ var name_override = 'foo.test.google.com';
 describe('Interop tests', function() {
   before(function(done) {
-    port_picker.nextAvailablePort(function(addr) {
-      server = interop_server.getServer(addr.substring(addr.indexOf(':') + 1), true);
-      server.listen();
-      port = addr;
-      done();
-    });
+    var server_obj = interop_server.getServer(0, true);
+    server = server_obj.server;
+    server.listen();
+    port = 'localhost:' + server_obj.port;
+    done();
   });
   // This depends on not using a binary stream
-  it.skip('should pass empty_unary', function(done) {
+  it('should pass empty_unary', function(done) {
     interop_client.runTest(port, name_override, 'empty_unary', true, done);
   });
   it('should pass large_unary', function(done) {

@@ -32,7 +32,6 @@
 */
 var assert = require('assert');
-var port_picker = require('../port_picker');
 var grpc = require('..');
 var math = grpc.load(__dirname + '/../examples/math.proto').math;
@@ -50,11 +49,10 @@ var server = require('../examples/math_server.js');
 describe('Math client', function() {
   before(function(done) {
-    port_picker.nextAvailablePort(function(port) {
-      server.bind(port).listen();
-      math_client = new math.Math(port);
-      done();
-    });
+    var port_num = server.bind('0.0.0.0:0');
+    server.listen();
+    math_client = new math.Math('localhost:' + port_num);
+    done();
   });
   after(function() {
     server.shutdown();

@ -34,7 +34,6 @@
var assert = require('assert'); var assert = require('assert');
var grpc = require('bindings')('grpc.node'); var grpc = require('bindings')('grpc.node');
var Server = require('../server'); var Server = require('../server');
var port_picker = require('../port_picker');
/** /**
* This is used for testing functions with multiple asynchronous calls that * This is used for testing functions with multiple asynchronous calls that
@ -68,54 +67,52 @@ function echoHandler(stream) {
describe('echo server', function() { describe('echo server', function() {
it('should echo inputs as responses', function(done) { it('should echo inputs as responses', function(done) {
done = multiDone(done, 4); done = multiDone(done, 4);
port_picker.nextAvailablePort(function(port) { var server = new Server();
var server = new Server(); var port_num = server.bind('[::]:0');
server.bind(port); server.register('echo', echoHandler);
server.register('echo', echoHandler); server.start();
server.start();
var req_text = 'echo test string'; var req_text = 'echo test string';
var status_text = 'OK'; var status_text = 'OK';
var channel = new grpc.Channel(port); var channel = new grpc.Channel('localhost:' + port_num);
var deadline = new Date(); var deadline = new Date();
deadline.setSeconds(deadline.getSeconds() + 3); deadline.setSeconds(deadline.getSeconds() + 3);
var call = new grpc.Call(channel, var call = new grpc.Call(channel,
'echo', 'echo',
deadline); deadline);
call.startInvoke(function(event) { call.startInvoke(function(event) {
assert.strictEqual(event.type, assert.strictEqual(event.type,
grpc.completionType.INVOKE_ACCEPTED); grpc.completionType.INVOKE_ACCEPTED);
call.startWrite( call.startWrite(
new Buffer(req_text), new Buffer(req_text),
function(event) { function(event) {
assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK);
call.writesDone(function(event) {
assert.strictEqual(event.type, assert.strictEqual(event.type,
grpc.completionType.WRITE_ACCEPTED); grpc.completionType.FINISH_ACCEPTED);
assert.strictEqual(event.data, grpc.opError.OK); assert.strictEqual(event.data, grpc.opError.OK);
call.writesDone(function(event) { done();
assert.strictEqual(event.type, });
grpc.completionType.FINISH_ACCEPTED); }, 0);
assert.strictEqual(event.data, grpc.opError.OK); call.startRead(function(event) {
done(); assert.strictEqual(event.type, grpc.completionType.READ);
}); assert.strictEqual(event.data.toString(), req_text);
}, 0);
call.startRead(function(event) {
assert.strictEqual(event.type, grpc.completionType.READ);
assert.strictEqual(event.data.toString(), req_text);
done();
});
},function(event) {
assert.strictEqual(event.type,
grpc.completionType.CLIENT_METADATA_READ);
done(); done();
},function(event) { });
assert.strictEqual(event.type, grpc.completionType.FINISHED); },function(event) {
var status = event.data; assert.strictEqual(event.type,
assert.strictEqual(status.code, grpc.status.OK); grpc.completionType.CLIENT_METADATA_READ);
assert.strictEqual(status.details, status_text); done();
server.shutdown(); },function(event) {
done(); assert.strictEqual(event.type, grpc.completionType.FINISHED);
}, 0); var status = event.data;
}); assert.strictEqual(status.code, grpc.status.OK);
assert.strictEqual(status.details, status_text);
server.shutdown();
done();
}, 0);
}); });
}); });

@@ -81,6 +81,8 @@ PHP_METHOD(Credentials, createSsl) {
   int root_certs_length, private_key_length = 0, cert_chain_length = 0;
+  pem_key_cert_pair.private_key = pem_key_cert_pair.cert_chain = NULL;
+
   /* "s|s!s! == 1 string, 2 optional nullable strings */
   if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s!s!",
                             &pem_root_certs, &root_certs_length,

@ -1,4 +1,4 @@
# Copyright 2014, Google Inc. # Copyright 2015, Google Inc.
# All rights reserved. # All rights reserved.
# #
# Redistribution and use in source and binary forms, with or without # Redistribution and use in source and binary forms, with or without
@ -27,31 +27,38 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
require 'beefcake' """Tests for google3.net.rpc.python.framework.foundation.logging_pool."""
module Beefcake import unittest
# Re-open the beefcake message module to add a static encode
# from _framework.foundation import logging_pool
# This is a temporary measure while beefcake is used as the default proto
# library for developing grpc ruby. Once that changes to the official proto _POOL_SIZE = 16
# library this can be removed. It's necessary to allow the update the service
# module to assume a static encode method.
# TODO(temiola): remove this. class LoggingPoolTest(unittest.TestCase):
module Message
# additional mixin module that adds static encode method when include def testUpAndDown(self):
module StaticEncode pool = logging_pool.pool(_POOL_SIZE)
# encodes o with its instance#encode method pool.shutdown(wait=True)
def encode(o)
o.encode with logging_pool.pool(_POOL_SIZE) as pool:
end self.assertIsNotNone(pool)
end
def testTaskExecuted(self):
# extend self.included in Beefcake::Message to include StaticEncode test_list = []
def self.included(o)
o.extend StaticEncode with logging_pool.pool(_POOL_SIZE) as pool:
o.extend Dsl pool.submit(lambda: test_list.append(object())).result()
o.extend Decode
o.send(:include, Encode) self.assertTrue(test_list)
end
end def testException(self):
end with logging_pool.pool(_POOL_SIZE) as pool:
raised_exception = pool.submit(lambda: 1/0).exception()
self.assertIsNotNone(raised_exception)
if __name__ == '__main__':
unittest.main()

@ -0,0 +1,83 @@
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A thread pool that logs exceptions raised by tasks executed within it."""
import functools
import logging
from concurrent import futures
def _wrap(behavior):
"""Wraps an arbitrary callable behavior in exception-logging."""
@functools.wraps(behavior)
def _wrapping(*args, **kwargs):
try:
return behavior(*args, **kwargs)
except Exception as e:
logging.exception('Unexpected exception from task run in logging pool!')
raise
return _wrapping
class _LoggingPool(object):
"""An exception-logging futures.ThreadPoolExecutor-compatible thread pool."""
def __init__(self, backing_pool):
self._backing_pool = backing_pool
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._backing_pool.shutdown(wait=True)
def submit(self, fn, *args, **kwargs):
return self._backing_pool.submit(_wrap(fn), *args, **kwargs)
def map(self, func, *iterables, **kwargs):
return self._backing_pool.map(
_wrap(func), *iterables, timeout=kwargs.get('timeout', None))
def shutdown(self, wait=True):
self._backing_pool.shutdown(wait=wait)
def pool(max_workers):
"""Creates a thread pool that logs exceptions raised by the tasks within it.
Args:
max_workers: The maximum number of worker threads to allow the pool.
Returns:
A futures.ThreadPoolExecutor-compatible thread pool that logs exceptions
raised by the tasks executed within it.
"""
return _LoggingPool(futures.ThreadPoolExecutor(max_workers))

@ -1,64 +1,63 @@
Ruby for GRPC gRPC Ruby
============= =========
LAYOUT A Ruby implementation of gRPC, Google's RPC library.
------
Directory structure is the recommended layout for [ruby extensions](http://guides.rubygems.org/gems-with-extensions/)
* ext: the extension code INSTALLATION PREREQUISITES
* lib: the entrypoint grpc ruby library to be used in a 'require' statement --------------------------
* test: tests
This requires Ruby 2.x, as the rpc api surface uses keyword args.
DEPENDENCIES
------------
INSTALLING
----------
* Extension - Install the gRPC core library
TODO: describe this, once the core distribution mechanism is defined.
The extension can be built and tested using $ gem install grpc
[rake](https://rubygems.org/gems/rake). However, the rake-extensiontask rule
is not supported on older versions of rubygems, and the necessary version of
rubygems.
This is resolved by using [RVM](https://rvm.io/) instead; install a single-user
ruby environment, and develop on the latest stable version of ruby (2.1.5).
Installing from source
----------------------
INSTALLATION PREREQUISITES - Build or Install the gRPC core
-------------------------- E.g, from the root of the grpc [git repo](https://github.com/google/grpc)
$ cd ../..
Install RVM $ make && sudo make install
- Install Ruby 2.x. Consider doing this with [RVM](http://rvm.io), it's a nice way of controlling
the exact ruby version that's used.
$ command curl -sSL https://rvm.io/mpapis.asc | gpg --import - $ command curl -sSL https://rvm.io/mpapis.asc | gpg --import -
$ \curl -sSL https://get.rvm.io | bash -s stable --ruby $ \curl -sSL https://get.rvm.io | bash -s stable --ruby
$ $
$ # follow the instructions to ensure that your're using the latest stable version of Ruby $ # follow the instructions to ensure that your're using the latest stable version of Ruby
$ # and that the rvm command is installed $ # and that the rvm command is installed
$
$ gem install bundler # install bundler, the standard ruby package manager
HACKING - Install [bundler](http://bundler.io/)
------- $ gem install bundler
The extension can be built and tested using the Rakefile. - Finally, install grpc ruby locally.
$ cd <install_dir>
$ bundle install
$ rake # compiles the extension, runs the unit tests, see rake -T for other options
$ # create a workspace
$ git5 start <your-git5-branch> net/grpc
$
$ # build the C library and install it in $HOME/grpc_dev
$ <google3>/net/grpc/c/build_gyp/build_grpc_dev.sh
$
$ # build the ruby extension and test it.
$ cd google3_dir/net/grpc/ruby
$ rake
Finally, install grpc ruby locally. CONTENTS
--------
$ cd <this_dir> Directory structure is the layout for [ruby extensions](http://guides.rubygems.org/gems-with-extensions/)
$
$ # update the Gemfile, modify the line beginning # gem 'beefcake' to refer to * ext: the extension code
$ # the patched beefcake dir * lib: the entrypoint grpc ruby library to be used in a 'require' statement
$ * spec: tests
$ bundle install * bin: example gRPC clients and servers, e.g,
```ruby
# client
stub = Math::Math::Stub.new('my.test.math.server.com:8080')
req = Math::DivArgs.new(dividend: 7, divisor: 3)
logger.info("div(7/3): req=#{req.inspect}")
resp = stub.div(req, INFINITE_FUTURE)
logger.info("Answer: #{resp.inspect}")
```

@@ -1,11 +1,8 @@
 Interop test protos
 ===================
-These were generated by a patched version of beefcake and a patched version of
-protoc.
-- set up and access of the patched versions is described in ../../README.md
-
-The actual test proto is found in Google3 at
-- third_party/stubby/testing/proto/test.proto
+These ruby classes were generated with protoc v3, using grpc's ruby compiler
+plugin.
+- As of 2015/01 protoc v3 is available in the
+  [google-protobuf](https://github.com/google/protobuf) repo

@@ -145,8 +145,8 @@ class TestTarget < Grpc::Testing::TestService::Service
   end
   def half_duplex_call(reqs)
-    # TODO(temiola): clarify the behaviour of the half_duplex_call, it's not
-    # currently used in any tests
+    # TODO: update with unique behaviour of the half_duplex_call if that's
+    # ever required by any of the tests.
     full_duplex_call(reqs)
   end
 end

@@ -68,13 +68,9 @@ $CFLAGS << ' -Wno-return-type '
 $CFLAGS << ' -Wall '
 $CFLAGS << ' -pedantic '
-$LDFLAGS << ' -lgrpc -lgpr'
+$LDFLAGS << ' -lgrpc -lgpr -ldl'
-# crash('need grpc lib') unless have_library('grpc', 'grpc_channel_destroy')
-#
-# TODO(temiola): figure out why this stopped working, but the so is built OK
-# and the tests pass
+crash('need grpc lib') unless have_library('grpc', 'grpc_channel_destroy')
 have_library('grpc', 'grpc_channel_destroy')
 crash('need gpr lib') unless have_library('gpr', 'gpr_now')
 create_makefile('grpc/grpc')

@@ -75,7 +75,7 @@ static void grpc_rb_completion_queue_shutdown_drain(grpc_completion_queue *cq) {
   grpc_completion_queue_shutdown(cq);
   next_call.cq = cq;
   next_call.event = NULL;
-  /* TODO(temiola): the timeout should be a module level constant that defaults
+  /* TODO: the timeout should be a module level constant that defaults
    * to gpr_inf_future.
    *
    * - at the moment this does not work, it stalls. Using a small timeout like

@@ -5,11 +5,11 @@ require 'grpc/version'
 Gem::Specification.new do |s|
   s.name = 'grpc'
   s.version = Google::RPC::VERSION
-  s.authors = ['One Platform Team']
-  s.email = 'stubby-team@google.com'
-  s.homepage = 'http://go/grpc'
+  s.authors = ['gRPC Authors']
+  s.email = 'tbetbetbe@gmail.com'
+  s.homepage = 'https://github.com/google/grpc/tree/master/src/ruby'
   s.summary = 'Google RPC system in Ruby'
-  s.description = 'Send RPCs from Ruby'
+  s.description = 'Send RPCs from Ruby using Google\'s RPC system'
   s.files = `git ls-files`.split("\n")
   s.test_files = `git ls-files -- spec/*`.split("\n")

@@ -142,7 +142,7 @@ module Google
       # during bidi-streaming, read the requests to send from a separate thread
       # read so that read_loop does not block waiting for requests to read.
       def start_write_loop(requests, is_client: true)
-        Thread.new do  # TODO(temiola) run on a thread pool
+        Thread.new do  # TODO: run on a thread pool
           write_tag = Object.new
           begin
             count = 0

@@ -233,10 +233,6 @@ module Google
       end
       def new_active_server_call(call, new_server_rpc)
-        # TODO(temiola): perhaps reuse the main server completion queue here,
-        # but for now, create a new completion queue per call, pending best
-        # practice usage advice from the c core.
-
         # Accept the call. This is necessary even if a status is to be sent
         # back immediately
         finished_tag = Object.new
@@ -340,7 +336,7 @@ module Google
         @workers.size.times { schedule { throw :exit } }
         @stopped = true
-        # TODO(temiola): allow configuration of the keepalive period
+        # TODO: allow configuration of the keepalive period
         keep_alive = 5
         @stop_mutex.synchronize do
           @stop_cond.wait(@stop_mutex, keep_alive) if @workers.size > 0

@@ -34,7 +34,7 @@ include Logging.globally # logger is accessible everywhere
 Logging.logger.root.appenders = Logging.appenders.stdout
 Logging.logger.root.level = :info
-# TODO(temiola): provide command-line configuration for logging
+# TODO: provide command-line configuration for logging
 Logging.logger['Google::RPC'].level = :debug
 Logging.logger['Google::RPC::ActiveCall'].level = :info
 Logging.logger['Google::RPC::BidiCall'].level = :info

@@ -294,7 +294,7 @@ shared_examples 'GRPC metadata delivery works OK' do
     expect_next_event_on(@server_queue, WRITE_ACCEPTED, @server_tag)
     # there is the HTTP status metadata, though there should not be any
-    # TODO(temiola): update this with the bug number to be resolved
+    # TODO: update this with the bug number to be resolved
     ev = expect_next_event_on(@client_queue, CLIENT_METADATA_READ, @tag)
     expect(ev.result).to eq(':status' => '200')
   end

@@ -1,4 +1 @@
 These are test keys *NOT* to be used in production.
-
-http://go/keyhunt requires this README
-CONFIRMEDTESTKEY

@@ -19,6 +19,14 @@
   return 'gens/' + m.group(1) + '.pb.cc'
 %>
+# Basic platform detection
+HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
+ifeq ($(SYSTEM),)
+SYSTEM = $(HOST_SYSTEM)
+endif
+
 # Configurations
 VALID_CONFIG_opt = 1
@@ -132,10 +140,15 @@ LDFLAGS += $(LDFLAGS_$(CONFIG))
 CFLAGS += -std=c89 -pedantic
 CXXFLAGS += -std=c++11
 CPPFLAGS += -g -fPIC -Wall -Werror -Wno-long-long
-LDFLAGS += -g -pthread -fPIC
+LDFLAGS += -g -fPIC
 INCLUDES = . include gens
+ifeq ($(SYSTEM),Darwin)
+LIBS = m z
+else
 LIBS = rt m z pthread
+LDFLAGS += -pthread
+endif
 LIBSXX = protobuf
 LIBS_PROTOC = protoc protobuf
@@ -173,11 +186,6 @@ HOST_LDLIBS = $(LDLIBS)
 # These are automatically computed variables.
 # There shouldn't be any need to change anything from now on.
-HOST_SYSTEM = $(shell uname | cut -f 1 -d_)
-ifeq ($(SYSTEM),)
-SYSTEM = $(HOST_SYSTEM)
-endif
-
 ifeq ($(SYSTEM),MINGW32)
 SHARED_EXT = dll
 endif
@@ -340,8 +348,12 @@ libs/$(CONFIG)/zlib/libz.a:
 	$(Q)cp third_party/zlib/libz.a libs/$(CONFIG)/zlib
 libs/$(CONFIG)/openssl/libssl.a:
-	$(E) "[MAKE] Building openssl"
+	$(E) "[MAKE] Building openssl for $(SYSTEM)"
+ifeq ($(SYSTEM),Darwin)
+	$(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./Configure darwin64-x86_64-cc $(OPENSSL_CONFIG_$(CONFIG)))
+else
 	$(Q)(cd third_party/openssl ; CC="$(CC) -fPIC -fvisibility=hidden $(CPPFLAGS_$(CONFIG)) $(OPENSSL_CFLAGS_$(CONFIG))" ./config $(OPENSSL_CONFIG_$(CONFIG)))
+endif
 	$(Q)$(MAKE) -C third_party/openssl clean
 	$(Q)$(MAKE) -C third_party/openssl build_crypto build_ssl
 	$(Q)mkdir -p libs/$(CONFIG)/openssl
@@ -695,6 +707,7 @@ libs/$(CONFIG)/lib${lib.name}.a: $(ZLIB_DEP) $(LIB${lib.name.upper()}_OBJS)
 % endif
 	$(E) "[AR] Creating $@"
 	$(Q) mkdir -p `dirname $@`
+	$(Q) rm -f libs/$(CONFIG)/lib${lib.name}.a
 	$(Q) $(AR) rcs libs/$(CONFIG)/lib${lib.name}.a $(LIB${lib.name.upper()}_OBJS)
 % if lib.get('baselib', False):
 % if lib.get('secure', True):
@@ -707,6 +720,9 @@ libs/$(CONFIG)/lib${lib.name}.a: $(ZLIB_DEP) $(LIB${lib.name.upper()}_OBJS)
 	$(Q) rm -rf tmp-merge
 % endif
 % endif
+ifeq ($(SYSTEM),Darwin)
+	$(Q) ranlib libs/$(CONFIG)/lib${lib.name}.a
+endif
 <%
   if lib.language == 'c++':

@@ -56,8 +56,8 @@
typedef struct metadata {
size_t count;
size_t cap;
-const char **keys;
-const char **values;
+char **keys;
+char **values;
} metadata;
/* details what we expect to find on a single event - and forms a linked
@@ -409,11 +409,11 @@ static metadata *metadata_from_args(va_list args) {
if (md->cap == md->count) {
md->cap = GPR_MAX(md->cap + 1, md->cap * 3 / 2);
-md->keys = gpr_realloc(md->keys, sizeof(const char *) * md->cap);
-md->values = gpr_realloc(md->values, sizeof(const char *) * md->cap);
+md->keys = gpr_realloc(md->keys, sizeof(char *) * md->cap);
+md->values = gpr_realloc(md->values, sizeof(char *) * md->cap);
}
-md->keys[md->count] = key;
-md->values[md->count] = value;
+md->keys[md->count] = (char *)key;
+md->values[md->count] = (char *)value;
md->count++;
}
}
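The hunk above appends to the metadata arrays with a grow-then-store pattern: when the array is full, capacity is bumped to roughly 1.5x (never by less than one slot) before the element is written. A self-contained sketch of the same pattern, with plain realloc standing in for gpr_realloc and illustrative names (allocation-failure handling omitted for brevity):

#include <stdlib.h>

typedef struct {
  size_t count;
  size_t cap;
  char **keys;
} key_list; /* illustrative stand-in for the test's metadata struct */

static void key_list_add(key_list *l, const char *key) {
  if (l->cap == l->count) {
    size_t grown = l->cap * 3 / 2;                    /* ~1.5x growth */
    l->cap = grown > l->cap + 1 ? grown : l->cap + 1; /* grow by at least one slot */
    l->keys = realloc(l->keys, sizeof(char *) * l->cap);
  }
  l->keys[l->count++] = (char *)key; /* cast mirrors the (char *) stores above */
}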

@@ -37,6 +37,7 @@
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
+#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -79,7 +80,7 @@ static void create_test_socket(int port, int *socket_fd,
/* Use local address for test */
sin->sin_family = AF_INET;
-sin->sin_addr.s_addr = 0;
+sin->sin_addr.s_addr = htonl(0x7f000001);
sin->sin_port = htons(port);
}
@@ -164,7 +165,7 @@ static void session_read_cb(void *arg, /*session*/
grpc_fd_notify_on_read(se->em_fd, session_read_cb, se);
} else {
gpr_log(GPR_ERROR, "Unhandled read error %s", strerror(errno));
-GPR_ASSERT(0);
+abort();
}
}
}
@@ -316,7 +317,7 @@ static void client_session_write(void *arg, /*client*/
gpr_mu_unlock(&cl->mu);
} else {
gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno));
-GPR_ASSERT(0);
+abort();
}
}
@@ -325,10 +326,20 @@ static void client_start(client *cl, int port) {
int fd;
struct sockaddr_in sin;
create_test_socket(port, &fd, &sin);
-if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1 &&
-errno != EINPROGRESS) {
-gpr_log(GPR_ERROR, "Failed to connect to the server");
-GPR_ASSERT(0);
+if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
+if (errno == EINPROGRESS) {
+struct pollfd pfd;
+pfd.fd = fd;
+pfd.events = POLLOUT;
+pfd.revents = 0;
+if (poll(&pfd, 1, -1) == -1) {
+gpr_log(GPR_ERROR, "poll() failed during connect; errno=%d", errno);
+abort();
+}
+} else {
+gpr_log(GPR_ERROR, "Failed to connect to the server (errno=%d)", errno);
+abort();
+}
}
cl->em_fd = grpc_fd_create(fd);
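The rewritten client_start above no longer bails out on a pending connect: when connect() reports EINPROGRESS on a non-blocking socket, it polls the fd for writability before handing it to the event manager. A minimal standalone sketch of that conventional pattern, with an added SO_ERROR check to read the final outcome (the helper name is illustrative, not from the test):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Returns 0 on success, -1 on failure (errno describes the error). */
static int connect_and_wait(int fd, const struct sockaddr *addr, socklen_t len) {
  struct pollfd pfd;
  int so_error = 0;
  socklen_t so_len = sizeof(so_error);

  if (connect(fd, addr, len) == 0) return 0; /* connected immediately */
  if (errno != EINPROGRESS) return -1;       /* real failure */

  pfd.fd = fd;
  pfd.events = POLLOUT; /* the socket becomes writable once the connect resolves */
  pfd.revents = 0;
  if (poll(&pfd, 1, -1) == -1) return -1;

  if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &so_len) == -1) return -1;
  if (so_error != 0) {
    errno = so_error; /* the connect failed asynchronously */
    return -1;
  }
  return 0;
}

The test itself only waits for writability and lets the later read/write callbacks surface any failure; the SO_ERROR read shown here is the usual way to learn the result immediately.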

@@ -32,6 +32,7 @@
*/
#include "src/core/iomgr/resolve_address.h"
+#include "src/core/iomgr/iomgr.h"
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/time.h>
@@ -122,7 +123,7 @@ static void test_unparseable_hostports(void) {
int main(int argc, char** argv) {
grpc_test_init(argc, argv);
+grpc_iomgr_init();
test_localhost();
test_default_port();
test_missing_default_port();
@@ -130,6 +131,6 @@ int main(int argc, char** argv) {
test_ipv6_without_port();
test_invalid_ip_addresses();
test_unparseable_hostports();
+grpc_iomgr_shutdown();
return 0;
}

@@ -213,9 +213,9 @@ static void test_sockaddr_to_string(void) {
expect_sockaddr_str("[::fffe:c000:263]:12345", &input6, 1);
memset(&dummy, 0, sizeof(dummy));
-dummy.sa_family = 999;
-expect_sockaddr_str("(sockaddr family=999)", &dummy, 0);
-expect_sockaddr_str("(sockaddr family=999)", &dummy, 1);
+dummy.sa_family = 123;
+expect_sockaddr_str("(sockaddr family=123)", &dummy, 0);
+expect_sockaddr_str("(sockaddr family=123)", &dummy, 1);
GPR_ASSERT(errno == 0xDEADBEEF);
}
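Dropping the bogus address family from 999 to 123 keeps the assertion portable: on some platforms sa_family_t is an 8-bit type, so 999 cannot survive the assignment and the formatted string would not match. A tiny hypothetical demonstration of that truncation (not part of the test):

#include <stdio.h>

int main(void) {
  unsigned char narrow_family; /* stand-in for an 8-bit sa_family_t */
  narrow_family = (unsigned char)999;
  printf("999 stored in 8 bits reads back as %d\n", narrow_family); /* 231 */
  narrow_family = (unsigned char)123;
  printf("123 stored in 8 bits reads back as %d\n", narrow_family); /* 123 */
  return 0;
}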

@@ -40,6 +40,7 @@
#include <unistd.h>
#include "src/core/iomgr/iomgr.h"
+#include "src/core/iomgr/socket_utils_posix.h"
#include <grpc/support/log.h>
#include <grpc/support/time.h>
@@ -138,7 +139,8 @@ void test_times_out(void) {
/* tie up the listen buffer, which is somewhat arbitrarily sized. */
for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
-client_fd[i] = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
+client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
+grpc_set_socket_nonblocking(client_fd[i], 1);
do {
r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
} while (r == -1 && errno == EINTR);
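SOCK_NONBLOCK is a Linux-specific socket() flag, so the loop above now creates an ordinary socket and flips it to non-blocking mode through a helper from socket_utils_posix.h. The diff does not show that helper's body; a rough sketch of the fcntl()-based approach such a helper typically wraps:

#include <fcntl.h>

/* Illustrative only; not the actual grpc_set_socket_nonblocking implementation. */
static int set_socket_nonblocking(int fd, int non_blocking) {
  int oldflags = fcntl(fd, F_GETFL, 0);
  if (oldflags < 0) return -1;

  if (non_blocking) {
    oldflags |= O_NONBLOCK;  /* enable non-blocking I/O */
  } else {
    oldflags &= ~O_NONBLOCK; /* restore blocking I/O */
  }
  return fcntl(fd, F_SETFL, oldflags) < 0 ? -1 : 0;
}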

@@ -129,7 +129,8 @@ static void expect_metadata(test_stream *s, int from_client, const char *key,
/* Convert some number of seconds into a gpr_timespec that many seconds in the
future */
static gpr_timespec deadline_from_seconds(double deadline_seconds) {
-return gpr_time_add(gpr_now(), gpr_time_from_micros(deadline_seconds * 1e6));
+return gpr_time_add(gpr_now(),
+gpr_time_from_micros((long)(deadline_seconds * 1e6)));
}
/* Init a test_user_data instance */
@@ -573,7 +574,7 @@ static grpc_transport_setup_result setup_client_transport(
name - the name of this test */
static void begin_test(test_fixture *f, grpc_transport_test_config *config,
const char *name) {
-gpr_timespec timeout = gpr_time_add(gpr_now(), gpr_time_from_micros(100e6));
+gpr_timespec timeout = gpr_time_add(gpr_now(), gpr_time_from_seconds(100));
gpr_log(GPR_INFO, "BEGIN: %s/%s", name, config->name);
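Both timeout tweaks above stop feeding floating-point expressions into the time helpers: deadline_seconds * 1e6 and 100e6 are doubles, and the new code either casts explicitly or switches to a seconds-based helper so no implicit double-to-integer conversion is left to the compiler. A small sketch of the arithmetic, using a hypothetical conversion helper:

#include <stdio.h>

/* Hypothetical helper mirroring the explicit cast used in the test. */
static long seconds_to_micros(double seconds) {
  return (long)(seconds * 1e6); /* truncates toward zero */
}

int main(void) {
  printf("%ld\n", seconds_to_micros(2.5));   /* 2500000 */
  printf("%ld\n", seconds_to_micros(100.0)); /* 100000000 */
  return 0;
}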

@@ -48,8 +48,10 @@ static int seed(void) { return _getpid(); }
#endif
void grpc_test_init(int argc, char **argv) {
+#ifndef GPR_WIN32
/* disable SIGPIPE */
signal(SIGPIPE, SIG_IGN);
+#endif
/* seed rng with pid, so we don't end up with the same random numbers as a
concurrently running test binary */
srand(seed());

@@ -188,7 +188,7 @@ void DoResponseStreamingWithSlowConsumer(
grpc::ClientContext context;
StreamingOutputCallRequest request;
-for (unsigned int i = 0; i < kNumResponseMessages; ++i) {
+for (int i = 0; i < kNumResponseMessages; ++i) {
ResponseParameters* response_parameter = request.add_response_parameters();
response_parameter->set_size(kResponseMessageSize);
}
@@ -196,7 +196,7 @@ void DoResponseStreamingWithSlowConsumer(
std::unique_ptr<grpc::ClientReader<StreamingOutputCallResponse>> stream(
stub->StreamingOutputCall(&context, &request));
-unsigned int i = 0;
+int i = 0;
while (stream->Read(&response)) {
GPR_ASSERT(response.payload().body() ==
grpc::string(kResponseMessageSize, '\0'));

@@ -0,0 +1,27 @@
# Dockerfile for gRPC Go
FROM golang:1.4
# Install SSH so that Go source can be pulled securely.
RUN apt-get update && apt-get install -y ssh
# Install a GitHub SSH service credential that gives access to the GitHub repo while it's private
#
# TODO: remove this once the repo is public
ADD .ssh .ssh
RUN chmod 600 /.ssh/github.rsa
RUN mkdir -p $HOME/.ssh && echo 'Host github.com' > $HOME/.ssh/config
RUN echo " IdentityFile /.ssh/github.rsa" >> $HOME/.ssh/config
RUN echo 'StrictHostKeyChecking no' >> $HOME/.ssh/config
# Force go get to use the GitHub ssh url instead of https, and use the SSH creds
RUN git config --global url."git@github.com:".insteadOf "https://github.com/"
# Get the source from GitHub
RUN go get github.com/google/grpc-go
# Build the interop client and server
RUN cd src/github.com/google/grpc-go/interop/client && go install
RUN cd src/github.com/google/grpc-go/interop/server && go install
# Specify the default command such that the interop server runs on its known testing port
CMD ["/bin/bash", "-c 'cd src/github.com/google/grpc-go/interop/server && go run server.go --use_tls=true --port=8020'"]

@@ -0,0 +1,4 @@
GRPC Go Dockerfile
==================
Dockerfile for gRPC Go development, testing and deployment.

@@ -86,6 +86,7 @@ grpc_add_docker_user() {
}
_grpc_update_image_args() {
+echo "image_args $@"
# default the host, root storage uri and docker file root
grpc_gs_root='gs://tmp-grpc-dev/admin/'
grpc_dockerfile_root='tools/dockerfile'
@@ -95,7 +96,7 @@ _grpc_update_image_args() {
# see if -p or -z is used to override the the project or zone
local OPTIND
local OPTARG
-while getopts :r:d:h name
+while getopts :r:d:h: name
do
case $name in
d) grpc_dockerfile_root=$OPTARG ;;
@@ -261,7 +262,7 @@ _grpc_set_project_and_zone() {
local OPTIND
local OPTARG
local arg_func
-while getopts :p:z:f:n name
+while getopts :np:z:f: name
do
case $name in
f) declare -F $OPTARG >> /dev/null && {
@@ -392,6 +393,65 @@ grpc_interop_test_args() {
}
}
_grpc_sync_scripts_args() {
grpc_gce_script_root='tools/gce_setup'
local OPTIND
local OPTARG
while getopts :s: name
do
case $name in
s) grpc_gce_script_root=$OPTARG ;;
:) continue ;; # ignore -s without args, just use the defaults
\?) echo "-$OPTARG: unknown flag; it's ignored" 1>&2; continue ;;
esac
done
shift $((OPTIND-1))
[[ -d $grpc_gce_script_root ]] || {
echo "Could not locate gce script dir: $grpc_gce_script_root" 1>&2
return 1
}
[[ $# -lt 1 ]] && {
echo "$FUNCNAME: missing arg: host1 [host2 ... hostN]" 1>&2
return 1
}
grpc_hosts="$@"
}
# Updates the latest version of the support scripts on some hosts.
#
# call-seq:
# grpc_sync_scripts <server_name1>, <server_name2> .. <server_name3>
#
# Updates the GCE docker instance <server_name>
grpc_sync_scripts() {
_grpc_ensure_gcloud_ssh || return 1;
# declare vars local so that they don't pollute the shell environment
# where this func is used.
local grpc_zone grpc_project dry_run # set by _grpc_set_project_and_zone
local grpc_hosts grpc_gce_script_root
# set the project zone and check that all necessary args are provided
_grpc_set_project_and_zone -f _grpc_sync_scripts_args "$@" || return 1
local func_lib="shared_startup_funcs.sh"
local gce_func_lib="/var/local/startup_scripts/$func_lib"
local project_opt="--project $grpc_project"
local zone_opt="--zone $grpc_zone"
local host
for host in $grpc_hosts
do
gce_has_instance $grpc_project $host || return 1;
# Update the remote copy of the GCE func library.
local src_func_lib="$grpc_gce_script_root/$func_lib"
local rmt_func_lib="$host:$gce_func_lib"
gcloud compute copy-files $src_func_lib $rmt_func_lib $project_opt $zone_opt || return 1
done
}
grpc_sync_images_args() {
[[ $# -lt 1 ]] && {
echo "$FUNCNAME: missing arg: host1 [host2 ... hostN]" 1>&2
@@ -412,7 +472,6 @@ grpc_sync_images() {
# declare vars local so that they don't pollute the shell environment
# where they this func is used.
local grpc_zone grpc_project dry_run # set by _grpc_set_project_and_zone
-# set by grpc_sync_images
local grpc_hosts
# set the project zone and check that all necessary args are provided
@@ -425,7 +484,7 @@ grpc_sync_images() {
local host
for host in $grpc_hosts
do
-gce_has_instance $grpc_project $h || return 1;
+gce_has_instance $grpc_project $host || return 1;
local ssh_cmd="bash -l -c \"$cmd\""
echo "will run:"
echo " $ssh_cmd"
@@ -575,6 +634,18 @@ grpc_interop_gen_ruby_cmd() {
echo $the_cmd
}
# constructs the full dockerized Go interop test cmd.
#
# call-seq:
# flags= .... # generic flags to include the command
# cmd=$($grpc_gen_test_cmd $flags)
grpc_interop_gen_go_cmd() {
local cmd_prefix="sudo docker run grpc/go bin/bash -c";
local test_script="cd /go/src/github.com/google/grpc-go/interop/client";
local test_script+=" && go run client.go --use_tls=true";
local the_cmd="$cmd_prefix '$test_script $@ 1>&2'";
echo $the_cmd
}
# constructs the full dockerized java interop test cmd.
#
# call-seq:
@@ -605,4 +676,4 @@ grpc_interop_gen_php_cmd() {
}
-# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs|go
+# TODO(grpc-team): add grpc_interop_gen_xxx_cmd for python|cxx|nodejs

@@ -367,11 +367,12 @@ grpc_docker_launch_registry() {
grpc_docker_pull_known() {
local addr=$1
[[ -n $addr ]] || addr="0.0.0.0:5000"
-local known="base cxx php_base php ruby_base ruby java_base java"
+local known="base cxx php_base php ruby_base ruby java_base java go"
echo "... pulling docker images for '$known'"
for i in $known
do
-sudo docker pull ${addr}/grpc/$i \
+echo "<--- grpc/$i"
+sudo docker pull ${addr}/grpc/$i > /dev/null 2>&1 \
&& sudo docker tag ${addr}/grpc/$i grpc/$i || {
# log and continue
echo "docker op error: could not pull ${addr}/grpc/$i"
@@ -402,10 +403,15 @@ grpc_dockerfile_install() {
[[ -d $dockerfile_dir ]] || { echo "$FUNCNAME: not a valid dir: $dockerfile_dir"; return 1; }
-# For grpc/base, sync the ssh key into the .ssh dir in the dockerfile context
+# For specific base images, sync the ssh key into the .ssh dir in the dockerfile context
[[ $image_label == "grpc/base" ]] && {
-grpc_docker_sync_github_key $dockerfile_dir/.ssh || return 1;
+grpc_docker_sync_github_key $dockerfile_dir/.ssh 'base_ssh_key'|| return 1;
+}
+[[ $image_label == "grpc/go" ]] && {
+grpc_docker_sync_github_key $dockerfile_dir/.ssh 'go_ssh_key'|| return 1;
+}
+[[ $image_label == "grpc/java_base" ]] && {
+grpc_docker_sync_github_key $dockerfile_dir/.ssh 'java_base_ssh_key'|| return 1;
}
# TODO(temiola): maybe make cache/no-cache a func option?
@@ -445,6 +451,9 @@ grpc_docker_sync_github_key() {
local target_dir=$1
[[ -n $target_dir ]] || { echo "$FUNCNAME: missing arg: target_dir" >&2; return 1; }
+local key_file=$2
+[[ -n $key_file ]] || { echo "$FUNCNAME: missing arg: key_file" >&2; return 1; }
# determine the admin root; the parent of the dockerfile root,
local gs_dockerfile_root=$(load_metadata "attributes/gs_dockerfile_root")
[[ -n $gs_dockerfile_root ]] || {
@@ -454,7 +463,7 @@ grpc_docker_sync_github_key() {
local gcs_admin_root=$(dirname $gs_dockerfile_root)
# cp the file from gsutil to a known local area
-local gcs_key_path=$gcs_admin_root/github/ssh_key
+local gcs_key_path=$gcs_admin_root/github/$key_file
local local_key_path=$target_dir/github.rsa
mkdir -p $target_dir || {
echo "$FUNCNAME: could not create dir: $target_dir" 1>&2

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
# change to grpc repo root
cd $(dirname $0)/../..
root=`pwd`
virtualenv python2.7_virtual_environment
python2.7_virtual_environment/bin/pip install enum34==1.0.4 futures==2.2.0

@@ -160,8 +160,8 @@ class Jobset(object):
self._completed += 1
self._running.remove(job)
if dead: return
-message('WAITING', '%d jobs running, %d complete' % (
-len(self._running), self._completed))
+message('WAITING', '%d jobs running, %d complete, %d failed' % (
+len(self._running), self._completed, self._failures))
time.sleep(0.1)
def cancelled(self):

@@ -0,0 +1,10 @@
#!/bin/bash
set -ex
# change to grpc repo root
cd $(dirname $0)/../..
root=`pwd`
python2.7_virtual_environment/bin/python2.7 -B -m unittest discover -s src/python -p '*.py'
python3.4 -B -m unittest discover -s src/python -p '*.py'

@@ -75,6 +75,21 @@ class PhpLanguage(object):
return [['tools/run_tests/build_php.sh']]
class PythonLanguage(object):
def __init__(self):
self.allow_hashing = False
def test_binaries(self, config):
return ['tools/run_tests/run_python.sh']
def make_targets(self):
return []
def build_steps(self):
return [['tools/run_tests/build_python.sh']]
# different configurations we can run under
_CONFIGS = {
'dbg': SimpleConfig('dbg'),
@@ -92,7 +107,8 @@ _DEFAULT = ['dbg', 'opt']
_LANGUAGES = {
'c++': CLanguage('cxx', 'c++'),
'c': CLanguage('c', 'c'),
-'php': PhpLanguage()
+'php': PhpLanguage(),
+'python': PythonLanguage(),
}
# parse command line # parse command line

@@ -261,6 +261,10 @@
"language": "c++",
"name": "end2end_test"
},
+{
+"language": "c++",
+"name": "tips_client_test"
+},
{
"language": "c++",
"name": "status_test"

@@ -121,7 +121,9 @@
<ClInclude Include="..\..\src\core\iomgr\pollset.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_kick.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h" />
+<ClInclude Include="..\..\src\core\iomgr\pollset_kick_windows.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_posix.h" />
+<ClInclude Include="..\..\src\core\iomgr\pollset_windows.h" />
<ClInclude Include="..\..\src\core\iomgr\resolve_address.h" />
<ClInclude Include="..\..\src\core\iomgr\sockaddr.h" />
<ClInclude Include="..\..\src\core\iomgr\sockaddr_posix.h" />
@@ -254,6 +256,8 @@
</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\pollset_posix.c">
</ClCompile>
+<ClCompile Include="..\..\src\core\iomgr\pollset_windows.c">
+</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\resolve_address_posix.c">
</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\sockaddr_utils.c">

@@ -121,7 +121,9 @@
<ClInclude Include="..\..\src\core\iomgr\pollset.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_kick.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_kick_posix.h" />
+<ClInclude Include="..\..\src\core\iomgr\pollset_kick_windows.h" />
<ClInclude Include="..\..\src\core\iomgr\pollset_posix.h" />
+<ClInclude Include="..\..\src\core\iomgr\pollset_windows.h" />
<ClInclude Include="..\..\src\core\iomgr\resolve_address.h" />
<ClInclude Include="..\..\src\core\iomgr\sockaddr.h" />
<ClInclude Include="..\..\src\core\iomgr\sockaddr_posix.h" />
@@ -254,6 +256,8 @@
</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\pollset_posix.c">
</ClCompile>
+<ClCompile Include="..\..\src\core\iomgr\pollset_windows.c">
+</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\resolve_address_posix.c">
</ClCompile>
<ClCompile Include="..\..\src\core\iomgr\sockaddr_utils.c">
